// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package cloud
import (
"fmt"
"os"
"reflect"
"sort"
"strings"
"github.com/juju/errors"
"github.com/juju/schema"
"github.com/juju/utils/v4"
"gopkg.in/yaml.v2"
"github.com/juju/juju/juju/osenv"
)
//go:generate go run github.com/juju/juju/generate/filetoconst fallbackPublicCloudInfo fallback-public-cloud.yaml fallback_public_cloud.go 2015 cloud
// AuthType is the type of authentication used by the cloud.
type AuthType string
// AuthTypes is defined to allow sorting AuthType slices.
type AuthTypes []AuthType
func (a AuthTypes) Len() int { return len(a) }
func (a AuthTypes) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a AuthTypes) Less(i, j int) bool { return a[i] < a[j] }
// Contains reports whether AuthType t is present in a.
func (a AuthTypes) Contains(t AuthType) bool {
for _, v := range a {
if v == t {
return true
}
}
return false
}
// String implements the Stringer interface for AuthType.
func (a AuthType) String() string {
return string(a)
}
const (
// AccessKeyAuthType is an authentication type using a key and secret.
AccessKeyAuthType AuthType = "access-key"
// InstanceRoleAuthType is an authentication type that sources credentials
// from within the machine's context in a given cloud provider.
// These credentials are only available when running on that machine.
InstanceRoleAuthType AuthType = "instance-role"
// ManagedIdentityAuthType is an authentication type that sources credentials
// from a user managed identity from within the machine's context.
// These credentials are only available when running on that machine.
ManagedIdentityAuthType AuthType = "managed-identity"
// UserPassAuthType is an authentication type using a username and password.
UserPassAuthType AuthType = "userpass"
// OAuth1AuthType is an authentication type using oauth1.
OAuth1AuthType AuthType = "oauth1"
// OAuth2AuthType is an authentication type using oauth2.
OAuth2AuthType AuthType = "oauth2"
// JSONFileAuthType is an authentication type that takes a path to
// a JSON file.
JSONFileAuthType AuthType = "jsonfile"
// ClientCertificateAuthType is an authentication type using client
// certificates.
ClientCertificateAuthType AuthType = "clientcertificate"
// HTTPSigAuthType is an authentication type that uses HTTP signatures:
// https://tools.ietf.org/html/draft-cavage-http-signatures-06
HTTPSigAuthType AuthType = "httpsig"
// InteractiveAuthType is a credential auth-type provided as an option to
// "juju add-credential", which takes the user through the process of
// adding credentials, e.g. for lxd: generating a certificate credential.
// This authType should be used in a CredentialSchema, not a CloudSchema.
InteractiveAuthType = "interactive"
// EmptyAuthType is the authentication type used for providers
// that require no credentials, e.g. "lxd", and "manual".
EmptyAuthType AuthType = "empty"
// AuthTypesKey is the name of the key in a cloud config or cloud schema
// that holds the cloud's auth types.
AuthTypesKey = "auth-types"
// EndpointKey is the name of the key in a cloud config or cloud schema
// that holds the cloud's endpoint url.
EndpointKey = "endpoint"
// RegionsKey is the name of the key in a cloud schema that holds the list
// of regions a cloud supports.
RegionsKey = "regions"
// CertFilenameKey is the name of the key in a cloud schema that holds
// the filename of a CA Certificate to be used to access the cloud, in conjunction
// with an auth type.
CertFilenameKey = "certfilename"
)
// Legacy auth types. There should be no reason to use these in Juju anymore;
// they are kept to maintain backwards compatibility.
const (
// CertificateAuthType is an authentication type using certificates.
// NOTE: This type should never be used in practice as it doesn't exist. We
// keep it here only to maintain backwards compatibility for Juju in a
// pre-2.9 world. Consider using ClientCertificateAuthType instead. This
// note applies to Kubernetes.
// TODO: tlm update lxd to stop using this auth type
CertificateAuthType AuthType = "certificate"
// OAuth2WithCertAuthType is an authentication type using oauth2 and a
// client certificate.
// NOTE: This type should never be used in practice as it doesn't exist. We
// keep it here only to maintain backwards compatibility for Juju in a
// pre-2.9 world. Consider using OAuth2AuthType instead.
OAuth2WithCertAuthType AuthType = "oauth2withcert"
)
// Attrs serves as a map to hold region-specific configuration attributes.
// This serves to reduce confusion over having a nested map, i.e.
// map[string]map[string]interface{}
type Attrs map[string]interface{}
// RegionConfig holds a map of regions and the attributes that serve as the
// region-specific configuration options. This allows model inheritance to
// function, providing a place to store configuration for a specific region
// which is passed down to other models under the same controller.
type RegionConfig map[string]Attrs
// Cloud is a cloud definition.
type Cloud struct {
// Name of the cloud.
Name string
// Type is the type of cloud, eg ec2, openstack etc.
// This is one of the provider names registered with
// environs.RegisterProvider.
Type string
// HostCloudRegion represents the k8s host cloud. The format is <cloudType>/<region>.
HostCloudRegion string
// Description describes the type of cloud.
Description string
// AuthTypes are the authentication modes supported by the cloud.
AuthTypes AuthTypes
// Endpoint is the default endpoint for the cloud regions, may be
// overridden by a region.
Endpoint string
// IdentityEndpoint is the default identity endpoint for the cloud
// regions, may be overridden by a region.
IdentityEndpoint string
// StorageEndpoint is the default storage endpoint for the cloud
// regions, may be overridden by a region.
StorageEndpoint string
// Regions are the regions available in the cloud.
//
// Regions is a slice, and not a map, because order is important.
// The first region in the slice is the default region for the
// cloud.
Regions []Region
// Config contains optional cloud-specific configuration to use
// when bootstrapping Juju in this cloud. The cloud configuration
// will be combined with Juju-generated, and user-supplied values;
// user-supplied values taking precedence.
Config map[string]interface{}
// RegionConfig contains optional region specific configuration.
// Like Config above, this will be combined with Juju-generated and user
// supplied values; with user supplied values taking precedence.
RegionConfig RegionConfig
// CACertificates contains an optional list of Certificate
// Authority certificates to be used to validate certificates
// of cloud infrastructure components
// The contents are Base64 encoded x.509 certs.
CACertificates []string
// SkipTLSVerify is true if the client should be asked not to
// validate certificates. It is not recommended for production clouds.
// It is secure (false) by default.
SkipTLSVerify bool
// IsControllerCloud is true when this is the cloud used by the controller.
IsControllerCloud bool
}
// SplitHostCloudRegion splits a host cloud region into its cloudType and region parts.
func SplitHostCloudRegion(hostCloudRegion string) (string, string, error) {
fields := strings.SplitN(hostCloudRegion, "/", 2)
if len(fields) == 0 || fields[0] == "" {
return "", "", errors.NotValidf("host cloud region %q", hostCloudRegion)
}
region := ""
if len(fields) > 1 {
region = fields[1]
}
return fields[0], region, nil
}
// BuildHostCloudRegion combines cloudType and region into a host cloud region.
func BuildHostCloudRegion(cloudType, region string) string {
if region == "" {
return cloudType
}
return cloudType + "/" + region
}
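// A minimal illustrative sketch of the round trip between the two helpers
// above (the cloud type and region values are hypothetical):
//
//	hcr := BuildHostCloudRegion("ec2", "us-east-1") // "ec2/us-east-1"
//	cloudType, region, err := SplitHostCloudRegion(hcr)
//	// cloudType == "ec2", region == "us-east-1", err == nil
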
// Region is a cloud region.
type Region struct {
// Name is the name of the region.
Name string
// Endpoint is the region's primary endpoint URL.
Endpoint string
// IdentityEndpoint is the region's identity endpoint URL.
// If the cloud/region does not have an identity-specific
// endpoint URL, this will be empty.
IdentityEndpoint string
// StorageEndpoint is the region's storage endpoint URL.
// If the cloud/region does not have a storage-specific
// endpoint URL, this will be empty.
StorageEndpoint string
}
// IsEmpty reports whether the region has no endpoints set.
func (r Region) IsEmpty() bool {
return r.Endpoint == "" && r.IdentityEndpoint == "" && r.StorageEndpoint == ""
}
// cloudSet contains cloud definitions, used for marshalling and
// unmarshalling.
type cloudSet struct {
// Clouds is a map of cloud definitions, keyed on cloud name.
Clouds map[string]cloud `yaml:"clouds"`
}
// cloud is equivalent to Cloud, for marshalling and unmarshalling.
type cloud struct {
Name string `yaml:"name,omitempty"`
Type string `yaml:"type"`
HostCloudRegion string `yaml:"host-cloud-region,omitempty"`
Description string `yaml:"description,omitempty"`
AuthTypes []AuthType `yaml:"auth-types,omitempty,flow"`
Endpoint string `yaml:"endpoint,omitempty"`
IdentityEndpoint string `yaml:"identity-endpoint,omitempty"`
StorageEndpoint string `yaml:"storage-endpoint,omitempty"`
Regions regions `yaml:"regions,omitempty"`
Config map[string]interface{} `yaml:"config,omitempty"`
RegionConfig RegionConfig `yaml:"region-config,omitempty"`
CACertificates []string `yaml:"ca-certificates,omitempty"`
SkipTLSVerify bool `yaml:"skip-tls-verify,omitempty"`
IsControllerCloud bool `yaml:"is-controller-cloud,omitempty"`
}
// regions is a collection of regions, held as a map and/or
// as a yaml.MapSlice.
//
// When marshalling, we populate the Slice field only. This is
// necessary for us to control the order of map items, because the
// type yaml.MapSlice preserves the order of the keys when encoding
// and decoding.
//
// When unmarshalling, we populate both Map and Slice. Map is
// populated to simplify conversion to Region objects. Slice
// is populated so we can identify the first map item, which
// becomes the default region for the cloud.
type regions struct {
Map map[string]*region
Slice yaml.MapSlice
}
// region is equivalent to Region, for marshalling and unmarshalling.
type region struct {
Endpoint string `yaml:"endpoint,omitempty"`
IdentityEndpoint string `yaml:"identity-endpoint,omitempty"`
StorageEndpoint string `yaml:"storage-endpoint,omitempty"`
}
const (
// CloudTypeKubernetes is the kubernetes cloud type.
CloudTypeKubernetes = "kubernetes"
)
// DefaultCloudRegion is the name of the default region that Juju creates for clouds that do not define a region.
const DefaultCloudRegion = "default"
var caasCloudTypes = map[string]bool{
CloudTypeKubernetes: true,
}
// CloudIsCAAS checks if cloud is a CAAS cloud.
func CloudIsCAAS(cloud Cloud) bool {
return CloudTypeIsCAAS(cloud.Type)
}
// CloudTypeIsCAAS checks if a given cloud type is a CAAS cloud.
func CloudTypeIsCAAS(cloudType string) bool {
return caasCloudTypes[cloudType]
}
// CloudByName returns the cloud with the specified name.
// If there exists no cloud with the specified name, an
// error satisfying errors.IsNotFound will be returned.
//
// TODO(axw) write unit tests for this.
func CloudByName(name string) (*Cloud, error) {
// Personal clouds take precedence.
personalClouds, err := PersonalCloudMetadata()
if err != nil {
return nil, errors.Trace(err)
}
if cloud, ok := personalClouds[name]; ok {
return &cloud, nil
}
clouds, _, err := PublicCloudMetadata(JujuPublicCloudsPath())
if err != nil {
return nil, errors.Trace(err)
}
if cloud, ok := clouds[name]; ok {
return &cloud, nil
}
return nil, errors.NotFoundf("cloud %s", name)
}
// RegionByName finds the region in the given slice with the
// specified name, with case folding.
func RegionByName(regions []Region, name string) (*Region, error) {
for _, region := range regions {
if !strings.EqualFold(region.Name, name) {
continue
}
return &region, nil
}
availableRegions := "cloud has no regions"
if len(regions) > 0 {
availableRegions = fmt.Sprintf("expected one of %q", RegionNames(regions))
}
return nil, errors.NewNotFound(nil, fmt.Sprintf(
"region %q not found (%v)",
name, availableRegions,
))
}
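// For example, looking up a region case-insensitively (a sketch with
// made-up region names):
//
//	regions := []Region{{Name: "us-east-1"}, {Name: "eu-west-1"}}
//	r, err := RegionByName(regions, "US-EAST-1")
//	// r.Name == "us-east-1"; a not-found error is returned for unknown names.
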
// RegionNames returns a sorted list of the names of the given regions.
func RegionNames(regions []Region) []string {
names := make([]string, len(regions))
for i, region := range regions {
names[i] = region.Name
}
sort.Strings(names)
return names
}
// JujuPublicCloudsPath is the location where public cloud information is
// expected to be found. Requires JUJU_DATA to be set.
func JujuPublicCloudsPath() string {
return osenv.JujuXDGDataHomePath("public-clouds.yaml")
}
// PublicCloudMetadata looks in searchPath for cloud metadata files and if none
// are found, returns the fallback public cloud metadata.
func PublicCloudMetadata(searchPath ...string) (result map[string]Cloud, fallbackUsed bool, err error) {
defer func() {
// Until we can be sure the public clouds yaml is updated to support
// Azure managed identity auth types, add it manually.
// This is a short term compatibility fix.
for name, cld := range result {
if cld.Type != "azure" || cld.AuthTypes.Contains(ManagedIdentityAuthType) {
continue
}
cld.AuthTypes = append(cld.AuthTypes, ManagedIdentityAuthType)
result[name] = cld
}
}()
for _, file := range searchPath {
data, err := os.ReadFile(file)
if err != nil && os.IsNotExist(err) {
continue
}
if err != nil {
return nil, false, errors.Trace(err)
}
clouds, err := ParseCloudMetadata(data)
if err != nil {
return nil, false, errors.Trace(err)
}
return clouds, false, err
}
clouds, err := ParseCloudMetadata([]byte(fallbackPublicCloudInfo))
return clouds, true, err
}
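// A usage sketch for PublicCloudMetadata:
//
//	clouds, fallbackUsed, err := PublicCloudMetadata(JujuPublicCloudsPath())
//	// fallbackUsed is true when no file was found and the compiled-in
//	// fallback public cloud data was parsed instead.
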
// ParseOneCloud parses the given yaml bytes into a single Cloud metadata.
func ParseOneCloud(data []byte) (Cloud, error) {
var c cloud
if err := yaml.Unmarshal(data, &c); err != nil {
return Cloud{}, errors.Annotate(err, "cannot unmarshal yaml cloud metadata")
}
return cloudFromInternal(c), nil
}
// ParseCloudMetadata parses the given yaml bytes into Clouds metadata.
//
// The expected regular yaml format is:
//
//	clouds:
//	  garage-maas:
//	    type: maas
//	    auth-types: [oauth1]
//	    endpoint: "http://garagemaas"
//	    skip-tls-verify: true
//	  ...
//
// It also accepts a yaml format without the 'clouds' key at the top,
// e.g.
//
//	garage-maas:
//	  type: maas
//	  auth-types: [oauth1]
//	  endpoint: "http://garagemaas"
//	  skip-tls-verify: true
//	...
func ParseCloudMetadata(data []byte) (map[string]Cloud, error) {
var metadata cloudSet
// Unmarshal with a generic type first
yamlMap := make(map[string]interface{})
if err := yaml.Unmarshal(data, &yamlMap); err != nil {
return nil, errors.Annotate(err, "cannot unmarshal yaml cloud metadata")
}
cloudsetSchema := schema.FieldMap(schema.Fields{
"clouds": schema.Map(schema.String(), schema.Any()),
}, nil)
// Try to coerce the data against the schema with the 'clouds' key; if that
// succeeds, read it directly into a cloudSet, otherwise read it as a map
// and construct the cloudSet manually.
regularMap, _ := cloudsetSchema.Coerce(yamlMap, []string{})
if regularMap != nil {
// Able to coerce, so read directly into the cloudSet
if errCloudSet := yaml.Unmarshal(data, &metadata); errCloudSet != nil {
return nil, errors.Errorf("Invalid cloud metadata %s", yamlMap)
}
} else {
// Unable to coerce cloudSet, try to unmarshal into a map[string]*cloud
cloudMap := make(map[string]cloud)
if errCloudMap := yaml.Unmarshal(data, &cloudMap); errCloudMap != nil {
return nil, errors.Errorf("Invalid cloud metadata %s", yamlMap)
}
metadata.Clouds = cloudMap
}
// Translate to the exported type. For each cloud, we store
// the first region for the cloud as its default region.
clouds := make(map[string]Cloud)
for name, cloud := range metadata.Clouds {
details := cloudFromInternal(cloud)
details.Name = name
if details.Description == "" {
var ok bool
if details.Description, ok = defaultCloudDescription[name]; !ok {
details.Description = defaultCloudDescription[cloud.Type]
}
}
clouds[name] = details
}
return clouds, nil
}
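// A hedged usage sketch for ParseCloudMetadata, reusing the garage-maas
// example from the comment above:
//
//	data := []byte(`
//	clouds:
//	  garage-maas:
//	    type: maas
//	    auth-types: [oauth1]
//	    endpoint: http://garagemaas
//	`)
//	clouds, err := ParseCloudMetadata(data)
//	// clouds["garage-maas"].Type == "maas"; Name is set from the map key.
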
// DefaultCloudDescription returns the description for the specified cloud
// type, or an empty string if the cloud type is unknown.
func DefaultCloudDescription(cloudType string) string {
return defaultCloudDescription[cloudType]
}
var defaultCloudDescription = map[string]string{
"aws": "Amazon Web Services",
"aws-china": "Amazon China",
"aws-gov": "Amazon (USA Government)",
"google": "Google Cloud Platform",
"azure": "Microsoft Azure",
"azure-china": "Microsoft Azure China",
"lxd": "LXD Container Hypervisor",
"maas": "Metal As A Service",
"openstack": "Openstack Cloud",
"oracle": "Oracle Compute Cloud Service",
"kubernetes": "A Kubernetes Cluster",
}
// WritePublicCloudMetadata marshals to YAML and writes the cloud metadata
// to the public cloud file.
func WritePublicCloudMetadata(cloudsMap map[string]Cloud) error {
data, err := marshalCloudMetadata(cloudsMap)
if err != nil {
return errors.Trace(err)
}
return utils.AtomicWriteFile(JujuPublicCloudsPath(), data, 0600)
}
// IsSameCloudMetadata returns true if both meta1 and meta2 contain the
// same cloud metadata.
func IsSameCloudMetadata(meta1, meta2 map[string]Cloud) (bool, error) {
// The easiest approach is to simply marshal to YAML and compare.
yaml1, err := marshalCloudMetadata(meta1)
if err != nil {
return false, err
}
yaml2, err := marshalCloudMetadata(meta2)
if err != nil {
return false, err
}
return string(yaml1) == string(yaml2), nil
}
// marshalCloudMetadata marshals the given clouds to YAML.
func marshalCloudMetadata(cloudsMap map[string]Cloud) ([]byte, error) {
clouds := cloudSet{make(map[string]cloud)}
for name, metadata := range cloudsMap {
clouds.Clouds[name] = cloudToInternal(metadata, false)
}
data, err := yaml.Marshal(clouds)
if err != nil {
return nil, errors.Annotate(err, "cannot marshal cloud metadata")
}
return data, nil
}
// MarshalCloud marshals a Cloud to an opaque byte array.
func MarshalCloud(cloud Cloud) ([]byte, error) {
return yaml.Marshal(cloudToInternal(cloud, true))
}
// UnmarshalCloud unmarshals a Cloud from a byte array produced by MarshalCloud.
func UnmarshalCloud(in []byte) (Cloud, error) {
var internal cloud
if err := yaml.Unmarshal(in, &internal); err != nil {
return Cloud{}, errors.Annotate(err, "cannot unmarshal yaml cloud metadata")
}
return cloudFromInternal(internal), nil
}
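// A minimal round-trip sketch for MarshalCloud/UnmarshalCloud (field values
// are illustrative only):
//
//	in := Cloud{Name: "garage-maas", Type: "maas", AuthTypes: AuthTypes{OAuth1AuthType}}
//	data, _ := MarshalCloud(in)
//	out, _ := UnmarshalCloud(data)
//	// out.Name == "garage-maas", out.Type == "maas"
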
func cloudToInternal(in Cloud, withName bool) cloud {
var regions regions
for _, r := range in.Regions {
regions.Slice = append(regions.Slice, yaml.MapItem{
Key: r.Name,
Value: region{
Endpoint: r.Endpoint,
IdentityEndpoint: r.IdentityEndpoint,
StorageEndpoint: r.StorageEndpoint,
},
})
}
name := in.Name
if !withName {
name = ""
}
return cloud{
Name: name,
Type: in.Type,
HostCloudRegion: in.HostCloudRegion,
AuthTypes: in.AuthTypes,
Endpoint: in.Endpoint,
IdentityEndpoint: in.IdentityEndpoint,
StorageEndpoint: in.StorageEndpoint,
Regions: regions,
Config: in.Config,
RegionConfig: in.RegionConfig,
CACertificates: in.CACertificates,
SkipTLSVerify: in.SkipTLSVerify,
IsControllerCloud: in.IsControllerCloud,
}
}
func cloudFromInternal(in cloud) Cloud {
var regions []Region
if len(in.Regions.Map) > 0 {
for _, item := range in.Regions.Slice {
name := fmt.Sprint(item.Key)
r := in.Regions.Map[name]
if r == nil {
// r will be nil if none of the fields in
// the YAML are set.
regions = append(regions, Region{Name: name})
} else {
regions = append(regions, Region{
name,
r.Endpoint,
r.IdentityEndpoint,
r.StorageEndpoint,
})
}
}
}
meta := Cloud{
Name: in.Name,
Type: in.Type,
HostCloudRegion: in.HostCloudRegion,
AuthTypes: in.AuthTypes,
Endpoint: in.Endpoint,
IdentityEndpoint: in.IdentityEndpoint,
StorageEndpoint: in.StorageEndpoint,
Regions: regions,
Config: in.Config,
RegionConfig: in.RegionConfig,
Description: in.Description,
CACertificates: in.CACertificates,
SkipTLSVerify: in.SkipTLSVerify,
IsControllerCloud: in.IsControllerCloud,
}
meta.denormaliseMetadata()
return meta
}
// MarshalYAML implements the yaml.Marshaler interface.
func (r regions) MarshalYAML() (interface{}, error) {
return r.Slice, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (r *regions) UnmarshalYAML(f func(interface{}) error) error {
if err := f(&r.Map); err != nil {
return err
}
return f(&r.Slice)
}
// To keep the metadata concise, attributes on the metadata struct which
// have the same value for each item may be moved up to a higher level in
// the tree. denormaliseMetadata descends the tree and fills in any missing
// attributes with values from a higher level.
func (cloud Cloud) denormaliseMetadata() {
for name, region := range cloud.Regions {
r := region
inherit(&r, &cloud)
cloud.Regions[name] = r
}
}
type structTags map[reflect.Type]map[string]int
var tagsForType = make(structTags)
// RegisterStructTags ensures the yaml tags for the given structs are able to be used
// when parsing cloud metadata.
func RegisterStructTags(vals ...interface{}) {
tags := mkTags(vals...)
for k, v := range tags {
tagsForType[k] = v
}
}
func init() {
RegisterStructTags(Cloud{}, Region{})
}
func mkTags(vals ...interface{}) map[reflect.Type]map[string]int {
typeMap := make(map[reflect.Type]map[string]int)
for _, v := range vals {
t := reflect.TypeOf(v)
typeMap[t] = yamlTags(t)
}
return typeMap
}
// yamlTags returns a map from yaml tag to the field index for the string fields in the given type.
func yamlTags(t reflect.Type) map[string]int {
if t.Kind() != reflect.Struct {
panic(errors.Errorf("cannot get yaml tags on type %s", t))
}
tags := make(map[string]int)
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
if f.Type != reflect.TypeOf("") {
continue
}
if tag := f.Tag.Get("yaml"); tag != "" {
if i := strings.Index(tag, ","); i >= 0 {
tag = tag[0:i]
}
if tag == "-" {
continue
}
if tag != "" {
f.Name = tag
}
}
tags[f.Name] = i
}
return tags
}
// inherit sets any blank fields in dst to their equivalent values in fields in src that have matching yaml tags.
// The dst parameter must be a pointer to a struct.
func inherit(dst, src interface{}) {
for tag := range tags(dst) {
setFieldByTag(dst, tag, fieldByTag(src, tag), false)
}
}
// tags returns the field offsets for the yaml tags defined by the given value, which must be
// a struct or a pointer to a struct.
func tags(x interface{}) map[string]int {
t := reflect.TypeOf(x)
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
panic(errors.Errorf("expected struct, not %s", t))
}
if tagm := tagsForType[t]; tagm != nil {
return tagm
}
panic(errors.Errorf("%s not found in type table", t))
}
// fieldByTag returns the value for the field in x with the given yaml tag, or "" if there is no such field.
func fieldByTag(x interface{}, tag string) string {
tagm := tags(x)
v := reflect.ValueOf(x)
if v.Kind() == reflect.Ptr {
v = v.Elem()
}
if i, ok := tagm[tag]; ok {
return v.Field(i).Interface().(string)
}
return ""
}
// setFieldByTag sets the value for the field in x with the given yaml tag to val.
// The override parameter specifies whether the value will be set even if the original value is non-empty.
func setFieldByTag(x interface{}, tag, val string, override bool) {
i, ok := tags(x)[tag]
if !ok {
return
}
v := reflect.ValueOf(x).Elem()
f := v.Field(i)
if override || f.Interface().(string) == "" {
f.Set(reflect.ValueOf(val))
}
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package cloud
import (
"encoding/json"
"encoding/pem"
"fmt"
"os"
"strings"
"github.com/juju/errors"
"github.com/juju/schema"
"github.com/juju/utils/v4"
"gopkg.in/yaml.v2"
"github.com/juju/juju/internal/configschema"
)
// CloudCredential contains attributes used to define credentials for a cloud.
type CloudCredential struct {
// DefaultCredential is the named credential to use by default.
DefaultCredential string `yaml:"default-credential,omitempty"`
// DefaultRegion is the cloud region to use by default.
DefaultRegion string `yaml:"default-region,omitempty"`
// AuthCredentials holds the credentials for a cloud, keyed on name.
AuthCredentials map[string]Credential `yaml:",omitempty,inline"`
}
func (c *CloudCredential) validateDefaultCredential() {
if c.DefaultCredential != "" {
stillHaveDefault := false
for name := range c.AuthCredentials {
if name == c.DefaultCredential {
stillHaveDefault = true
break
}
}
if !stillHaveDefault {
c.DefaultCredential = ""
}
}
}
// Credential instances represent cloud credentials.
type Credential struct {
authType AuthType
attributes map[string]string
// Revoked is true if the credential has been revoked.
Revoked bool
// Label is optionally set to describe the credentials to a user.
Label string
// Invalid is true if the credential is invalid.
Invalid bool
// InvalidReason contains the reason why a credential was flagged as invalid.
// It is expected that this string will be empty when a credential is valid.
InvalidReason string
}
// AuthType returns the authentication type.
func (c Credential) AuthType() AuthType {
return c.authType
}
func copyStringMap(in map[string]string) map[string]string {
if in == nil {
return nil
}
out := make(map[string]string)
for k, v := range in {
out[k] = v
}
return out
}
// Attributes returns the credential attributes.
func (c Credential) Attributes() map[string]string {
return copyStringMap(c.attributes)
}
type credentialInternal struct {
AuthType AuthType `yaml:"auth-type" json:"auth-type"`
Attributes map[string]string `yaml:",omitempty,inline" json:",omitempty,inline"`
}
// MarshalYAML implements the yaml.Marshaler interface.
func (c Credential) MarshalYAML() (interface{}, error) {
return credentialInternal{c.authType, c.attributes}, nil
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (c *Credential) UnmarshalYAML(unmarshal func(interface{}) error) error {
var internal credentialInternal
if err := unmarshal(&internal); err != nil {
return err
}
*c = Credential{authType: internal.AuthType, attributes: internal.Attributes}
return nil
}
// MarshalJSON implements the json.Marshaler interface.
func (c Credential) MarshalJSON() ([]byte, error) {
return json.Marshal(credentialInternal{c.authType, c.attributes})
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (c *Credential) UnmarshalJSON(b []byte) error {
var internal credentialInternal
if err := json.Unmarshal(b, &internal); err != nil {
return err
}
*c = Credential{authType: internal.AuthType, attributes: internal.Attributes}
return nil
}
// NewCredential returns a new, immutable, Credential with the supplied
// auth-type and attributes.
func NewCredential(authType AuthType, attributes map[string]string) Credential {
return Credential{authType: authType, attributes: copyStringMap(attributes)}
}
// NewNamedCredential returns an immutable Credential with the supplied properties.
func NewNamedCredential(name string, authType AuthType, attributes map[string]string, revoked bool) Credential {
return Credential{
Label: name,
authType: authType,
attributes: copyStringMap(attributes),
Revoked: revoked,
}
}
// NewEmptyCredential returns a new Credential with the EmptyAuthType
// auth-type.
func NewEmptyCredential() Credential {
return Credential{authType: EmptyAuthType, attributes: nil}
}
// NewEmptyCloudCredential returns a new CloudCredential with an empty
// default credential.
func NewEmptyCloudCredential() *CloudCredential {
return &CloudCredential{AuthCredentials: map[string]Credential{"default": NewEmptyCredential()}}
}
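// An illustrative sketch of constructing credentials with the helpers above;
// the attribute names shown are hypothetical and depend on the provider's
// CredentialSchema:
//
//	cred := NewCredential(UserPassAuthType, map[string]string{
//		"username": "admin",
//		"password": "hunter2",
//	})
//	empty := NewEmptyCloudCredential() // one "default" credential with EmptyAuthType
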
// NamedCredentialAttr describes the properties of a named credential attribute.
type NamedCredentialAttr struct {
// Name is the name of the credential value.
Name string
// CredentialAttr holds the properties of the credential value.
CredentialAttr
}
// CredentialSchema describes the schema of a credential. Credential schemas
// are specific to cloud providers.
type CredentialSchema []NamedCredentialAttr
// Attribute returns the named CredentialAttr value.
func (s CredentialSchema) Attribute(name string) (*CredentialAttr, bool) {
for _, value := range s {
if value.Name == name {
result := value.CredentialAttr
return &result, true
}
}
return nil, false
}
// FinalizeCredential finalizes a credential by matching it with one of the
// provided credential schemas, and reading any file attributes into their
// corresponding non-file attributes. This will also validate the credential.
//
// If there is no schema with the matching auth-type, an error satisfying
// errors.IsNotSupported will be returned.
func FinalizeCredential(
credential Credential,
schemas map[AuthType]CredentialSchema,
readFile func(string) ([]byte, error),
) (*Credential, error) {
schema, ok := schemas[credential.authType]
if !ok {
return nil, errors.NotSupportedf("auth-type %q", credential.authType)
}
attrs, err := schema.Finalize(credential.attributes, readFile)
if err != nil {
return nil, errors.Trace(err)
}
return &Credential{authType: credential.authType, attributes: attrs}, nil
}
// Finalize finalizes the given credential attributes against the credential
// schema. If the attributes are invalid, Finalize will return an error.
//
// An updated attribute map will be returned, having any file attributes
// deleted, and replaced by their non-file counterparts with the values set
// to the contents of the files.
func (s CredentialSchema) Finalize(
attrs map[string]string,
readFile func(string) ([]byte, error),
) (map[string]string, error) {
checker, err := s.schemaChecker()
if err != nil {
return nil, errors.Trace(err)
}
m := make(map[string]interface{})
for k, v := range attrs {
m[k] = v
}
result, err := checker.Coerce(m, nil)
if err != nil {
return nil, errors.Trace(err)
}
resultMap := result.(map[string]interface{})
newAttrs := make(map[string]string)
// Construct the final credential attributes map, reading values from files as necessary.
for _, field := range s {
if field.FileAttr != "" {
if err := s.processFileAttrValue(field, resultMap, newAttrs, readFile); err != nil {
return nil, errors.Trace(err)
}
continue
}
name := field.Name
if field.FilePath {
pathValue, ok := resultMap[name]
if ok && pathValue != "" {
absPath, err := ValidateFileAttrValue(pathValue.(string))
if err != nil {
return nil, errors.Trace(err)
}
data, err := readFile(absPath)
if err != nil {
return nil, errors.Annotatef(err, "reading file for %q", name)
}
if len(data) == 0 {
return nil, errors.NotValidf("empty file for %q", name)
}
newAttrs[name] = string(data)
continue
}
}
if val, ok := resultMap[name]; ok {
newAttrs[name] = val.(string)
}
}
return newAttrs, nil
}
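// A hedged sketch of finalizing credential attributes against a schema; the
// schema, attribute name and file path below are made up for illustration:
//
//	schema := CredentialSchema{
//		{Name: "client-cert", CredentialAttr: CredentialAttr{FilePath: true}},
//	}
//	attrs, err := schema.Finalize(
//		map[string]string{"client-cert": "~/cert.pem"},
//		os.ReadFile, // any func(string) ([]byte, error) will do
//	)
//	// On success attrs["client-cert"] holds the file contents, not the path.
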
// ExpandFilePathsOfCredential iterates over the credential schema attributes
// and checks if the credential attribute has the ExpandFilePath flag set. If
// so, the value of the credential attribute is interpreted as a file path and
// replaced with the contents of the file.
func ExpandFilePathsOfCredential(
cred Credential,
schemas map[AuthType]CredentialSchema,
) (Credential, error) {
schema, exists := schemas[cred.AuthType()]
if !exists {
return cred, nil
}
attributes := cred.Attributes()
for _, credAttr := range schema {
if !credAttr.CredentialAttr.ExpandFilePath {
continue
}
val, exists := attributes[credAttr.Name]
if !exists || val == "" {
continue
}
// NOTE: tlm dirty fix for lp1976620. This will be removed in Juju 3.0
// when we stop overloading the keys in cloud credentials with different
// values.
if block, _ := pem.Decode([]byte(val)); block != nil {
continue
}
abs, err := ValidateFileAttrValue(val)
if err != nil {
return cred, fmt.Errorf("determining file path value for credential attribute: %w", err)
}
contents, err := os.ReadFile(abs)
if err != nil {
return cred, fmt.Errorf("reading file %q contents for credential attribute %q: %w", abs, credAttr.Name, err)
}
attributes[credAttr.Name] = string(contents)
}
return NewNamedCredential(cred.Label, cred.AuthType(), attributes, cred.Revoked), nil
}
// ValidateFileAttrValue returns the normalised file path, so
// long as the specified path is valid and not a directory.
func ValidateFileAttrValue(path string) (string, error) {
absPath, err := utils.ExpandPath(path)
if err != nil {
return "", err
}
info, err := os.Stat(absPath)
if err != nil {
return "", fmt.Errorf("invalid file path: %w", err)
}
if info.IsDir() {
return "", fmt.Errorf("file path %q must be a file", absPath)
}
return absPath, nil
}
func (s CredentialSchema) processFileAttrValue(
field NamedCredentialAttr, resultMap map[string]interface{}, newAttrs map[string]string,
readFile func(string) ([]byte, error),
) error {
name := field.Name
if fieldVal, ok := resultMap[name]; ok {
if _, ok := resultMap[field.FileAttr]; ok {
return errors.NotValidf(
"specifying both %q and %q",
name, field.FileAttr,
)
}
newAttrs[name] = fieldVal.(string)
return nil
}
fieldVal, ok := resultMap[field.FileAttr]
if !ok {
return errors.NewNotValid(nil, fmt.Sprintf(
"either %q or %q must be specified",
name, field.FileAttr,
))
}
data, err := readFile(fieldVal.(string))
if err != nil {
return errors.Annotatef(err, "reading file for %q", name)
}
if len(data) == 0 {
return errors.NotValidf("empty file for %q", name)
}
newAttrs[name] = string(data)
return nil
}
func (s CredentialSchema) schemaChecker() (schema.Checker, error) {
fields := make(configschema.Fields)
for _, field := range s {
fields[field.Name] = configschema.Attr{
Description: field.Description,
Type: configschema.Tstring,
Group: configschema.AccountGroup,
Mandatory: field.FileAttr == "" && !field.Optional,
Secret: field.Hidden,
Values: field.Options,
}
}
// TODO(axw) add support to environschema for attributes whose values
// can be read in from a file.
for _, field := range s {
if field.FileAttr == "" {
continue
}
if _, ok := fields[field.FileAttr]; ok {
return nil, errors.Errorf("duplicate field %q", field.FileAttr)
}
fields[field.FileAttr] = configschema.Attr{
Description: field.Description + " (file)",
Type: configschema.Tstring,
Group: configschema.AccountGroup,
Mandatory: false,
Secret: false,
}
}
schemaFields, schemaDefaults, err := fields.ValidationSchema()
if err != nil {
return nil, errors.Trace(err)
}
return schema.StrictFieldMap(schemaFields, schemaDefaults), nil
}
// CredentialAttr describes the properties of a credential attribute.
type CredentialAttr struct {
// Description is a human-readable description of the credential
// attribute.
Description string
// Hidden controls whether or not the attribute value will be hidden
// when being entered interactively. Regardless of this, all credential
// attributes are provided only to the Juju controllers.
Hidden bool
// FileAttr is the name of an attribute that may be specified instead
// of this one, which points to a file that will be read in and its
// value used for this attribute.
FileAttr string
// FilePath is true if the value of this attribute is a file path. If
// this is true, then the attribute value will be set to the contents
// of the file when the credential is "finalized".
FilePath bool
// ExpandFilePath indicates that the attribute value is a file path that
// should be validated and then replaced with the contents of the file at
// that path. The "finalized" value is the file contents, not the file path.
ExpandFilePath bool
// Optional controls whether the attribute is required to have a non-empty
// value or not. Attributes default to mandatory.
Optional bool
// Options, if set, define the allowed values for this field.
Options []interface{}
// ShortSuffix is a human-readable suffix appended to the name of the
// attribute when prompting. It replaces the default "(optional)" suffix
// and explains to users why the attribute is optional.
// Requires setting Optional: true.
ShortSuffix string
}
type cloudCredentialChecker struct{}
func (c cloudCredentialChecker) Coerce(v interface{}, path []string) (interface{}, error) {
out := CloudCredential{
AuthCredentials: make(map[string]Credential),
}
v, err := schema.StringMap(cloudCredentialValueChecker{}).Coerce(v, path)
if err != nil {
return nil, err
}
mapv := v.(map[string]interface{})
for k, v := range mapv {
switch k {
case "default-region":
out.DefaultRegion = v.(string)
case "default-credential":
out.DefaultCredential = v.(string)
default:
out.AuthCredentials[k] = v.(Credential)
}
}
return out, nil
}
type cloudCredentialValueChecker struct{}
func (c cloudCredentialValueChecker) Coerce(v interface{}, path []string) (interface{}, error) {
field := path[len(path)-1]
switch field {
case "default-region", "default-credential":
return schema.String().Coerce(v, path)
}
v, err := schema.StringMap(schema.String()).Coerce(v, path)
if err != nil {
return nil, err
}
mapv := v.(map[string]interface{})
authType, _ := mapv["auth-type"].(string)
if authType == "" {
return nil, errors.Errorf("%v: missing auth-type", strings.Join(path, ""))
}
attrs := make(map[string]string)
delete(mapv, "auth-type")
for k, v := range mapv {
attrs[k] = v.(string)
}
if len(attrs) == 0 {
attrs = nil
}
return Credential{authType: AuthType(authType), attributes: attrs}, nil
}
// ParseCredentials parses the given yaml bytes into Credentials, but does
// not validate the credential attributes.
func ParseCredentials(data []byte) (map[string]CloudCredential, error) {
credentialCollection, err := ParseCredentialCollection(data)
if err != nil {
return nil, errors.Trace(err)
}
cloudNames := credentialCollection.CloudNames()
credentials := make(map[string]CloudCredential)
for _, cloud := range cloudNames {
v, err := credentialCollection.CloudCredential(cloud)
if err != nil {
return nil, errors.Trace(err)
}
credentials[cloud] = *v
}
return credentials, nil
}
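// A usage sketch for ParseCredentials; the cloud and credential names are
// illustrative:
//
//	creds, err := ParseCredentials([]byte(`
//	credentials:
//	  mymaas:
//	    admin:
//	      auth-type: oauth1
//	      maas-oauth: abcdef
//	`))
//	// creds["mymaas"].AuthCredentials["admin"].AuthType() == OAuth1AuthType
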
// RemoveSecrets returns a copy of the given credential with secret fields removed.
func RemoveSecrets(
credential Credential,
schemas map[AuthType]CredentialSchema,
) (*Credential, error) {
schema, ok := schemas[credential.authType]
if !ok {
return nil, errors.NotSupportedf("auth-type %q", credential.authType)
}
redactedAttrs := credential.Attributes()
for _, attr := range schema {
if attr.Hidden {
delete(redactedAttrs, attr.Name)
}
}
return &Credential{authType: credential.authType, attributes: redactedAttrs}, nil
}
// CredentialCollection holds CloudCredential(s) that are lazily validated.
type CredentialCollection struct {
Credentials map[string]interface{} `yaml:"credentials"`
}
// ParseCredentialCollection parses the given YAML bytes into a CredentialCollection.
func ParseCredentialCollection(data []byte) (*CredentialCollection, error) {
collection := CredentialCollection{}
err := yaml.Unmarshal(data, &collection)
if err != nil {
return nil, errors.Annotate(err, "cannot unmarshal yaml credentials")
}
return &collection, nil
}
// CloudCredential returns a copy of the CloudCredential for the specified cloud or
// an error when the CloudCredential was not found or failed to pass validation.
func (c *CredentialCollection) CloudCredential(cloudName string) (*CloudCredential, error) {
credentialValue, ok := c.Credentials[cloudName]
if !ok {
return nil, errors.NotFoundf("credentials for cloud %s", cloudName)
}
if credential, ok := credentialValue.(CloudCredential); ok {
return &credential, nil
}
credentialValue, err := cloudCredentialChecker{}.Coerce(
credentialValue, []string{"credentials." + cloudName},
)
if err != nil {
return nil, errors.Trace(err)
}
credential := credentialValue.(CloudCredential)
credential.validateDefaultCredential()
c.Credentials[cloudName] = credential
return &credential, nil
}
// CloudNames returns the cloud names to which credentials inside the CredentialCollection belong.
func (c *CredentialCollection) CloudNames() []string {
var cloudNames []string
for k := range c.Credentials {
cloudNames = append(cloudNames, k)
}
return cloudNames
}
// UpdateCloudCredential stores a CloudCredential for a specific cloud.
func (c *CredentialCollection) UpdateCloudCredential(cloudName string, details CloudCredential) {
if len(details.AuthCredentials) == 0 {
delete(c.Credentials, cloudName)
return
}
if c.Credentials == nil {
c.Credentials = make(map[string]interface{})
}
details.validateDefaultCredential()
c.Credentials[cloudName] = details
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
// Package cloud provides functionality to parse information
// describing clouds, including regions, supported auth types etc.
package cloud
import (
"os"
"github.com/juju/errors"
"github.com/juju/juju/juju/osenv"
)
// JujuPersonalCloudsPath is the location where personal cloud information is
// expected to be found. Requires JUJU_DATA to be set.
func JujuPersonalCloudsPath() string {
return osenv.JujuXDGDataHomePath("clouds.yaml")
}
// PersonalCloudMetadata loads any personal cloud metadata defined
// in the Juju Home directory. If no cloud metadata is found,
// that is not an error; nil is returned.
func PersonalCloudMetadata() (map[string]Cloud, error) {
clouds, err := ParseCloudMetadataFile(JujuPersonalCloudsPath())
if err != nil && os.IsNotExist(err) {
return nil, nil
}
return clouds, err
}
// ParseCloudMetadataFile loads any cloud metadata defined
// in the specified file.
func ParseCloudMetadataFile(file string) (map[string]Cloud, error) {
data, err := os.ReadFile(file)
if err != nil {
return nil, err
}
clouds, err := ParseCloudMetadata(data)
if err != nil {
return nil, err
}
return clouds, err
}
// WritePersonalCloudMetadata marshals to YAML and writes the cloud metadata
// to the personal cloud file.
func WritePersonalCloudMetadata(cloudsMap map[string]Cloud) error {
data, err := marshalCloudMetadata(cloudsMap)
if err != nil {
return errors.Trace(err)
}
return os.WriteFile(JujuPersonalCloudsPath(), data, os.FileMode(0600))
}
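// A sketch of the personal-cloud round trip; the cloud definition shown is
// illustrative:
//
//	clouds, err := PersonalCloudMetadata() // nil, nil when clouds.yaml is absent
//	if clouds == nil {
//		clouds = map[string]Cloud{}
//	}
//	clouds["garage-maas"] = Cloud{Name: "garage-maas", Type: "maas", AuthTypes: AuthTypes{OAuth1AuthType}}
//	err = WritePersonalCloudMetadata(clouds)
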
// Copyright 2017 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
// Package cloud provides functionality to parse information
// describing clouds, including regions, supported auth types etc.
package cloud
import (
"fmt"
"reflect"
"strings"
"github.com/juju/errors"
"github.com/juju/gojsonschema"
"gopkg.in/yaml.v2"
)
// ValidationWarning holds JSON schema validation errors used to warn users
// about potential schema violations.
type ValidationWarning struct {
Messages []string
}
func (e *ValidationWarning) Error() string {
str := ""
for _, msg := range e.Messages {
str = fmt.Sprintf("%s\n%s", str, msg)
}
return str
}
var cloudSetSchema = map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"clouds": map[string]interface{}{
"type": "object",
"additionalProperties": cloudSchema,
},
},
"additionalProperties": false,
}
var cloudSchema = map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"name": map[string]interface{}{"type": "string"},
"type": map[string]interface{}{"type": "string"},
"description": map[string]interface{}{"type": "string"},
"auth-types": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{"type": "string"},
},
"host-cloud-region": map[string]interface{}{"type": "string"},
"endpoint": map[string]interface{}{"type": "string"},
"identity-endpoint": map[string]interface{}{"type": "string"},
"storage-endpoint": map[string]interface{}{"type": "string"},
"config": map[string]interface{}{"type": "object"},
"regions": regionsSchema,
"region-config": map[string]interface{}{"type": "object"},
"ca-certificates": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{"type": "string"},
},
},
"additionalProperties": false,
}
var regionsSchema = map[string]interface{}{
"type": "object",
"additionalProperties": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"endpoint": map[string]interface{}{"type": "string"},
"identity-endpoint": map[string]interface{}{"type": "string"},
"storage-endpoint": map[string]interface{}{"type": "string"},
},
"additionalProperties": false,
},
}
// ValidateCloudSet reports any erroneous properties found in cloud metadata
// YAML. If there are no erroneous properties, ValidateCloudSet returns nil;
// otherwise it returns an error listing all erroneous properties and
// possible suggestions.
func ValidateCloudSet(data []byte) error {
return validateCloud(data, &cloudSetSchema)
}
// ValidateOneCloud is like ValidateCloudSet but validates the metadata for only
// one cloud and not multiple.
func ValidateOneCloud(data []byte) error {
return validateCloud(data, &cloudSchema)
}
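// A hedged sketch of validating cloud YAML before parsing it; the misspelled
// "auth-type" key is deliberate, to show the suggestion behaviour:
//
//	err := ValidateCloudSet([]byte(`
//	clouds:
//	  garage-maas:
//	    type: maas
//	    auth-type: [oauth1]
//	`))
//	// err is expected to be a *ValidationWarning suggesting "auth-types".
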
func validateCloud(data []byte, jsonSchema *map[string]interface{}) error {
var body interface{}
if err := yaml.Unmarshal(data, &body); err != nil {
return errors.Annotate(err, "cannot unmarshal yaml cloud metadata")
}
jsonBody := yamlToJSON(body)
invalidKeys, err := validateCloudMetaData(jsonBody, jsonSchema)
if err != nil {
return errors.Annotate(err, "cannot validate yaml cloud metadata")
}
formatKeyError := func(invalidKey, similarValidKey string) string {
str := fmt.Sprintf("property %s is invalid.", invalidKey)
if similarValidKey != "" {
str = fmt.Sprintf("%s Perhaps you mean %q.", str, similarValidKey)
}
return str
}
cloudValidationError := ValidationWarning{}
for k, v := range invalidKeys {
cloudValidationError.Messages = append(cloudValidationError.Messages, formatKeyError(k, v))
}
if len(cloudValidationError.Messages) != 0 {
return &cloudValidationError
}
return nil
}
func cloudTags() []string {
keys := make(map[string]struct{})
collectTags(reflect.TypeOf((*cloud)(nil)), "yaml", []string{"map[string]*cloud.region", "yaml.MapSlice"}, &keys)
keyList := make([]string, 0, len(keys))
for k := range keys {
keyList = append(keyList, k)
}
return keyList
}
// collectTags returns a set of keys for a specified struct tag. If no tag is
// specified for a particular field of the argument struct type, then the
// all-lowercase field name is used, as per Go tag conventions. If the
// specified tag is not the name of a conventionally formatted Go struct tag,
// then the results of this function are invalid. Values of invalid kinds
// result in no processing.
func collectTags(t reflect.Type, tag string, ignoreTypes []string, keys *map[string]struct{}) {
switch t.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.Ptr:
collectTags(t.Elem(), tag, ignoreTypes, keys)
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
fieldTag := field.Tag.Get(tag)
var fieldTagKey string
ignoredType := false
for _, it := range ignoreTypes {
if field.Type.String() == it {
ignoredType = true
break
}
}
if fieldTag == "-" || ignoredType {
continue
}
if len(fieldTag) > 0 {
fieldTagKey = strings.Split(fieldTag, ",")[0]
} else {
fieldTagKey = strings.ToLower(field.Name)
}
(*keys)[fieldTagKey] = struct{}{}
collectTags(field.Type, tag, ignoreTypes, keys)
}
}
}
func validateCloudMetaData(body interface{}, jsonSchema *map[string]interface{}) (map[string]string, error) {
documentLoader := gojsonschema.NewGoLoader(body)
schemaLoader := gojsonschema.NewGoLoader(jsonSchema)
result, err := gojsonschema.Validate(schemaLoader, documentLoader)
if err != nil {
return nil, err
}
minEditingDistance := 5
validCloudProperties := cloudTags()
suggestionMap := map[string]string{}
for _, rsltErr := range result.Errors() {
invalidProperty := strings.Split(rsltErr.Description, " ")[2]
suggestionMap[invalidProperty] = ""
editingDistance := minEditingDistance
for _, validProperty := range validCloudProperties {
dist := distance(invalidProperty, validProperty)
if dist < editingDistance && dist < minEditingDistance {
editingDistance = dist
suggestionMap[invalidProperty] = validProperty
}
}
}
return suggestionMap, nil
}
func yamlToJSON(i interface{}) interface{} {
switch x := i.(type) {
case map[interface{}]interface{}:
m2 := map[string]interface{}{}
for k, v := range x {
m2[k.(string)] = yamlToJSON(v)
}
return m2
case []interface{}:
for i, v := range x {
x[i] = yamlToJSON(v)
}
}
return i
}
// The following "editing distance" comparator was lifted from
// https://github.com/arbovm/levenshtein/blob/master/levenshtein.go which has a
// compatible BSD license. We use it to calculate the distance between a
// discovered invalid yaml property and known good properties to identify
// suggestions.
func distance(str1, str2 string) int {
var cost, lastdiag, olddiag int
s1 := []rune(str1)
s2 := []rune(str2)
lenS1 := len(s1)
lenS2 := len(s2)
column := make([]int, lenS1+1)
for y := 1; y <= lenS1; y++ {
column[y] = y
}
for x := 1; x <= lenS2; x++ {
column[0] = x
lastdiag = x - 1
for y := 1; y <= lenS1; y++ {
olddiag = column[y]
cost = 0
if s1[y-1] != s2[x-1] {
cost = 1
}
column[y] = min(
column[y]+1,
column[y-1]+1,
lastdiag+cost)
lastdiag = olddiag
}
}
return column[lenS1]
}
func min(a, b, c int) int {
if a < b {
if a < c {
return a
}
} else {
if b < c {
return b
}
}
return c
}
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package cloud
import (
"fmt"
"sort"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/juju/internal/provider/lxd/lxdnames"
)
// WhiteList contains a cloud compatibility matrix:
// if a controller was bootstrapped on a particular cloud type,
// what other cloud types can be added to it.
type WhiteList struct {
matrix map[string]set.Strings
}
// String constructs a user-friendly whitelist representation.
func (w *WhiteList) String() string {
if len(w.matrix) == 0 {
return "empty whitelist"
}
sorted := []string{}
for one := range w.matrix {
sorted = append(sorted, one)
}
sort.Strings(sorted)
result := []string{}
for _, one := range sorted {
result = append(result, fmt.Sprintf(" - controller cloud type %q supports %v", one, w.matrix[one].SortedValues()))
}
return strings.Join(result, "\n")
}
// Check returns an error if the 'existing' controller cloud type is
// not compatible with the 'new' cloud type according to this whitelist.
func (w *WhiteList) Check(existing, new string) error {
if list, ok := w.matrix[existing]; ok {
if !list.Contains(new) {
return errors.Errorf("cloud type %q is not whitelisted for controller cloud type %q, current whitelist: %v", new, existing, list.SortedValues())
}
return nil
}
return errors.Errorf("controller cloud type %q is not whitelisted, current whitelist: \n%v", existing, w)
}
// CurrentWhiteList returns the current cloud whitelist supported by Juju.
func CurrentWhiteList() *WhiteList {
return &WhiteList{map[string]set.Strings{
"kubernetes": set.NewStrings(lxdnames.ProviderType, "maas", "openstack"),
lxdnames.ProviderType: set.NewStrings(lxdnames.ProviderType, "maas", "openstack"),
"maas": set.NewStrings("maas", "openstack"),
"openstack": set.NewStrings("openstack"),
}}
}
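// An illustrative sketch of consulting the whitelist; the cloud type values
// are examples only:
//
//	wl := CurrentWhiteList()
//	err := wl.Check("maas", "openstack") // nil: openstack can be added to a maas controller
//	err = wl.Check("openstack", "maas")  // error: maas is not whitelisted for openstack
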
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package controller
import (
"fmt"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-macaroon-bakery/macaroon-bakery/v3/bakery"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/juju/utils/v4"
"gopkg.in/yaml.v2"
"github.com/juju/juju/core/objectstore"
"github.com/juju/juju/internal/configschema"
"github.com/juju/juju/internal/pki"
)
const (
// MongoProfLow represents the most conservative mongo memory profile.
MongoProfLow = "low"
// MongoProfDefault represents the mongo memory profile shipped by default.
MongoProfDefault = "default"
)
// docs:controller-config-keys
const (
// APIPort is the port used for api connections.
APIPort = "api-port"
// ControllerAPIPort is an optional port that may be set for controllers
// that have a very heavy load. If this port is set, this port is used by
// the controllers to talk to each other - used for the local API connection
// as well as the pubsub forwarders. If this value is set, the api-port
// isn't opened until the controllers have started properly.
ControllerAPIPort = "controller-api-port"
// ControllerName is the canonical name for the controller.
ControllerName = "controller-name"
// ApplicationResourceDownloadLimit limits the number of concurrent resource download
// requests from unit agents which will be served. The limit is per application.
// Use a value of 0 to disable the limit.
ApplicationResourceDownloadLimit = "application-resource-download-limit"
// ControllerResourceDownloadLimit limits the number of concurrent resource download
// requests from unit agents which will be served. The limit is for the combined total
// of all applications on the controller.
// Use a value of 0 to disable the limit.
ControllerResourceDownloadLimit = "controller-resource-download-limit"
// AgentRateLimitMax is the maximum size of the token bucket used to
// ratelimit the agent connections to the API server.
AgentRateLimitMax = "agent-ratelimit-max"
// AgentRateLimitRate is the interval at which a new token is added to
// the token bucket, in milliseconds (ms).
AgentRateLimitRate = "agent-ratelimit-rate"
// APIPortOpenDelay is a duration that the controller will wait
// between when the controller has been deemed to be ready to open
// the api-port and when the api-port is actually opened. This value
// is only used when a controller-api-port value is set.
APIPortOpenDelay = "api-port-open-delay"
// AuditingEnabled determines whether the controller will record
// auditing information.
AuditingEnabled = "auditing-enabled"
// AuditLogCaptureArgs determines whether the audit log will
// contain the arguments passed to API methods.
AuditLogCaptureArgs = "audit-log-capture-args"
// AuditLogMaxSize is the maximum size for the current audit log
// file, eg "250M".
AuditLogMaxSize = "audit-log-max-size"
// AuditLogMaxBackups is the number of old audit log files to keep
// (compressed).
AuditLogMaxBackups = "audit-log-max-backups"
// AuditLogExcludeMethods is a list of Facade.Method names that
// aren't interesting for audit logging purposes. A conversation
// with only calls to these will be excluded from the
// log. (They'll still appear in conversations that have other
// interesting calls though.)
AuditLogExcludeMethods = "audit-log-exclude-methods"
// ReadOnlyMethodsWildcard is the special value that can be added
// to the exclude-methods list that represents all of the read
// only methods (see apiserver/observer/auditfilter.go). This
// value will be stored in the DB (rather than being expanded at
// write time) so any changes to the set of read-only methods in
// new versions of Juju will be honoured.
ReadOnlyMethodsWildcard = "ReadOnlyMethods"
// StatePort is the port used for mongo connections.
StatePort = "state-port"
// CACertKey is the key for the controller's CA certificate attribute.
CACertKey = "ca-cert"
// ControllerUUIDKey is the key for the controller UUID attribute.
ControllerUUIDKey = "controller-uuid"
// LoginTokenRefreshURL sets the URL of the login JWT well-known endpoint.
// Use this when authentication/authorisation is done using a JWT in the
// login request rather than a username/password or macaroon and a local
// permissions model.
LoginTokenRefreshURL = "login-token-refresh-url"
// IdentityURL sets the URL of the identity manager.
// Use this when users should be managed externally rather than
// created locally on the controller.
IdentityURL = "identity-url"
// IdentityPublicKey sets the public key of the identity manager.
// Use this when users should be managed externally rather than
// created locally on the controller.
IdentityPublicKey = "identity-public-key"
// SetNUMAControlPolicyKey (true/false) is deprecated.
// Used to configure whether mongo is started with NUMA
// controller policy turned on.
SetNUMAControlPolicyKey = "set-numa-control-policy"
// AutocertDNSNameKey sets the DNS name of the controller. If a
// client connects to this name, an official certificate will be
// automatically requested. Connecting to any other host name
// will use the usual self-generated certificate.
AutocertDNSNameKey = "autocert-dns-name"
// AutocertURLKey sets the URL used to obtain official TLS
// certificates when a client connects to the API. By default,
// certificates are obtained from Let's Encrypt. A good value for
// testing is
// "https://acme-staging.api.letsencrypt.org/directory".
AutocertURLKey = "autocert-url"
// AllowModelAccessKey sets whether the controller will allow users to
// connect to models they have been authorized for, even when
// they don't have any access rights to the controller itself.
AllowModelAccessKey = "allow-model-access"
// MongoMemoryProfile sets the memory profile for MongoDB. Valid values are:
// - "low": use the least possible memory
// - "default": use the default memory profile
MongoMemoryProfile = "mongo-memory-profile"
// JujuDBSnapChannel selects the channel to use when installing Mongo
// snaps for focal or later. The value is ignored for older releases.
JujuDBSnapChannel = "juju-db-snap-channel"
// MaxDebugLogDuration is used to provide a backstop to the execution of a
// debug-log command. If someone starts a debug-log session in a remote
// screen for example, it is very easy to disconnect from the screen while
// leaving the debug-log process running. This causes unnecessary load on
// the API server. The max debug-log duration has a default of 24 hours,
// which should be more than enough time for a debugging session.
MaxDebugLogDuration = "max-debug-log-duration"
// AgentLogfileMaxSize is the maximum file size of each agent log file,
// in MB.
AgentLogfileMaxSize = "agent-logfile-max-size"
// AgentLogfileMaxBackups is the maximum number of old agent log files
// to keep (compressed; saved on each unit, synced to the controller).
AgentLogfileMaxBackups = "agent-logfile-max-backups"
// ModelLogfileMaxSize is the maximum size of the log file written out by the
// controller on behalf of workers running for a model.
ModelLogfileMaxSize = "model-logfile-max-size"
// ModelLogfileMaxBackups is the number of old model
// log files to keep (compressed).
ModelLogfileMaxBackups = "model-logfile-max-backups"
// MaxTxnLogSize is the maximum size of the capped txn log collection, eg "10M".
MaxTxnLogSize = "max-txn-log-size"
// MaxPruneTxnBatchSize (deprecated) is the maximum number of transactions
// we will evaluate in one go when pruning. Default is 1M transactions.
// A value <= 0 indicates to do all transactions at once.
MaxPruneTxnBatchSize = "max-prune-txn-batch-size"
// MaxPruneTxnPasses (deprecated) is the maximum number of batches that
// we will process. So total number of transactions that can be processed
// is MaxPruneTxnBatchSize * MaxPruneTxnPasses. A value <= 0 implies
// 'do a single pass'. If both MaxPruneTxnBatchSize and MaxPruneTxnPasses
// are 0, then the default value of 1M BatchSize and 100 passes
// will be used instead.
MaxPruneTxnPasses = "max-prune-txn-passes"
// PruneTxnQueryCount is the number of transactions to read in a single query.
// Minimum of 10, a value of 0 will indicate to use the default value (1000)
PruneTxnQueryCount = "prune-txn-query-count"
// PruneTxnSleepTime is the amount of time to sleep between processing each
// batch query. This is used to reduce load on the system, allowing other
// queries time to operate. On large controllers, processing 1000 txs
// seems to take about 100ms, so a sleep time of 10ms represents a 10%
// slowdown, but allows other systems to operate concurrently.
// A negative number indicates to use the default; a value of 0
// indicates not to sleep at all.
PruneTxnSleepTime = "prune-txn-sleep-time"
// MaxCharmStateSize is the maximum allowed size of charm-specific
// per-unit state data that charms can store to the controller in
// bytes. A value of 0 disables the quota checks although in
// principle, mongo imposes a hard (but configurable) limit of 16M.
MaxCharmStateSize = "max-charm-state-size"
// MaxAgentStateSize is the maximum allowed size of internal state
// data that agents can store to the controller in bytes. A value of 0
// disables the quota checks although in principle, mongo imposes a
// hard (but configurable) limit of 16M.
MaxAgentStateSize = "max-agent-state-size"
// MigrationMinionWaitMax is the maximum time that the migration-master
// worker will wait for agents to report for a migration phase when
// executing a model migration.
MigrationMinionWaitMax = "migration-agent-wait-time"
// JujuHASpace is the network space within which the MongoDB replica-set
// should communicate.
JujuHASpace = "juju-ha-space"
// JujuManagementSpace is the network space that agents should use to
// communicate with controllers.
JujuManagementSpace = "juju-mgmt-space"
// CAASOperatorImagePath sets the URL of the docker image
// used for the application operator.
// Deprecated: use CAASImageRepo
CAASOperatorImagePath = "caas-operator-image-path"
// CAASImageRepo sets the docker repo to use
// for the jujud operator and mongo images.
CAASImageRepo = "caas-image-repo"
// Features allows a list of runtime changeable features to be updated.
Features = "features"
// PublicDNSAddress is the public DNS address (and port) of the controller.
PublicDNSAddress = "public-dns-address"
// QueryTracingEnabled returns whether query tracing is enabled. If so, any
// queries which take longer than QueryTracingThreshold will be logged.
QueryTracingEnabled = "query-tracing-enabled"
// QueryTracingThreshold returns the "threshold" for query tracing. Any
// queries which take longer than this value will be logged (if query tracing
// is enabled). The lower the threshold, the more queries will be output. A
// value of 0 means all queries will be output.
QueryTracingThreshold = "query-tracing-threshold"
// OpenTelemetryEnabled returns whether open telemetry is enabled.
OpenTelemetryEnabled = "open-telemetry-enabled"
// OpenTelemetryEndpoint returns the endpoint at which the telemetry will
// be pushed to.
OpenTelemetryEndpoint = "open-telemetry-endpoint"
// OpenTelemetryInsecure returns if the telemetry collector endpoint is
// insecure or not. Useful for debug or local testing.
OpenTelemetryInsecure = "open-telemetry-insecure"
// OpenTelemetryStackTraces return whether stack traces should be added per
// span.
OpenTelemetryStackTraces = "open-telemetry-stack-traces"
// OpenTelemetrySampleRatio returns the sample ratio for open telemetry.
OpenTelemetrySampleRatio = "open-telemetry-sample-ratio"
// OpenTelemetryTailSamplingThreshold returns the tail sampling threshold
// for open telemetry as a duration.
OpenTelemetryTailSamplingThreshold = "open-telemetry-tail-sampling-threshold"
// ObjectStoreType is the type of object store to use for storing blobs.
// This isn't currently allowed to be changed dynamically, that will come
// when we support multiple object store types (not including state).
ObjectStoreType = "object-store-type"
// ObjectStoreS3Endpoint is the endpoint to use for S3 object stores.
ObjectStoreS3Endpoint = "object-store-s3-endpoint"
// ObjectStoreS3StaticKey is the static key to use for S3 object stores.
ObjectStoreS3StaticKey = "object-store-s3-static-key"
// ObjectStoreS3StaticSecret is the static secret to use for S3 object
// stores.
ObjectStoreS3StaticSecret = "object-store-s3-static-secret"
// ObjectStoreS3StaticSession is the static session token to use for S3
// object stores.
ObjectStoreS3StaticSession = "object-store-s3-static-session"
// SystemSSHKeys returns the set of ssh keys that should be trusted by
// agents of this controller regardless of the model.
SystemSSHKeys = "system-ssh-keys"
// JujudControllerSnapSource returns the source for the controller snap.
// Can be set to "legacy", "snapstore", "local" or "local-dangerous".
// Cannot be changed.
JujudControllerSnapSource = "jujud-controller-snap-source"
// SSHServerPort is the port used for the embedded SSH server.
SSHServerPort = "ssh-server-port"
// SSHMaxConcurrentConnections is the maximum number of concurrent SSH
// connections to the controller.
SSHMaxConcurrentConnections = "ssh-max-concurrent-connections"
)
// Attribute Defaults
const (
// DefaultSSHMaxConcurrentConnections is the default maximum number of
// concurrent SSH connections to the controller.
DefaultSSHMaxConcurrentConnections = 100
// DefaultSSHServerPort is the default port used for the embedded SSH server.
DefaultSSHServerPort = 17022
// DefaultApplicationResourceDownloadLimit allows unlimited
// resource download requests initiated by a unit agent per application.
DefaultApplicationResourceDownloadLimit = 0
// DefaultControllerResourceDownloadLimit allows unlimited concurrent resource
// download requests initiated by unit agents for any application on the controller.
DefaultControllerResourceDownloadLimit = 0
// DefaultAgentRateLimitMax allows the first 10 agents to connect without
// any issue. After that the rate limiting kicks in.
DefaultAgentRateLimitMax = 10
// DefaultAgentRateLimitRate will allow four agents to connect every
// second. A token is added to the ratelimit token bucket every 250ms.
DefaultAgentRateLimitRate = 250 * time.Millisecond
// DefaultAuditingEnabled contains the default value for the
// AuditingEnabled config value.
DefaultAuditingEnabled = true
// DefaultAuditLogCaptureArgs is the default for the
// AuditLogCaptureArgs setting (which is not to capture them).
DefaultAuditLogCaptureArgs = false
// DefaultAuditLogMaxSizeMB is the default size in MB at which we
// roll the audit log file.
DefaultAuditLogMaxSizeMB = 300
// DefaultAuditLogMaxBackups is the default number of files to
// keep.
DefaultAuditLogMaxBackups = 10
// DefaultNUMAControlPolicy should not be used by default.
// Only use numactl if user specifically requests it
DefaultNUMAControlPolicy = false
// DefaultStatePort is the default port the controller is listening on.
DefaultStatePort int = 37017
// DefaultAPIPort is the default port the API server is listening on.
DefaultAPIPort int = 17070
// DefaultAPIPortOpenDelay is the default value for api-port-open-delay.
DefaultAPIPortOpenDelay = 2 * time.Second
// DefaultMongoMemoryProfile is the default profile used by mongo.
DefaultMongoMemoryProfile = MongoProfDefault
// DefaultJujuDBSnapChannel is the default snap channel for installing
// mongo in focal or later.
DefaultJujuDBSnapChannel = "4.4/stable"
// DefaultMaxDebugLogDuration is the default duration that debug-log
// commands can run before being terminated by the API server.
DefaultMaxDebugLogDuration = 24 * time.Hour
// DefaultMaxTxnLogCollectionMB is the maximum size of the txn log collection.
DefaultMaxTxnLogCollectionMB = 10 // 10 MB
// DefaultMaxPruneTxnBatchSize is the default number of transactions
// we will prune in a given pass (1M) (deprecated).
DefaultMaxPruneTxnBatchSize = 1 * 1000 * 1000
// DefaultMaxPruneTxnPasses is the default number of
// batches we will process. (deprecated)
DefaultMaxPruneTxnPasses = 100
// DefaultAgentLogfileMaxSize is the maximum file size in MB of each
// agent/controller log file.
DefaultAgentLogfileMaxSize = 100
// DefaultAgentLogfileMaxBackups is the number of old agent/controller log
// files to keep (compressed).
DefaultAgentLogfileMaxBackups = 2
// DefaultModelLogfileMaxSize is the maximum file size in MB of
// the log file written out by the controller on behalf of workers
// running for a model.
DefaultModelLogfileMaxSize = 10
// DefaultModelLogfileMaxBackups is the number of old model
// log files to keep (compressed).
DefaultModelLogfileMaxBackups = 2
// DefaultPruneTxnQueryCount is the number of transactions
// to read in a single query.
DefaultPruneTxnQueryCount = 1000
// DefaultPruneTxnSleepTime is the amount of time to sleep between
// processing each batch query. This is used to reduce load on the system,
// allowing other queries time to operate. On large controllers,
// processing 1000 txs seems to take about 100ms, so a sleep time of 10ms
// represents a 10% slowdown, but allows other systems to
// operate concurrently.
DefaultPruneTxnSleepTime = 10 * time.Millisecond
// DefaultMaxCharmStateSize is the maximum size (in bytes) of charm
// state data that each unit can store to the controller.
DefaultMaxCharmStateSize = 2 * 1024 * 1024
// DefaultMaxAgentStateSize is the maximum size (in bytes) of internal
// state data that agents can store to the controller.
DefaultMaxAgentStateSize = 512 * 1024
// DefaultMigrationMinionWaitMax is the default value for how long a
// migration minion will wait for the migration to complete.
DefaultMigrationMinionWaitMax = 15 * time.Minute
// DefaultQueryTracingEnabled is the default value for whether query
// tracing is enabled.
DefaultQueryTracingEnabled = false
// DefaultQueryTracingThreshold is the default value for the threshold
// for query tracing. If a query takes longer than this to complete
// it will be logged if query tracing is enabled.
DefaultQueryTracingThreshold = time.Second
// DefaultAuditLogExcludeMethods is the default list of methods to
// exclude from the audit log.
// This special value means we exclude any methods in the set
// listed in apiserver/observer/auditfilter.go
DefaultAuditLogExcludeMethods = ReadOnlyMethodsWildcard
// DefaultOpenTelemetryEnabled is the default value for whether open
// telemetry tracing is enabled.
DefaultOpenTelemetryEnabled = false
// DefaultOpenTelemetryInsecure is the default value for whether the open
// telemetry tracing endpoint is insecure.
DefaultOpenTelemetryInsecure = false
// DefaultOpenTelemetryStackTraces is the default value for whether open
// telemetry tracing spans include stack traces.
DefaultOpenTelemetryStackTraces = false
// DefaultOpenTelemetrySampleRatio is the default value for the sample
// ratio for open telemetry.
// By default we only want to trace 10% of the requests.
DefaultOpenTelemetrySampleRatio = 0.1
// DefaultOpenTelemetryTailSamplingThreshold is the default value for the
// tail sampling threshold for open telemetry.
DefaultOpenTelemetryTailSamplingThreshold = 1 * time.Millisecond
// DefaultJujudControllerSnapSource is the default value for the jujud
// controller snap source.
// TODO(jujud-controller-snap): change this to "snapstore" once it is implemented.
DefaultJujudControllerSnapSource = "legacy"
// DefaultObjectStoreType is the default type of object store to use for
// storing blobs.
DefaultObjectStoreType = objectstore.FileBackend
)
var (
// ControllerOnlyConfigAttributes lists all the controller config keys, so we
// can distinguish these from model config keys when bootstrapping.
ControllerOnlyConfigAttributes = []string{
AllowModelAccessKey,
AgentRateLimitMax,
AgentRateLimitRate,
APIPort,
APIPortOpenDelay,
AutocertDNSNameKey,
AutocertURLKey,
CACertKey,
ControllerAPIPort,
ControllerName,
ControllerUUIDKey,
LoginTokenRefreshURL,
IdentityPublicKey,
IdentityURL,
SetNUMAControlPolicyKey,
StatePort,
MongoMemoryProfile,
JujuDBSnapChannel,
MaxDebugLogDuration,
MaxTxnLogSize,
MaxPruneTxnBatchSize,
MaxPruneTxnPasses,
AgentLogfileMaxBackups,
AgentLogfileMaxSize,
ModelLogfileMaxBackups,
ModelLogfileMaxSize,
PruneTxnQueryCount,
PruneTxnSleepTime,
PublicDNSAddress,
JujuHASpace,
JujuManagementSpace,
AuditingEnabled,
AuditLogCaptureArgs,
AuditLogMaxSize,
AuditLogMaxBackups,
AuditLogExcludeMethods,
CAASOperatorImagePath,
CAASImageRepo,
Features,
MaxCharmStateSize,
MaxAgentStateSize,
MigrationMinionWaitMax,
ApplicationResourceDownloadLimit,
ControllerResourceDownloadLimit,
QueryTracingEnabled,
QueryTracingThreshold,
OpenTelemetryEnabled,
OpenTelemetryEndpoint,
OpenTelemetryInsecure,
OpenTelemetryStackTraces,
OpenTelemetrySampleRatio,
OpenTelemetryTailSamplingThreshold,
ObjectStoreType,
ObjectStoreS3Endpoint,
ObjectStoreS3StaticKey,
ObjectStoreS3StaticSecret,
ObjectStoreS3StaticSession,
SystemSSHKeys,
JujudControllerSnapSource,
SSHMaxConcurrentConnections,
SSHServerPort,
}
// For backwards compatibility, we must include "anything", "juju-apiserver"
// and "juju-mongodb" as hostnames as that is what clients specify
// as the hostname for verification (this certificate is used both
// for serving MongoDB and API server connections). We also
// explicitly include localhost.
DefaultDNSNames = []string{
"localhost",
"juju-apiserver",
"juju-mongodb",
"anything",
}
// AllowedUpdateConfigAttributes contains all of the controller
// config attributes that are allowed to be updated after the
// controller has been created.
AllowedUpdateConfigAttributes = set.NewStrings(
AgentLogfileMaxBackups,
AgentLogfileMaxSize,
AgentRateLimitMax,
AgentRateLimitRate,
APIPortOpenDelay,
ApplicationResourceDownloadLimit,
AuditingEnabled,
AuditLogCaptureArgs,
AuditLogExcludeMethods,
AuditLogMaxBackups,
AuditLogMaxSize,
CAASImageRepo,
ControllerResourceDownloadLimit,
Features,
JujuHASpace,
JujuManagementSpace,
MaxAgentStateSize,
MaxCharmStateSize,
MaxDebugLogDuration,
MaxPruneTxnBatchSize,
MaxPruneTxnPasses,
MigrationMinionWaitMax,
ModelLogfileMaxBackups,
ModelLogfileMaxSize,
MongoMemoryProfile,
OpenTelemetryEnabled,
OpenTelemetryEndpoint,
OpenTelemetryInsecure,
OpenTelemetryStackTraces,
OpenTelemetrySampleRatio,
OpenTelemetryTailSamplingThreshold,
PruneTxnQueryCount,
PruneTxnSleepTime,
PublicDNSAddress,
QueryTracingEnabled,
QueryTracingThreshold,
ObjectStoreType,
ObjectStoreS3Endpoint,
ObjectStoreS3StaticKey,
ObjectStoreS3StaticSecret,
ObjectStoreS3StaticSession,
SSHMaxConcurrentConnections,
SSHServerPort,
)
methodNameRE = regexp.MustCompile(`[[:alpha:]][[:alnum:]]*\.[[:alpha:]][[:alnum:]]*`)
)
// ControllerOnlyAttribute returns true if the specified attribute name
// is a controller config key (as opposed to, say, a model config key).
func ControllerOnlyAttribute(attr string) bool {
for _, a := range ControllerOnlyConfigAttributes {
if attr == a {
return true
}
}
return false
}
// Config is a string-keyed map of controller configuration attributes.
type Config map[string]interface{}
// Validate validates the controller configuration.
func (c Config) Validate() error {
return Validate(c)
}
// NewConfig creates a new Config from the supplied attributes.
// Default values will be used where defaults are available.
//
// The controller UUID and CA certificate must be passed in.
// The UUID is typically generated by the immediate caller,
// and the CA certificate generated by environs/bootstrap.NewConfig.
func NewConfig(controllerUUID, caCert string, attrs map[string]interface{}) (Config, error) {
// TODO(wallyworld) - use core/config when it supports duration types
for k, v := range attrs {
field, ok := ConfigSchema[k]
if !ok || field.Type != configschema.Tlist {
continue
}
str, ok := v.(string)
if !ok {
continue
}
var coerced interface{}
err := yaml.Unmarshal([]byte(str), &coerced)
if err != nil {
return Config{}, errors.NewNotValid(err, fmt.Sprintf("value %q for attribute %q not valid", str, k))
}
attrs[k] = coerced
}
coerced, err := configChecker.Coerce(attrs, nil)
if err != nil {
return Config{}, errors.Trace(err)
}
attrs = coerced.(map[string]interface{})
attrs[ControllerUUIDKey] = controllerUUID
attrs[CACertKey] = caCert
config := Config(attrs)
return config, config.Validate()
}
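// A minimal usage sketch for NewConfig; the UUID and CA certificate
// variables below are hypothetical placeholders supplied by the caller:
//
//	attrs := map[string]interface{}{
//	    AuditingEnabled:       true,
//	    QueryTracingThreshold: "500ms", // durations may be given as strings
//	}
//	cfg, err := NewConfig(controllerUUID, caCertPEM, attrs)
//	if err != nil {
//	    // the attributes failed schema coercion or validation
//	}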
// mustInt returns the named attribute as an integer, panicking if
// it is not found or is zero. Zero values should have been
// diagnosed at Validate time.
func (c Config) mustInt(name string) int {
// Values obtained over the api are encoded as float64.
if value, ok := c[name].(float64); ok {
return int(value)
}
value, _ := c[name].(int)
if value == 0 {
panic(errors.Errorf("empty value for %q found in configuration", name))
}
return value
}
func (c Config) intOrDefault(name string, defaultVal int) int {
if _, ok := c[name]; ok {
return c.mustInt(name)
}
return defaultVal
}
func (c Config) boolOrDefault(name string, defaultVal bool) bool {
if value, ok := c[name]; ok {
// Value has already been validated.
return value.(bool)
}
return defaultVal
}
func (c Config) sizeMBOrDefault(name string, defaultVal int) int {
size := c.asString(name)
if size != "" {
// Value has already been validated.
value, _ := utils.ParseSize(size)
return int(value)
}
return defaultVal
}
// asString is a private helper method to keep the ugly string casting
// in one place. It returns the given named attribute as a string,
// returning "" if it isn't found.
func (c Config) asString(name string) string {
value, _ := c[name].(string)
return value
}
// mustString returns the named attribute as a string, panicking if
// it is not found or is empty.
func (c Config) mustString(name string) string {
value, _ := c[name].(string)
if value == "" {
panic(errors.Errorf("empty value for %q found in configuration (type %T, val %v)", name, c[name], c[name]))
}
return value
}
func (c Config) durationOrDefault(name string, defaultVal time.Duration) time.Duration {
switch v := c[name].(type) {
case string:
if v != "" {
// Value has already been validated.
value, _ := time.ParseDuration(v)
return value
}
case time.Duration:
return v
default:
// nil type shows up here
}
return defaultVal
}
// StatePort returns the mongo server port for the environment.
func (c Config) StatePort() int {
return c.mustInt(StatePort)
}
// APIPort returns the API server port for the environment.
func (c Config) APIPort() int {
return c.mustInt(APIPort)
}
// APIPortOpenDelay returns the duration to wait before opening
// the APIPort once the controller has started up. Only used when
// the ControllerAPIPort is non-zero.
func (c Config) APIPortOpenDelay() time.Duration {
return c.durationOrDefault(APIPortOpenDelay, DefaultAPIPortOpenDelay)
}
// ControllerAPIPort returns the optional API port to be used for
// the controllers to talk to each other. A zero value means that
// it is not set.
func (c Config) ControllerAPIPort() int {
if value, ok := c[ControllerAPIPort].(float64); ok {
return int(value)
}
// If the value isn't an int, this conversion will fail and value
// will be 0, which is what we want here.
value, _ := c[ControllerAPIPort].(int)
return value
}
// ApplicationResourceDownloadLimit limits the number of concurrent resource download
// requests from unit agents which will be served. The limit is per application.
func (c Config) ApplicationResourceDownloadLimit() int {
switch v := c[ApplicationResourceDownloadLimit].(type) {
case float64:
return int(v)
case int:
return v
default:
// nil type shows up here
}
return DefaultApplicationResourceDownloadLimit
}
// ControllerResourceDownloadLimit limits the number of concurrent resource download
// requests from unit agents which will be served. The limit is for the combined total
// of all applications on the controller.
func (c Config) ControllerResourceDownloadLimit() int {
switch v := c[ControllerResourceDownloadLimit].(type) {
case float64:
return int(v)
case int:
return v
default:
// nil type shows up here
}
return DefaultControllerResourceDownloadLimit
}
// AgentRateLimitMax is the initial size of the token bucket that is used to
// rate limit agent connections.
func (c Config) AgentRateLimitMax() int {
switch v := c[AgentRateLimitMax].(type) {
case float64:
return int(v)
case int:
return v
default:
// nil type shows up here
}
return DefaultAgentRateLimitMax
}
// AgentRateLimitRate is the time taken to add a token into the token bucket
// that is used to rate limit agent connections.
func (c Config) AgentRateLimitRate() time.Duration {
return c.durationOrDefault(AgentRateLimitRate, DefaultAgentRateLimitRate)
}
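// A minimal sketch of how the two rate-limit knobs compose; the values
// below are hypothetical:
//
//	c := Config{AgentRateLimitMax: 20, AgentRateLimitRate: "500ms"}
//	burst := c.AgentRateLimitMax()   // 20: agents admitted immediately
//	refill := c.AgentRateLimitRate() // 500ms per token, ~2 new agents/second after the burst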
// AuditingEnabled returns whether or not auditing has been enabled
// for the environment. The default is true.
func (c Config) AuditingEnabled() bool {
if v, ok := c[AuditingEnabled]; ok {
return v.(bool)
}
return DefaultAuditingEnabled
}
// AuditLogCaptureArgs returns whether audit logging should capture
// the arguments to API methods. The default is false.
func (c Config) AuditLogCaptureArgs() bool {
if v, ok := c[AuditLogCaptureArgs]; ok {
return v.(bool)
}
return DefaultAuditLogCaptureArgs
}
// AuditLogMaxSizeMB returns the maximum size for an audit log file in
// MB.
func (c Config) AuditLogMaxSizeMB() int {
return c.sizeMBOrDefault(AuditLogMaxSize, DefaultAuditLogMaxSizeMB)
}
// AuditLogMaxBackups returns the maximum number of backup audit log
// files to keep.
func (c Config) AuditLogMaxBackups() int {
return c.intOrDefault(AuditLogMaxBackups, DefaultAuditLogMaxBackups)
}
// AuditLogExcludeMethods returns the set of method names that are
// considered uninteresting for audit logging. Conversations
// containing only these will be excluded from the audit log.
func (c Config) AuditLogExcludeMethods() set.Strings {
v := c.asString(AuditLogExcludeMethods)
if v == "" {
return set.NewStrings()
}
return set.NewStrings(strings.Split(v, ",")...)
}
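// An illustrative value for this key; "Client.FullStatus" stands in for any
// "Facade.Method" name and is only a hypothetical example:
//
//	c := Config{AuditLogExcludeMethods: "ReadOnlyMethods,Client.FullStatus"}
//	excluded := c.AuditLogExcludeMethods()
//	// excluded is the set {"ReadOnlyMethods", "Client.FullStatus"}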
// Features returns the controller config set features flags.
func (c Config) Features() set.Strings {
v := c.asString(Features)
if v == "" {
return set.NewStrings()
}
return set.NewStrings(strings.Split(v, ",")...)
}
// ControllerName returns the name for the controller
func (c Config) ControllerName() string {
return c.asString(ControllerName)
}
// ControllerUUID returns the uuid for the controller.
func (c Config) ControllerUUID() string {
return c.mustString(ControllerUUIDKey)
}
// CACert returns the certificate of the CA that signed the controller
// certificate, in PEM format, and whether the setting is available.
//
// TODO(axw) once the controller config is completely constructed,
// there will always be a CA certificate. Get rid of the bool result.
func (c Config) CACert() (string, bool) {
if s, ok := c[CACertKey]; ok {
return s.(string), true
}
return "", false
}
// IdentityURL returns the URL of the identity manager.
func (c Config) IdentityURL() string {
return c.asString(IdentityURL)
}
// AutocertURL returns the URL used to obtain official TLS certificates
// when a client connects to the API. See AutocertURLKey
// for more details.
func (c Config) AutocertURL() string {
return c.asString(AutocertURLKey)
}
// AutocertDNSName returns the DNS name of the controller.
// See AutocertDNSNameKey for more details.
func (c Config) AutocertDNSName() string {
return c.asString(AutocertDNSNameKey)
}
// IdentityPublicKey returns the public key of the identity manager.
func (c Config) IdentityPublicKey() *bakery.PublicKey {
key := c.asString(IdentityPublicKey)
if key == "" {
return nil
}
var pubKey bakery.PublicKey
err := pubKey.UnmarshalText([]byte(key))
if err != nil {
// We check if the key string can be unmarshalled into a PublicKey in the
// Validate function, so we really do not expect this to fail.
panic(err)
}
return &pubKey
}
// LoginTokenRefreshURL returns the URL of the login jwt well known endpoint.
func (c Config) LoginTokenRefreshURL() string {
return c.asString(LoginTokenRefreshURL)
}
// MongoMemoryProfile returns the selected profile, or the default profile if none is set.
func (c Config) MongoMemoryProfile() string {
if profile, ok := c[MongoMemoryProfile]; ok {
return profile.(string)
}
return DefaultMongoMemoryProfile
}
// JujuDBSnapChannel returns the channel for installing mongo snaps.
func (c Config) JujuDBSnapChannel() string {
return c.asString(JujuDBSnapChannel)
}
// JujudControllerSnapSource returns the source of the jujud-controller snap.
func (c Config) JujudControllerSnapSource() string {
if src, ok := c[JujudControllerSnapSource]; ok {
return src.(string)
}
return DefaultJujudControllerSnapSource
}
// NUMACtlPreference returns whether numactl is preferred.
func (c Config) NUMACtlPreference() bool {
if numa, ok := c[SetNUMAControlPolicyKey]; ok {
return numa.(bool)
}
return DefaultNUMAControlPolicy
}
// AllowModelAccess reports whether users are allowed to access models
// they have been granted permission for even when they can't access
// the controller.
func (c Config) AllowModelAccess() bool {
value, _ := c[AllowModelAccessKey].(bool)
return value
}
// AgentLogfileMaxSizeMB is the maximum file size in MB of each
// agent/controller log file.
func (c Config) AgentLogfileMaxSizeMB() int {
return c.sizeMBOrDefault(AgentLogfileMaxSize, DefaultAgentLogfileMaxSize)
}
// AgentLogfileMaxBackups is the number of old agent/controller log files to
// keep (compressed).
func (c Config) AgentLogfileMaxBackups() int {
return c.intOrDefault(AgentLogfileMaxBackups, DefaultAgentLogfileMaxBackups)
}
// ModelLogfileMaxBackups is the number of old model log files to keep (compressed).
func (c Config) ModelLogfileMaxBackups() int {
return c.intOrDefault(ModelLogfileMaxBackups, DefaultModelLogfileMaxBackups)
}
// ModelLogfileMaxSizeMB is the maximum size of the log file written out by the
// controller on behalf of workers running for a model.
func (c Config) ModelLogfileMaxSizeMB() int {
return c.sizeMBOrDefault(ModelLogfileMaxSize, DefaultModelLogfileMaxSize)
}
// MaxDebugLogDuration is the maximum time a debug-log session is allowed
// to run before it is terminated by the server.
func (c Config) MaxDebugLogDuration() time.Duration {
return c.durationOrDefault(MaxDebugLogDuration, DefaultMaxDebugLogDuration)
}
// MaxTxnLogSizeMB is the maximum size in MB of the txn log collection.
func (c Config) MaxTxnLogSizeMB() int {
return c.sizeMBOrDefault(MaxTxnLogSize, DefaultMaxTxnLogCollectionMB)
}
// MaxPruneTxnBatchSize (deprecated) is the maximum number of transactions to evaluate in one batch when pruning.
func (c Config) MaxPruneTxnBatchSize() int {
return c.intOrDefault(MaxPruneTxnBatchSize, DefaultMaxPruneTxnBatchSize)
}
// MaxPruneTxnPasses (deprecated) is the maximum number of pruning batches we will process.
func (c Config) MaxPruneTxnPasses() int {
return c.intOrDefault(MaxPruneTxnPasses, DefaultMaxPruneTxnPasses)
}
// PruneTxnQueryCount is the number of transactions to read in a single pruning query.
func (c Config) PruneTxnQueryCount() int {
return c.intOrDefault(PruneTxnQueryCount, DefaultPruneTxnQueryCount)
}
// PruneTxnSleepTime is the amount of time to sleep between batches.
func (c Config) PruneTxnSleepTime() time.Duration {
return c.durationOrDefault(PruneTxnSleepTime, DefaultPruneTxnSleepTime)
}
// PublicDNSAddress returns the DNS name of the controller.
func (c Config) PublicDNSAddress() string {
return c.asString(PublicDNSAddress)
}
// JujuHASpace is the network space within which the MongoDB replica-set
// should communicate.
func (c Config) JujuHASpace() string {
return c.asString(JujuHASpace)
}
// SystemSSHKeys returns the ssh keys that agents of this controller
// should trust.
func (c Config) SystemSSHKeys() string {
return c.asString(SystemSSHKeys)
}
// JujuManagementSpace is the network space that agents should use to
// communicate with controllers.
func (c Config) JujuManagementSpace() string {
return c.asString(JujuManagementSpace)
}
// CAASOperatorImagePath sets the URL of the docker image
// used for the application operator.
// Deprecated: use CAASImageRepo
func (c Config) CAASOperatorImagePath() string {
return c.asString(CAASOperatorImagePath)
}
// CAASImageRepo sets the URL of the docker repo
// used for the jujud operator and mongo images.
func (c Config) CAASImageRepo() string {
return c.asString(CAASImageRepo)
}
// MaxCharmStateSize returns the max size (in bytes) of charm-specific state
// that each unit can store to the controller. A value of zero indicates no
// limit.
func (c Config) MaxCharmStateSize() int {
return c.intOrDefault(MaxCharmStateSize, DefaultMaxCharmStateSize)
}
// MaxAgentStateSize returns the max size (in bytes) of state data that agents
// can store to the controller. A value of zero indicates no limit.
func (c Config) MaxAgentStateSize() int {
return c.intOrDefault(MaxAgentStateSize, DefaultMaxAgentStateSize)
}
// MigrationMinionWaitMax returns a duration for the maximum time that the
// migration-master worker should wait for migration-minion reports during
// phases of a model migration.
func (c Config) MigrationMinionWaitMax() time.Duration {
return c.durationOrDefault(MigrationMinionWaitMax, DefaultMigrationMinionWaitMax)
}
// QueryTracingEnabled returns whether query tracing is enabled.
func (c Config) QueryTracingEnabled() bool {
return c.boolOrDefault(QueryTracingEnabled, DefaultQueryTracingEnabled)
}
// QueryTracingThreshold returns the threshold for query tracing. The
// lower the threshold, the more queries will be output. A value of 0
// means all queries will be output.
func (c Config) QueryTracingThreshold() time.Duration {
return c.durationOrDefault(QueryTracingThreshold, DefaultQueryTracingThreshold)
}
// OpenTelemetryEnabled returns whether open telemetry tracing is enabled.
func (c Config) OpenTelemetryEnabled() bool {
return c.boolOrDefault(OpenTelemetryEnabled, DefaultOpenTelemetryEnabled)
}
// OpenTelemetryEndpoint returns the open telemetry endpoint.
func (c Config) OpenTelemetryEndpoint() string {
return c.asString(OpenTelemetryEndpoint)
}
// OpenTelemetryInsecure returns whether open telemetry tracing endpoint is
// insecure or not.
func (c Config) OpenTelemetryInsecure() bool {
return c.boolOrDefault(OpenTelemetryInsecure, DefaultOpenTelemetryInsecure)
}
// OpenTelemetryStackTraces returns whether open telemetry tracing spans
// should include stack traces.
func (c Config) OpenTelemetryStackTraces() bool {
return c.boolOrDefault(OpenTelemetryStackTraces, DefaultOpenTelemetryStackTraces)
}
// OpenTelemetrySampleRatio returns the sample ratio for open telemetry
// tracing.
func (c Config) OpenTelemetrySampleRatio() float64 {
f, err := parseRatio(c, OpenTelemetrySampleRatio)
if err == nil {
return f
}
return DefaultOpenTelemetrySampleRatio
}
// OpenTelemetryTailSamplingThreshold returns the tail sampling threshold
// for open telemetry tracing spans.
func (c Config) OpenTelemetryTailSamplingThreshold() time.Duration {
return c.durationOrDefault(OpenTelemetryTailSamplingThreshold, DefaultOpenTelemetryTailSamplingThreshold)
}
// ObjectStoreType returns the type of object store to use for storing blobs.
func (c Config) ObjectStoreType() objectstore.BackendType {
return objectstore.BackendType(c.asString(ObjectStoreType))
}
// ObjectStoreS3Endpoint returns the endpoint to use for S3 object stores.
func (c Config) ObjectStoreS3Endpoint() string {
return c.asString(ObjectStoreS3Endpoint)
}
// ObjectStoreS3StaticKey returns the static key to use for S3 object stores.
func (c Config) ObjectStoreS3StaticKey() string {
return c.asString(ObjectStoreS3StaticKey)
}
// ObjectStoreS3StaticSecret returns the static secret to use for S3 object
// stores.
func (c Config) ObjectStoreS3StaticSecret() string {
return c.asString(ObjectStoreS3StaticSecret)
}
// ObjectStoreS3StaticSession returns the static session token to use for S3
// object stores.
func (c Config) ObjectStoreS3StaticSession() string {
return c.asString(ObjectStoreS3StaticSession)
}
// SSHServerPort returns the port the SSH server listens on.
func (c Config) SSHServerPort() int {
return c.intOrDefault(SSHServerPort, DefaultSSHServerPort)
}
// SSHMaxConcurrentConnections returns the maximum number of concurrent
// SSH connections that the controller will allow.
func (c Config) SSHMaxConcurrentConnections() int {
return c.intOrDefault(SSHMaxConcurrentConnections, DefaultSSHMaxConcurrentConnections)
}
// Validate ensures that config is a valid configuration.
func Validate(c Config) error {
if v, ok := c[IdentityPublicKey].(string); ok {
var key bakery.PublicKey
if err := key.UnmarshalText([]byte(v)); err != nil {
return errors.Annotate(err, "invalid identity public key")
}
}
if v, ok := c[IdentityURL].(string); ok {
u, err := url.Parse(v)
if err != nil {
return errors.Annotate(err, "invalid identity URL")
}
// If we've got an identity public key, we allow an HTTP
// scheme for the identity server because we won't need
// to rely on insecure transport to obtain the public
// key.
if _, ok := c[IdentityPublicKey]; !ok && u.Scheme != "https" {
return errors.Errorf("URL needs to be https when %s not provided", IdentityPublicKey)
}
}
if v, ok := c[LoginTokenRefreshURL].(string); ok {
u, err := url.Parse(v)
if err != nil {
return errors.Annotate(err, "invalid login token refresh URL")
}
if u.Scheme == "" || u.Host == "" {
return errors.NotValidf("logic token refresh URL %q", v)
}
}
caCert, caCertOK := c.CACert()
if !caCertOK {
return errors.Errorf("missing CA certificate")
}
if ok, err := pki.IsPemCA([]byte(caCert)); err != nil {
return errors.Annotate(err, "bad CA certificate in configuration")
} else if !ok {
return errors.New("ca certificate in configuration is not a CA")
}
if uuid, ok := c[ControllerUUIDKey].(string); ok && !utils.IsValidUUIDString(uuid) {
return errors.Errorf("controller-uuid: expected UUID, got string(%q)", uuid)
}
if v, ok := c[ApplicationResourceDownloadLimit].(int); ok {
if v < 0 {
return errors.Errorf("negative %s (%d) not valid, use 0 to disable the limit", ApplicationResourceDownloadLimit, v)
}
}
if v, ok := c[ControllerResourceDownloadLimit].(int); ok {
if v < 0 {
return errors.Errorf("negative %s (%d) not valid, use 0 to disable the limit", ControllerResourceDownloadLimit, v)
}
}
if v, ok := c[AgentRateLimitMax].(int); ok {
if v < 0 {
return errors.NotValidf("negative %s (%d)", AgentRateLimitMax, v)
}
}
if v, err := parseDuration(c, AgentRateLimitRate); err != nil && !errors.Is(err, errors.NotFound) {
return errors.Trace(err)
} else if err == nil {
if v == 0 {
return errors.Errorf("%s cannot be zero", AgentRateLimitRate)
}
if v < 0 {
return errors.Errorf("%s cannot be negative", AgentRateLimitRate)
}
if v > time.Minute {
return errors.Errorf("%s must be between 0..1m", AgentRateLimitRate)
}
}
if mgoMemProfile, ok := c[MongoMemoryProfile].(string); ok {
if mgoMemProfile != MongoProfLow && mgoMemProfile != MongoProfDefault {
return errors.Errorf("mongo-memory-profile: expected one of %q or %q got string(%q)", MongoProfLow, MongoProfDefault, mgoMemProfile)
}
}
if v, err := parseDuration(c, MaxDebugLogDuration); err != nil && !errors.Is(err, errors.NotFound) {
return errors.Trace(err)
} else if err == nil {
if v == 0 {
return errors.Errorf("%s cannot be zero", MaxDebugLogDuration)
}
}
if v, ok := c[AgentLogfileMaxBackups].(int); ok {
if v < 0 {
return errors.NotValidf("negative %s", AgentLogfileMaxBackups)
}
}
if v, ok := c[AgentLogfileMaxSize].(string); ok {
mb, err := utils.ParseSize(v)
if err != nil {
return errors.Annotatef(err, "invalid %s in configuration", AgentLogfileMaxSize)
}
if mb < 1 {
return errors.NotValidf("%s less than 1 MB", AgentLogfileMaxSize)
}
}
if v, ok := c[ModelLogfileMaxBackups].(int); ok {
if v < 0 {
return errors.NotValidf("negative %s", ModelLogfileMaxBackups)
}
}
if v, ok := c[ModelLogfileMaxSize].(string); ok {
mb, err := utils.ParseSize(v)
if err != nil {
return errors.Annotatef(err, "invalid %s in configuration", ModelLogfileMaxSize)
}
if mb < 1 {
return errors.NotValidf("%s less than 1 MB", ModelLogfileMaxSize)
}
}
if v, ok := c[MaxTxnLogSize].(string); ok {
if _, err := utils.ParseSize(v); err != nil {
return errors.Annotate(err, "invalid max txn log size in configuration")
}
}
if v, ok := c[PruneTxnSleepTime].(string); ok {
if _, err := time.ParseDuration(v); err != nil {
return errors.Annotatef(err, `%s must be a valid duration (eg "10ms")`, PruneTxnSleepTime)
}
}
if err := c.validateSpaceConfig(JujuHASpace, "juju HA"); err != nil {
return errors.Trace(err)
}
if err := c.validateSpaceConfig(JujuManagementSpace, "juju mgmt"); err != nil {
return errors.Trace(err)
}
var auditLogMaxSize int
if v, ok := c[AuditLogMaxSize].(string); ok {
if size, err := utils.ParseSize(v); err != nil {
return errors.Annotate(err, "invalid audit log max size in configuration")
} else {
auditLogMaxSize = int(size)
}
}
if v, ok := c[AuditingEnabled].(bool); ok {
if v && auditLogMaxSize == 0 {
return errors.Errorf("invalid audit log max size: can't be 0 if auditing is enabled")
}
}
if v, ok := c[AuditLogMaxBackups].(int); ok {
if v < 0 {
return errors.Errorf("invalid audit log max backups: should be a number of files (or 0 to keep all), got %d", v)
}
}
if v, ok := c[AuditLogExcludeMethods].(string); ok {
if v != "" {
for i, name := range strings.Split(v, ",") {
if name != ReadOnlyMethodsWildcard && !methodNameRE.MatchString(name) {
return errors.Errorf(
`invalid audit log exclude methods: should be a list of "Facade.Method" names (or "ReadOnlyMethods"), got %q at position %d`,
name,
i+1,
)
}
}
}
}
if v, ok := c[ControllerAPIPort].(int); ok {
// TODO: change the validation so 0 is invalid and --reset is used.
// However that doesn't exist yet.
if v < 0 {
return errors.NotValidf("non-positive integer for controller-api-port")
}
if v == c.APIPort() {
return errors.NotValidf("controller-api-port matching api-port")
}
if v == c.StatePort() {
return errors.NotValidf("controller-api-port matching state-port")
}
}
if v, ok := c[ControllerName].(string); ok {
if !names.IsValidControllerName(v) {
return errors.Errorf("%s value must be a valid controller name (lowercase or digit with non-leading hyphen), got %q", ControllerName, v)
}
}
if v, ok := c[APIPortOpenDelay].(string); ok {
_, err := time.ParseDuration(v)
if err != nil {
return errors.Errorf("%s value %q must be a valid duration", APIPortOpenDelay, v)
}
}
// Each unit stores the charm and uniter state in a single document.
// Given that mongo by default enforces a 16M limit for documents we
// should also verify that the combined limits don't exceed 16M.
var maxUnitStateSize int
if v, ok := c[MaxCharmStateSize].(int); ok {
if v < 0 {
return errors.Errorf("invalid max charm state size: should be a number of bytes (or 0 to disable limit), got %d", v)
}
maxUnitStateSize += v
} else {
maxUnitStateSize += DefaultMaxCharmStateSize
}
if v, ok := c[MaxAgentStateSize].(int); ok {
if v < 0 {
return errors.Errorf("invalid max agent state size: should be a number of bytes (or 0 to disable limit), got %d", v)
}
maxUnitStateSize += v
} else {
maxUnitStateSize += DefaultMaxAgentStateSize
}
if mongoMax := 16 * 1024 * 1024; maxUnitStateSize > mongoMax {
return errors.Errorf("invalid max charm/agent state sizes: combined value should not exceed mongo's 16M per-document limit, got %d", maxUnitStateSize)
}
if v, ok := c[MigrationMinionWaitMax].(string); ok {
_, err := time.ParseDuration(v)
if err != nil {
return errors.Errorf("%s value %q must be a valid duration", MigrationMinionWaitMax, v)
}
}
if v, err := parseDuration(c, QueryTracingThreshold); err != nil && !errors.Is(err, errors.NotFound) {
return errors.Trace(err)
} else if err == nil {
if v < 0 {
return errors.Errorf("%s value %q must be a positive duration", QueryTracingThreshold, v)
}
}
if v, err := parseRatio(c, OpenTelemetrySampleRatio); err != nil && !errors.Is(err, errors.NotFound) {
return errors.Annotatef(err, "%s", OpenTelemetrySampleRatio)
} else if err == nil {
if v < 0 || v > 1 {
return errors.Errorf("%s value %f must be a ratio between 0 and 1", OpenTelemetrySampleRatio, v)
}
}
if v, err := parseDuration(c, OpenTelemetryTailSamplingThreshold); err != nil && !errors.Is(err, errors.NotFound) {
return errors.Trace(err)
} else if err == nil {
if v < 0 {
return errors.Errorf("%s value %q must be a positive duration", OpenTelemetryTailSamplingThreshold, v)
}
}
if v, ok := c[ObjectStoreType].(string); ok {
if v == "" {
return errors.NotValidf("empty object store type")
}
if _, err := objectstore.ParseObjectStoreType(v); err != nil {
return errors.NotValidf("invalid object store type %q", v)
}
}
if v, ok := c[JujudControllerSnapSource].(string); ok {
switch v {
case "legacy": // TODO(jujud-controller-snap): remove once jujud-controller snap is fully implemented.
case "snapstore", "local", "local-dangerous":
default:
return errors.Errorf("%s value %q must be one of legacy, snapstore, local or local-dangerous.", JujudControllerSnapSource, v)
}
}
if v, ok := c[SSHServerPort].(int); ok {
if v <= 0 {
return errors.NotValidf("non-positive integer for ssh-server-port")
}
if v == c.APIPort() {
return errors.NotValidf("ssh-server-port matching api-port")
}
if v == c.StatePort() {
return errors.NotValidf("ssh-server-port matching state-port")
}
if v == c.ControllerAPIPort() {
return errors.NotValidf("ssh-server-port matching controller-api-port")
}
}
if v, ok := c[SSHMaxConcurrentConnections].(int); ok {
if v <= 0 {
return errors.NotValidf("non-positive integer for ssh-max-concurrent-connections")
}
}
return nil
}
func (c Config) validateSpaceConfig(key, topic string) error {
val := c[key]
if val == nil {
return nil
}
if v, ok := val.(string); ok {
// NOTE(nvinuesa): We also allow the empty string; it is needed to
// un-set the controller config value and is deliberately not covered
// by the regexp that validates the space name.
if !names.IsValidSpace(v) && v != "" {
return errors.NotValidf("%s space name %q", topic, val)
}
} else {
return errors.NotValidf("type for %s space name %v", topic, val)
}
return nil
}
// AsSpaceConstraints checks to see whether config has space names populated
// for management and/or HA (Mongo).
// Non-empty values are merged with any input spaces and returned as a new
// slice reference.
// A slice pointer is used for congruence with the Spaces member in
// constraints.Value.
func (c Config) AsSpaceConstraints(spaces *[]string) *[]string {
newSpaces := set.NewStrings()
if spaces != nil {
for _, s := range *spaces {
newSpaces.Add(s)
}
}
for _, c := range []string{c.JujuManagementSpace(), c.JujuHASpace()} {
// NOTE (hml) 2019-10-30
// This can cause issues in deployment and/or enabling HA if
// c == AlphaSpaceName as the provisioner expects any space
// listed to have subnets. Which is only AWS today.
if c != "" {
newSpaces.Add(c)
}
}
// Preserve a nil pointer if there is no change. This conveys information
// in constraints.Value (not set vs. deliberately set as empty).
if spaces == nil && len(newSpaces) == 0 {
return nil
}
ns := newSpaces.SortedValues()
return &ns
}
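// An illustrative sketch of the merge behaviour; the space names are
// hypothetical:
//
//	c := Config{JujuHASpace: "db-ha", JujuManagementSpace: "mgmt"}
//	spaces := c.AsSpaceConstraints(&[]string{"apps"})
//	// *spaces == []string{"apps", "db-ha", "mgmt"}
//	// With a nil input and no spaces configured, the nil pointer is preserved.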
func parseDuration(c Config, name string) (time.Duration, error) {
if _, ok := c[name]; !ok {
return 0, errors.NotFoundf("config key %q", name)
}
switch t := c[name].(type) {
case string:
value, err := time.ParseDuration(t)
return value, err
case time.Duration:
return t, nil
case nil:
return 0, nil
default:
return 0, errors.Errorf("unexpected type %T", c[name])
}
}
func parseRatio(c Config, name string) (float64, error) {
if _, ok := c[name]; !ok {
return 0, errors.NotFoundf("config key %q", name)
}
switch t := c[name].(type) {
case float64:
return t, nil
case float32:
return float64(t), nil
case string:
value, err := strconv.ParseFloat(t, 64)
return value, err
case nil:
return 0, nil
default:
return 0, errors.Errorf("unexpected type %T", c[name])
}
}
// HasCompleteS3ControllerConfig returns nil if the controller has a complete
// S3 configuration: an endpoint, static key, and static secret.
func HasCompleteS3ControllerConfig(cfg Config) error {
endpoint := cfg.ObjectStoreS3Endpoint()
staticKey := cfg.ObjectStoreS3StaticKey()
staticSecret := cfg.ObjectStoreS3StaticSecret()
return HasCompleteS3Config(endpoint, staticKey, staticSecret)
}
// HasCompleteS3Config returns nil if the S3 configuration is complete, and an error describing the first missing item otherwise.
func HasCompleteS3Config(endpoint, staticKey, staticSecret string) error {
if endpoint == "" {
return errors.New("missing S3 endpoint")
}
if staticKey == "" {
return errors.New("missing S3 static key")
}
if staticSecret == "" {
return errors.New("missing S3 static secret")
}
return nil
}
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package controller
import (
"fmt"
"time"
"github.com/juju/errors"
)
// EncodeToString encodes the given controller config into a map of strings.
func EncodeToString(cfg Config) (map[string]string, error) {
result := make(map[string]string, len(cfg))
for key, v := range cfg {
switch v := v.(type) {
case string:
result[key] = v
case bool:
result[key] = fmt.Sprintf("%t", v)
case int, int8, int16, int32, int64:
result[key] = fmt.Sprintf("%d", v)
case uint, uint8, uint16, uint32, uint64:
result[key] = fmt.Sprintf("%d", v)
case float32, float64:
result[key] = fmt.Sprintf("%f", v)
case time.Duration:
result[key] = v.String()
case time.Time:
result[key] = v.Format(time.RFC3339Nano)
case fmt.Stringer:
result[key] = v.String()
default:
return nil, errors.Errorf("unable to serialize controller config key %q: unknown type %T", key, v)
}
}
return result, nil
}
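// An illustrative sketch of the encoding; the keys and values used here are
// taken from the controller config constants above:
//
//	encoded, err := EncodeToString(Config{
//	    QueryTracingEnabled:   true,
//	    SSHServerPort:         17022,
//	    QueryTracingThreshold: time.Second,
//	})
//	// encoded["query-tracing-enabled"] == "true"
//	// encoded["ssh-server-port"] == "17022"
//	// encoded["query-tracing-threshold"] == "1s"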
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package arch
import (
"regexp"
"runtime"
"strings"
"github.com/juju/collections/set"
)
const (
// DefaultArchitecture represents the default architecture we expect to use
// if none is present.
DefaultArchitecture = AMD64
)
// Arch represents a platform architecture.
type Arch = string
// Arches defines a list of arches to compare against.
type Arches struct {
set set.Strings
}
// AllArches creates a series of arches to compare against.
func AllArches() Arches {
return Arches{
set: set.NewStrings(AllSupportedArches...),
}
}
// Contains checks to see if a given arch is found within the list.
func (a Arches) Contains(arch Arch) bool {
return a.set.Contains(arch)
}
// StringList returns an ordered (alphabetically sorted) list of the
// arch strings.
func (a Arches) StringList() []string {
return a.set.SortedValues()
}
func (a Arches) String() string {
return strings.Join(a.set.SortedValues(), ",")
}
// The following constants define the machine architectures supported by Juju.
const (
AMD64 Arch = "amd64"
ARM64 Arch = "arm64"
PPC64EL Arch = "ppc64el"
S390X Arch = "s390x"
RISCV64 Arch = "riscv64"
)
// AllSupportedArches records the machine architectures recognised by Juju.
var AllSupportedArches = []string{
AMD64,
ARM64,
PPC64EL,
S390X,
RISCV64,
}
// UnsupportedArches records the machine architectures not supported by Juju.
// Note: don't make const to prevent referencing it.
var UnsupportedArches = []string{
"i386", "arm", "armhf", "ppc",
}
// archREs maps regular expressions for matching
// `uname -m` to architectures recognised by Juju.
var archREs = []struct {
*regexp.Regexp
arch string
}{
{Regexp: regexp.MustCompile("amd64|x86_64"), arch: AMD64},
{Regexp: regexp.MustCompile("arm64|aarch64"), arch: ARM64},
{Regexp: regexp.MustCompile("ppc64|ppc64el|ppc64le"), arch: PPC64EL},
{Regexp: regexp.MustCompile("s390x"), arch: S390X},
{Regexp: regexp.MustCompile("riscv64|risc$|risc-[vV]64"), arch: RISCV64},
}
// Override for testing.
var HostArch = hostArch
// hostArch returns the Juju architecture of the machine on which it is run.
func hostArch() string {
return NormaliseArch(runtime.GOARCH)
}
// NormaliseArch returns the Juju architecture corresponding to a machine's
// reported architecture. The Juju architecture is used to filter
// simplestreams lookups of tools and images.
func NormaliseArch(rawArch string) string {
rawArch = strings.TrimSpace(rawArch)
for _, re := range archREs {
if re.Match([]byte(rawArch)) {
return re.arch
}
}
return rawArch
}
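// A few illustrative mappings (the last value is hypothetical and simply
// falls through unchanged):
//
//	NormaliseArch("x86_64")  // "amd64"
//	NormaliseArch("aarch64") // "arm64"
//	NormaliseArch("ppc64le") // "ppc64el"
//	NormaliseArch("sparc64") // "sparc64" (no mapping, returned as-is)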
// IsSupportedArch returns true if arch is one supported by Juju.
func IsSupportedArch(arch string) bool {
for _, a := range AllSupportedArches {
if a == arch {
return true
}
}
return false
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package base
import (
"fmt"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/juju/internal/charm"
)
// Base represents an OS/Channel.
// Bases can also be converted to and from a series string.
type Base struct {
OS string
// Channel is track[/risk/branch].
// eg "22.04" or "22.04/stable" etc.
Channel Channel
}
const (
// UbuntuOS is the special value to be placed in the OS field of a base to
// indicate that the operating system is an Ubuntu distro.
UbuntuOS = "ubuntu"
)
// ParseBase constructs a Base from the os and channel string.
func ParseBase(os string, channel string) (Base, error) {
if os == "" && channel == "" {
return Base{}, nil
}
if os == "" || channel == "" {
return Base{}, errors.NotValidf("missing base os or channel")
}
ch, err := ParseChannelNormalize(channel)
if err != nil {
return Base{}, errors.Annotatef(err, "parsing base %s@%s", os, channel)
}
return Base{OS: strings.ToLower(os), Channel: ch}, nil
}
// ParseBaseFromString takes a string containing os and channel separated
// by @ and returns a base.
func ParseBaseFromString(b string) (Base, error) {
parts := strings.Split(b, "@")
if len(parts) != 2 {
return Base{}, errors.New("expected base string to contain os and channel separated by '@'")
}
channel, err := ParseChannelNormalize(parts[1])
if err != nil {
return Base{}, errors.Trace(err)
}
return Base{OS: parts[0], Channel: channel}, nil
}
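// Illustrative parses; the channel is normalized to include a risk:
//
//	b, _ := ParseBase("ubuntu", "22.04")
//	b.String() // "ubuntu@22.04/stable"
//	b2, _ := ParseBaseFromString("ubuntu@24.04/stable")
//	b2.Channel.Track // "24.04"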
// ParseManifestBases transforms charm.Bases to Bases. This
// format comes out of a charm.Manifest and contains architectures,
// which Base does not. Only unique Bases, with architectures
// stripped, will be returned.
func ParseManifestBases(manifestBases []charm.Base) ([]Base, error) {
if len(manifestBases) == 0 {
return nil, errors.BadRequestf("base len zero")
}
bases := make([]Base, 0)
unique := set.NewStrings()
for _, m := range manifestBases {
// The data actually comes over the wire as an operating system
// with a single architecture, not multiple ones.
// TODO - (hml) 2023-05-18
// There is no guarantee that every architecture has
// the same operating systems. This logic should be
// investigated.
m.Architectures = []string{}
if unique.Contains(m.String()) {
continue
}
base, err := ParseBase(m.Name, m.Channel.String())
if err != nil {
return nil, err
}
bases = append(bases, base)
unique.Add(m.String())
}
return bases, nil
}
// MustParseBaseFromString is like ParseBaseFromString but panics if the string
// is invalid.
func MustParseBaseFromString(b string) Base {
base, err := ParseBaseFromString(b)
if err != nil {
panic(err)
}
return base
}
// MakeDefaultBase creates a base from an os and simple version string, eg "22.04".
func MakeDefaultBase(os string, channel string) Base {
return Base{OS: os, Channel: MakeDefaultChannel(channel)}
}
// Empty returns true if the base is empty.
func (b Base) Empty() bool {
return b.OS == "" && b.Channel.Empty()
}
func (b Base) String() string {
if b.OS == "" {
return ""
}
return fmt.Sprintf("%s@%s", b.OS, b.Channel)
}
// IsCompatible returns true if base other is the same underlying
// OS version, ignoring risk.
func (b Base) IsCompatible(other Base) bool {
return b.OS == other.OS && b.Channel.Track == other.Channel.Track
}
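// Illustrative comparisons: risk is ignored, the track is not.
//
//	a := MustParseBaseFromString("ubuntu@22.04/stable")
//	b := MustParseBaseFromString("ubuntu@22.04/edge")
//	a.IsCompatible(b) // true
//	c := MustParseBaseFromString("ubuntu@24.04")
//	a.IsCompatible(c) // false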
// ubuntuLTSes lists the Ubuntu LTS releases that
// this version of Juju knows about
var ubuntuLTSes = []Base{
MakeDefaultBase(UbuntuOS, "20.04"),
MakeDefaultBase(UbuntuOS, "22.04"),
MakeDefaultBase(UbuntuOS, "24.04"),
}
// IsUbuntuLTS returns true if this base is a recognised
// Ubuntu LTS.
func (b Base) IsUbuntuLTS() bool {
for _, ubuntuLTS := range ubuntuLTSes {
if b.IsCompatible(ubuntuLTS) {
return true
}
}
return false
}
// DisplayString returns the base string ignoring risk.
func (b Base) DisplayString() string {
if b.Channel.Track == "" || b.OS == "" {
return ""
}
return b.OS + "@" + b.Channel.DisplayString()
}
// Copyright 2022 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package base
import (
"fmt"
"strings"
"github.com/juju/errors"
)
// Risk describes the type of risk in a current channel.
type Risk string
const (
Stable Risk = "stable"
Candidate Risk = "candidate"
Beta Risk = "beta"
Edge Risk = "edge"
)
// Risks is a list of the available channel risks.
var Risks = []Risk{
Stable,
Candidate,
Beta,
Edge,
}
func isRisk(potential string) bool {
for _, risk := range Risks {
if potential == string(risk) {
return true
}
}
return false
}
// Channel identifies and completely describes an OS channel.
//
// A channel consists of, and is subdivided by, tracks and risk-levels:
// - Tracks represents the version of the os, eg "22.04".
// - Risk-levels represent a progressive potential trade-off between stability
// and new features.
//
// The complete channel name can be structured as two distinct parts separated
// by a slash:
//
// <track>/<risk>
type Channel struct {
Track string `json:"track,omitempty"`
Risk Risk `json:"risk,omitempty"`
}
// MakeDefaultChannel creates a normalized channel for
// the specified track with a default risk of "stable".
func MakeDefaultChannel(track string) Channel {
ch := Channel{
Track: track,
}
return ch.Normalize()
}
// ParseChannel parses a string representing a channel.
func ParseChannel(s string) (Channel, error) {
if s == "" {
return Channel{}, errors.NotValidf("empty channel")
}
p := strings.Split(s, "/")
var risk, track *string
switch len(p) {
case 1:
track = &p[0]
case 2:
track, risk = &p[0], &p[1]
default:
return Channel{}, errors.Errorf("channel is malformed and has too many components %q", s)
}
ch := Channel{}
if risk != nil {
if !isRisk(*risk) {
return Channel{}, errors.NotValidf("risk in channel %q", s)
}
// We can lift this into a risk, as we've validated prior to this to
// ensure it's a valid risk.
ch.Risk = Risk(*risk)
}
if track != nil {
if *track == "" {
return Channel{}, errors.NotValidf("track in channel %q", s)
}
ch.Track = *track
}
return ch, nil
}
// ParseChannelNormalize parses a string representing a store channel.
// The returned channel's track, risk and name are normalized.
func ParseChannelNormalize(s string) (Channel, error) {
ch, err := ParseChannel(s)
if err != nil {
return Channel{}, errors.Trace(err)
}
return ch.Normalize(), nil
}
// Normalize the channel with normalized track, risk and names.
func (ch Channel) Normalize() Channel {
track := ch.Track
risk := ch.Risk
if risk == "" {
risk = "stable"
}
return Channel{
Track: track,
Risk: risk,
}
}
// Empty returns true if all its components are empty.
func (ch Channel) Empty() bool {
return ch.Track == "" && ch.Risk == ""
}
func (ch Channel) String() string {
path := ch.Track
if risk := ch.Risk; risk != "" {
path = fmt.Sprintf("%s/%s", path, risk)
}
return path
}
func (ch Channel) DisplayString() string {
track, risk := ch.Track, ch.Risk
if risk == Stable {
risk = ""
}
if risk == "" {
return track
}
return fmt.Sprintf("%s/%s", track, risk)
}
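// Illustrative round trip: String always includes the risk, while
// DisplayString elides a stable risk.
//
//	ch, _ := ParseChannelNormalize("22.04")
//	ch.String()        // "22.04/stable"
//	ch.DisplayString() // "22.04"
//	ch2, _ := ParseChannelNormalize("24.04/edge")
//	ch2.DisplayString() // "24.04/edge"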
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package base
import (
"github.com/juju/errors"
)
// seriesBaseMapping is a hard-coded set of pairs
// of equivalent series and bases. We use this to
// convert a base into its equivalent series.
var seriesBaseMapping = []struct {
base Base
series string
}{{
base: MakeDefaultBase(UbuntuOS, "20.04"),
series: "focal",
}, {
base: MakeDefaultBase(UbuntuOS, "20.10"),
series: "groovy",
}, {
base: MakeDefaultBase(UbuntuOS, "21.04"),
series: "hirsute",
}, {
base: MakeDefaultBase(UbuntuOS, "21.10"),
series: "impish",
}, {
base: MakeDefaultBase(UbuntuOS, "22.04"),
series: "jammy",
}, {
base: MakeDefaultBase(UbuntuOS, "22.10"),
series: "kinetic",
}, {
base: MakeDefaultBase(UbuntuOS, "23.04"),
series: "lunar",
}, {
base: MakeDefaultBase(UbuntuOS, "23.10"),
series: "mantic",
}, {
base: MakeDefaultBase(UbuntuOS, "24.04"),
series: "noble",
}, {
base: MakeDefaultBase(UbuntuOS, "24.10"),
series: "oracular",
}}
// GetSeriesFromBase returns the series name for a
// given Base. This is needed to support legacy series.
func GetSeriesFromBase(v Base) (string, error) {
for _, pair := range seriesBaseMapping {
if v.IsCompatible(pair.base) {
return pair.series, nil
}
}
return "", errors.NotFoundf("os %q version %q", v.OS, v.Channel.Track)
}
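// exampleGetSeriesFromBase is an illustrative sketch (not part of the
// original package): mapping a hypothetical Ubuntu 22.04 base to its legacy
// series name, which per the mapping above is expected to be "jammy".
func exampleGetSeriesFromBase() (string, error) {
	return GetSeriesFromBase(MakeDefaultBase(UbuntuOS, "22.04"))
}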
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package base
// ControllerBases returns the supported controller bases available at
// execution time.
func ControllerBases() []Base {
return []Base{
MakeDefaultBase(UbuntuOS, "20.04"),
MakeDefaultBase(UbuntuOS, "22.04"),
MakeDefaultBase(UbuntuOS, "24.04"),
}
}
// WorkloadBases returns the supported workload bases available at
// execution time.
func WorkloadBases() []Base {
return []Base{
MakeDefaultBase(UbuntuOS, "20.04"),
MakeDefaultBase(UbuntuOS, "22.04"),
MakeDefaultBase(UbuntuOS, "23.10"),
MakeDefaultBase(UbuntuOS, "24.04"),
}
}
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package base
import (
"github.com/juju/errors"
)
// ValidateBase attempts to validate the requested base if one is supplied,
// otherwise it uses the fallback base and validates that one.
// It returns the base it validated against, or an error if validation fails.
// Note: the selected base is returned even when there is an error, so that it
// can be used as a fallback during error scenarios.
func ValidateBase(supportedBases []Base, base, fallbackPreferredBase Base) (Base, error) {
// Validate the requested base.
// Attempt to do the validation in one place, so it makes it easier to
// reason about where the validation happens. This only happens for IAAS
// models, as CAAS can't take base as an argument.
var requestedBase Base
if !base.Empty() {
requestedBase = base
} else {
// If no bootstrap base is supplied, go and get that information from
// the fallback. We should still validate the fallback value to ensure
// that we also work with that base.
requestedBase = fallbackPreferredBase
}
var found bool
for _, supportedBase := range supportedBases {
if supportedBase.IsCompatible(requestedBase) {
found = true
break
}
}
if !found {
return requestedBase, errors.NotSupportedf("%s", requestedBase.String())
}
return requestedBase, nil
}
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package metrics
// MetricKey represents metrics keys collected and sent to charmhub.
type MetricKey string
func (c MetricKey) String() string {
return string(c)
}
const (
// Controller is used in RequestMetrics
Controller MetricKey = "controller"
// Model is used in RequestMetrics
Model MetricKey = "model"
)
// MetricValueKey represents metrics value keys collected and sent to charmhub.
type MetricValueKey string
func (c MetricValueKey) String() string {
return string(c)
}
const (
//
// Controller and Model, included in the RefreshRequest Metrics.
//
// UUID is the uuid of a model, either controller or model.
UUID MetricValueKey = "uuid"
// JujuVersion is the version of juju running in this model.
JujuVersion MetricValueKey = "juju-version"
//
// Model metrics, included in the RefreshRequest Metrics.
//
// Provider matches the provider type defined in juju.
Provider MetricValueKey = "provider"
// Region is the region this model is operating in.
Region MetricValueKey = "region"
// Cloud is the name of the cloud this model is operating in.
Cloud MetricValueKey = "cloud"
// NumApplications is the number of applications in the model.
NumApplications MetricValueKey = "applications"
// NumMachines is the number of machines in the model.
NumMachines MetricValueKey = "machines"
// NumUnits is the number of units in the model.
NumUnits MetricValueKey = "units"
//
// Charm metrics, included in the RefreshRequestContext Metrics.
//
// Relations is a comma-separated list (no spaces) of charms currently
// related to an application.
Relations MetricValueKey = "relations"
)
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package credential
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/juju/juju/core/user"
"github.com/juju/juju/internal/uuid"
)
// Key represents the natural key of a cloud credential.
type Key struct {
// Cloud is the cloud name that the credential applies to. Key is valid when
// this value is set.
Cloud string
// Owner is the owner of the credential. Key is valid when this value is set.
Owner user.Name
// Name is the name of the credential. It is valid when this value is set.
Name string
}
// KeyFromTag provides a utility for converting a CloudCredentialTag into a Key
// struct. If the tag's IsZero() returns true then a zero value Key struct is
// returned.
func KeyFromTag(tag names.CloudCredentialTag) Key {
if tag.IsZero() {
return Key{}
}
return Key{
Cloud: tag.Cloud().Id(),
Owner: user.NameFromTag(tag.Owner()),
Name: tag.Name(),
}
}
// IsZero returns true if the [Key] struct is its zero value with no values set.
func (k Key) IsZero() bool {
return k == Key{}
}
// String implements the stringer interface.
func (k Key) String() string {
return fmt.Sprintf("%s/%s/%s", k.Cloud, k.Owner, k.Name)
}
// Tag will convert this Key struct to a juju names CloudCredentialTag. Errors in
// parsing of the tag will be returned. If the Key struct is its zero value then
// a zero value Tag will be returned.
func (k Key) Tag() (names.CloudCredentialTag, error) {
if k.IsZero() {
return names.CloudCredentialTag{}, nil
}
return names.ParseCloudCredentialTag(
fmt.Sprintf("%s-%s_%s_%s", names.CloudCredentialTagKind, k.Cloud, k.Owner, k.Name),
)
}
// Validate is responsible for checking that all of the fields of Key are set
// to values that are valid for use. You can also use IsZero() to test if the
// Key is currently set to its zero value.
func (k Key) Validate() error {
if k.Cloud == "" {
return fmt.Errorf("%w cloud cannot be empty", errors.NotValid)
}
if k.Name == "" {
return fmt.Errorf("%w name cannot be empty", errors.NotValid)
}
if k.Owner.IsZero() {
return fmt.Errorf("%w owner cannot be empty", errors.NotValid)
}
return nil
}
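// exampleCredentialKey is an illustrative sketch (not part of the original
// package): building a key for a hypothetical "aws" credential named
// "default" and converting it to a cloud credential tag.
func exampleCredentialKey(owner user.Name) (names.CloudCredentialTag, error) {
	k := Key{Cloud: "aws", Owner: owner, Name: "default"}
	if err := k.Validate(); err != nil {
		return names.CloudCredentialTag{}, err
	}
	// The resulting tag has the form "cloudcred-aws_<owner>_default".
	return k.Tag()
}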
// UUID represents a unique id within the juju controller for a cloud credential.
type UUID string
// NewUUID generates a new credential [UUID]
func NewUUID() (UUID, error) {
uuid, err := uuid.NewUUID()
if err != nil {
return UUID(""), fmt.Errorf("creating new credential id: %w", err)
}
return UUID(uuid.String()), nil
}
// String implements the stringer interface returning a string representation of
// the credential UUID.
func (u UUID) String() string {
return string(u)
}
// Validate ensures the consistency of the uuid. If the [UUID] is invalid an
// error satisfying [errors.NotValid] will be returned.
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("credential uuid cannot be empty%w", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("credential uuid %q %w", u, errors.NotValid)
}
return nil
}
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package devices
import (
"strconv"
"strings"
"github.com/juju/errors"
)
var deviceParseErr = errors.Errorf("cannot parse device constraints string, supported format is [<count>,]<device-class>|<vendor/type>[,<key>=<value>;...]")
// DeviceType defines a device type.
type DeviceType string
// Constraints describes a set of device constraints.
type Constraints struct {
// Type is the device type or device-class.
// currently supported types are
// - gpu
// - nvidia.com/gpu
// - amd.com/gpu
Type DeviceType `bson:"type"`
// Count is the number of devices that the user has asked for - count min and max are the
// number of devices the charm requires.
Count int64 `bson:"count"`
// Attributes is a collection of key value pairs device related (node affinity labels/tags etc.).
Attributes map[string]string `bson:"attributes"`
}
// ParseConstraints parses the specified string and creates a
// Constraints structure.
//
// The acceptable format for device constraints is a comma separated
// sequence of: COUNT, TYPE, and ATTRIBUTES with format like
//
// <device-name>=[<count>,]<device-class>|<vendor/type>[,<attributes>]
//
// where
//
// COUNT is the number of devices that the user has asked for - count min and max are the
// number of devices the charm requires. If unspecified, COUNT defaults to 1.
func ParseConstraints(s string) (Constraints, error) {
var cons Constraints
fields := strings.Split(s, ",")
fieldsLen := len(fields)
if fieldsLen < 1 || fieldsLen > 3 {
return cons, deviceParseErr
}
if fieldsLen == 1 {
cons.Count = 1
cons.Type = DeviceType(fields[0])
} else {
count, err := parseCount(fields[0])
if err != nil {
return Constraints{}, err
}
cons.Count = count
cons.Type = DeviceType(fields[1])
if fieldsLen == 3 {
attr, err := parseAttributes(fields[2])
if err != nil {
return Constraints{}, err
}
cons.Attributes = attr
}
}
return cons, nil
}
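// exampleParseConstraints is an illustrative sketch (not part of the original
// package) of the accepted constraint formats; the device class and attribute
// values are hypothetical.
func exampleParseConstraints() error {
	// Count, device class and attributes: two GPUs with an affinity label.
	if _, err := ParseConstraints("2,nvidia.com/gpu,gpu=nvidia-tesla-p100"); err != nil {
		return err
	}
	// Device class only: the count defaults to 1.
	_, err := ParseConstraints("nvidia.com/gpu")
	return err
}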
func parseAttributes(s string) (map[string]string, error) {
parseAttribute := func(s string) ([]string, error) {
kv := strings.Split(s, "=")
if len(kv) != 2 {
return nil, errors.Errorf("device attribute key/value pair has bad format: %q", s)
}
return kv, nil
}
attr := map[string]string{}
for _, attrStr := range strings.Split(s, ";") {
kv, err := parseAttribute(attrStr)
if err != nil {
return nil, err
}
attr[kv[0]] = kv[1]
}
return attr, nil
}
func parseCount(s string) (int64, error) {
errMsg := errors.Errorf("count must be greater than zero, got %q", s)
i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, errMsg
}
if i > 0 {
return i, nil
}
return 0, errMsg
}
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package devices
// Fuzz is a go-fuzz entry point that exercises ParseConstraints with
// arbitrary input.
func Fuzz(data []byte) int {
_, err := ParseConstraints(string(data))
if err != nil {
return 0
}
return 1
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package http
import (
"context"
"net/http"
"github.com/juju/errors"
)
const (
// ErrHTTPClientDying is used to indicate to *third parties* that the
// http client worker is dying, instead of catacomb.ErrDying, which is
// unsuitable for propagating inter-worker.
// This error indicates to consuming workers that their dependency has
// become unmet and a restart by the dependency engine is imminent.
ErrHTTPClientDying = errors.ConstError("http client worker is dying")
)
// HTTPClientGetter is the interface that is used to get a http client for a
// given namespace.
type HTTPClientGetter interface {
// GetHTTPClient returns a http client for the given namespace.
GetHTTPClient(context.Context, Purpose) (HTTPClient, error)
}
// HTTPClient is the interface that is used to do http requests.
type HTTPClient interface {
// Do sends an HTTP request and returns an HTTP response. The client will
// follow policy (such as redirects, cookies, auth) as configured on the
// client.
Do(*http.Request) (*http.Response, error)
}
// Purpose is a type used to define the namespace of a http client.
// This allows multiple http clients to be created with different namespaces.
type Purpose string
const (
// CharmhubPurpose is the namespace for the charmhub http client.
CharmhubPurpose Purpose = "charmhub"
// S3Purpose is the namespace for the s3 http client.
S3Purpose Purpose = "s3"
// SSHImporterPurpose is the namespace for the ssh importer http client.
SSHImporterPurpose Purpose = "ssh-importer"
)
func (n Purpose) String() string {
return string(n)
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package life
import (
"github.com/juju/errors"
)
// Value indicates the state of some entity.
type Value string
const (
// Alive indicates that some entity is meant to exist.
Alive Value = "alive"
// Dying indicates that some entity should be removed.
Dying Value = "dying"
// Dead indicates that some entity is no longer useful,
// and can be destroyed unconditionally.
Dead Value = "dead"
)
// Validate returns an error if the value is not known.
func (v Value) Validate() error {
switch v {
case Alive, Dying, Dead:
return nil
}
return errors.NotValidf("life value %q", v)
}
// Predicate is a function that reports whether a life Value satisfies
// some condition.
type Predicate func(Value) bool
// IsDead is a Predicate that returns true if the supplied value is Dead.
//
// This indicates that the entity in question is dead.
func IsDead(v Value) bool {
return v == Dead
}
// IsAlive is a Predicate that returns true if the supplied value
// is Alive.
//
// This generally indicates that the entity in question is expected
// to be existing for now and not going away or gone completely.
func IsAlive(v Value) bool {
return v == Alive
}
// IsNotAlive is a Predicate that returns true if the supplied value
// is not Alive.
//
// This generally indicates that the entity in question is at some
// stage of destruction/cleanup.
func IsNotAlive(v Value) bool {
return v != Alive
}
// IsNotDead is a Predicate that returns true if the supplied value
// is not Dead.
//
// This generally indicates that the entity in question is active in
// some way, and can probably not be completely destroyed without
// consequences.
func IsNotDead(v Value) bool {
return v != Dead
}
// Copyright 2017 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"sort"
"sync"
"time"
"github.com/juju/clock"
"github.com/juju/errors"
)
// BufferedLogWriter wraps a LogWriter, providing a buffer that
// accumulates log messages, flushing them to the underlying logger
// when enough messages have been accumulated.
// The emitted records are sorted by timestamp.
type BufferedLogWriter struct {
l LogWriter
clock clock.Clock
flushInterval time.Duration
mu sync.Mutex
buf []LogRecord
flushTimer clock.Timer
}
// NewBufferedLogWriter returns a new BufferedLogWriter, wrapping the given
// Logger with a buffer of the specified size and flush interval.
func NewBufferedLogWriter(
l LogWriter,
bufferSize int,
flushInterval time.Duration,
clock clock.Clock,
) *BufferedLogWriter {
return &BufferedLogWriter{
l: l,
buf: make([]LogRecord, 0, bufferSize),
clock: clock,
flushInterval: flushInterval,
}
}
// insertSorted inserts each record from in into recs, keeping recs ordered
// by timestamp. recs is assumed to already be sorted.
func insertSorted(recs []LogRecord, in []LogRecord) []LogRecord {
for _, r := range in {
i := sort.Search(len(recs), func(i int) bool { return recs[i].Time.After(r.Time) })
if len(recs) == i {
recs = append(recs, r)
continue
}
recs = append(recs[:i+1], recs[i:]...)
recs[i] = r
}
return recs
}
// Log is part of the Logger interface.
//
// BufferedLogWriter's Log implementation will buffer log records up to
// the specified capacity and duration; after either of which is exceeded,
// the records will be flushed to the underlying logger.
func (b *BufferedLogWriter) Log(in []LogRecord) error {
b.mu.Lock()
defer b.mu.Unlock()
// Sort incoming records.
// We only use the first N so they need
// to be sorted first up.
sort.Slice(in, func(i, j int) bool {
return in[i].Time.Before(in[j].Time)
})
for len(in) > 0 {
r := cap(b.buf) - len(b.buf)
n := len(in)
if n > r {
n = r
}
b.buf = insertSorted(b.buf, in[:n])
in = in[n:]
if len(b.buf) >= cap(b.buf) {
if err := b.flush(); err != nil {
return errors.Trace(err)
}
}
}
if len(b.buf) > 0 && b.flushTimer == nil {
b.flushTimer = b.clock.AfterFunc(b.flushInterval, b.flushOnTimer)
}
return nil
}
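// exampleBufferedLogWriter is an illustrative sketch (not part of the
// original package): wrapping an existing LogWriter so that records are
// flushed in batches of up to 1000, or at least once per second.
func exampleBufferedLogWriter(underlying LogWriter) error {
	w := NewBufferedLogWriter(underlying, 1000, time.Second, clock.WallClock)
	if err := w.Log([]LogRecord{{Time: time.Now(), Message: "starting"}}); err != nil {
		return err
	}
	// Force any buffered records out, e.g. before shutting down.
	return w.Flush()
}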
// Flush flushes any buffered log records to the underlying Logger.
func (b *BufferedLogWriter) Flush() error {
b.mu.Lock()
defer b.mu.Unlock()
return b.flush()
}
func (b *BufferedLogWriter) flushOnTimer() {
b.mu.Lock()
defer b.mu.Unlock()
// Can't do anything about errors here, except to
// ignore them and let the Log() method report them
// when the buffer fills up.
b.flush()
}
// flush flushes any buffered log records to the underlying Logger, and stops
// the flush timer if there is one. The caller must be holding b.mu.
func (b *BufferedLogWriter) flush() error {
if b.flushTimer != nil {
b.flushTimer.Stop()
b.flushTimer = nil
}
if len(b.buf) > 0 {
if err := b.l.Log(b.buf); err != nil {
return errors.Trace(err)
}
b.buf = b.buf[:0]
}
return nil
}
// Copyright 2022 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"context"
"fmt"
"io"
"path/filepath"
"sort"
"strings"
"github.com/juju/loggo/v2"
"github.com/juju/names/v6"
"github.com/juju/juju/core/model"
"github.com/juju/juju/internal/errors"
)
const (
ErrLoggerDying = errors.ConstError("logger worker is dying")
)
// Level represents the log level.
type Level uint32
// The severity levels. Higher values are considered more
// important.
const (
UNSPECIFIED Level = iota
TRACE
DEBUG
INFO
WARNING
ERROR
CRITICAL
)
// String implements Stringer.
func (level Level) String() string {
switch level {
case UNSPECIFIED:
return "UNSPECIFIED"
case TRACE:
return "TRACE"
case DEBUG:
return "DEBUG"
case INFO:
return "INFO"
case WARNING:
return "WARNING"
case ERROR:
return "ERROR"
case CRITICAL:
return "CRITICAL"
default:
return "<unknown>"
}
}
// ParseLevelFromString returns the log level from the given string.
func ParseLevelFromString(level string) (Level, bool) {
level = strings.ToUpper(level)
switch level {
case "UNSPECIFIED":
return UNSPECIFIED, true
case "TRACE":
return TRACE, true
case "DEBUG":
return DEBUG, true
case "INFO":
return INFO, true
case "WARN", "WARNING":
return WARNING, true
case "ERROR":
return ERROR, true
case "CRITICAL":
return CRITICAL, true
default:
return UNSPECIFIED, false
}
}
// Labels represents key values which are assigned to a log entry.
type Labels map[string]string
const (
rootString = "<root>"
)
// Config represents the configuration of the loggers.
type Config map[string]Level
// String returns a logger configuration string that may be parsed
// using ParseConfigurationString.
func (c Config) String() string {
if c == nil {
return ""
}
// output in alphabetical order.
var names []string
for name := range c {
names = append(names, name)
}
sort.Strings(names)
var entries []string
for _, name := range names {
level := c[name]
if name == "" {
name = rootString
}
entry := fmt.Sprintf("%s=%s", name, level)
entries = append(entries, entry)
}
return strings.Join(entries, ";")
}
// Logger is an interface that provides logging methods.
type Logger interface {
// Criticalf logs a message at the critical level.
Criticalf(ctx context.Context, msg string, args ...any)
// Errorf logs a message at the error level.
Errorf(ctx context.Context, msg string, args ...any)
// Warningf logs a message at the warning level.
Warningf(ctx context.Context, msg string, args ...any)
// Infof logs a message at the info level.
Infof(ctx context.Context, msg string, args ...any)
// Debugf logs a message at the debug level.
Debugf(ctx context.Context, msg string, args ...any)
// Tracef logs a message at the trace level.
Tracef(ctx context.Context, msg string, args ...any)
// Logf logs information at the given level.
// The provided arguments are assembled together into a string with
// fmt.Sprintf.
Logf(ctx context.Context, level Level, labels Labels, format string, args ...any)
// IsLevelEnabled returns true if the given level is enabled for the logger.
IsLevelEnabled(Level) bool
// Child returns a new logger with the given name.
Child(name string, tags ...string) Logger
// GetChildByName returns a child logger with the given name.
GetChildByName(name string) Logger
}
// LoggerContext is an interface that provides a method to get loggers.
type LoggerContext interface {
// GetLogger returns a logger with the given name and tags.
GetLogger(name string, tags ...string) Logger
// ConfigureLoggers configures loggers according to the given string
// specification, which specifies a set of modules and their associated
// logging levels. Loggers are colon- or semicolon-separated; each module is
// specified as <modulename>=<level>. White space outside of module names
// and levels is ignored. The root module is specified with the name
// "<root>".
//
// An example specification:
//
// <root>=ERROR; foo.bar=WARNING
//
// Label matching can be applied to the loggers by providing a set of labels
// to the function. If a logger has a label that matches the provided
// labels, then the logger will be configured with the provided level. If
// the logger does not have a label that matches the provided labels, then
// the logger will not be configured. No labels will configure all loggers
// in the specification.
ConfigureLoggers(specification string) error
// ResetLoggerLevels iterates through the known logging modules and sets the
// levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
// If labels are provided, then only loggers that have the provided labels
// will be reset.
ResetLoggerLevels()
// Config returns the current configuration of the Loggers. Loggers
// with UNSPECIFIED level will not be included.
Config() Config
// AddWriter adds a writer to the list to be called for each logging call.
// The name cannot be empty, and the writer cannot be nil. If an existing
// writer exists with the specified name, an error is returned.
//
// Note: we're relying on loggo.Writer here, until we do model level
// logging. Deprecated: This will be removed in the future and is only here
// whilst we cut things across.
AddWriter(name string, writer loggo.Writer) error
}
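// exampleConfigureLoggers is an illustrative sketch (not part of the original
// package): applying a hypothetical logging specification to a LoggerContext,
// setting the root logger to WARNING and one module to DEBUG.
func exampleConfigureLoggers(ctx LoggerContext) error {
	return ctx.ConfigureLoggers("<root>=WARNING;juju.worker.uniter=DEBUG")
}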
// LogWriter provides an interface for writing log records.
type LogWriter interface {
// Log writes the given log records to the logger's storage.
Log([]LogRecord) error
}
// LogWriterCloser is a Logger that can be closed.
type LogWriterCloser interface {
LogWriter
io.Closer
}
// ModelLogger keeps track of all the log writers, which can be accessed
// by a given model uuid.
type ModelLogger interface {
// Closer provides a Close() method which calls Close() on
// each of the tracked log writers.
io.Closer
// GetLogWriter returns a log writer for the given model and keeps
// track of it, returning the same one if called again.
GetLogWriter(ctx context.Context, modelUUID model.UUID) (LogWriterCloser, error)
// RemoveLogWriter stops tracking the given model's log writer and
// calls Close() on the log writer.
RemoveLogWriter(modelUUID model.UUID) error
}
// LoggerContextGetter is an interface that is used to get a LoggerContext.
type LoggerContextGetter interface {
// GetLoggerContext returns a LoggerContext for the given name.
GetLoggerContext(ctx context.Context, modelUUID model.UUID) (LoggerContext, error)
}
// ModelLogSinkGetter gets the ModelLogger or the LoggerContext for a given
// model.
type ModelLogSinkGetter interface {
ModelLogger
LoggerContextGetter
}
// LogWriterForModelFunc is a function which returns a log writer for a given
// model.
type LogWriterForModelFunc func(ctx context.Context, key LoggerKey) (LogWriterCloser, error)
// LoggerKey is a key used to identify a logger.
type LoggerKey struct {
ModelUUID string
ModelName string
ModelOwner string
}
// ModelLogFile makes an absolute model log file path.
func ModelLogFile(logDir string, key LoggerKey) string {
filename := fmt.Sprintf("%s-%s-%s.log", key.ModelOwner, key.ModelName, names.NewModelTag(key.ModelUUID).ShortId())
return filepath.Join(logDir, "models", filename)
}
// Copyright 2022 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"encoding/json"
"fmt"
"time"
"github.com/juju/errors"
)
// LogRecord defines a single Juju log message as returned by
// LogTailer.
type LogRecord struct {
Time time.Time
// origin fields
ModelUUID string
Entity string
// logging-specific fields
Level Level
Module string
Location string
Message string
Labels map[string]string
}
type logRecordJSON struct {
ModelUUID string `json:"model-uuid,omitempty"`
Time time.Time `json:"timestamp"`
Entity string `json:"entity"`
Level string `json:"level"`
Module string `json:"module"`
Location string `json:"location"`
Message string `json:"message"`
Labels map[string]string `json:"labels,omitempty"`
}
func (r *LogRecord) MarshalJSON() ([]byte, error) {
jrec := logRecordJSON{
ModelUUID: r.ModelUUID,
Time: r.Time,
Entity: r.Entity,
Level: r.Level.String(),
Module: r.Module,
Location: r.Location,
Message: r.Message,
Labels: r.Labels,
}
return json.Marshal(jrec)
}
func (r *LogRecord) UnmarshalJSON(data []byte) error {
var jrec logRecordJSON
if err := json.Unmarshal(data, &jrec); err != nil {
return errors.Trace(err)
}
level, ok := ParseLevelFromString(jrec.Level)
if !ok {
return fmt.Errorf("log level %q %w", jrec.Level, errors.NotValid)
}
r.Time = jrec.Time
r.Entity = jrec.Entity
r.Level = level
r.Module = jrec.Module
r.Location = jrec.Location
r.Message = jrec.Message
r.Labels = jrec.Labels
return nil
}
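// exampleLogRecordRoundTrip is an illustrative sketch (not part of the
// original package): a record marshalled to JSON and unmarshalled back should
// preserve its level and message. The field values are hypothetical.
func exampleLogRecordRoundTrip() (LogRecord, error) {
	in := LogRecord{Time: time.Now(), Entity: "machine-0", Level: INFO, Message: "started"}
	data, err := in.MarshalJSON()
	if err != nil {
		return LogRecord{}, err
	}
	var out LogRecord
	err = out.UnmarshalJSON(data)
	return out, err
}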
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package migration
import (
"time"
"github.com/juju/description/v9"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/juju/version/v2"
"github.com/juju/juju/core/resource"
)
// MigrationStatus holds the details for a migration as needed by
// the migrationmaster worker.
type MigrationStatus struct {
// MigrationId holds the unique id for the migration.
MigrationId string
// ModelUUID holds the UUID of the model being migrated.
ModelUUID string
// Phase indicates the current migration phase.
Phase Phase
// PhaseChangedTime indicates the time the phase was changed to
// its current value.
PhaseChangedTime time.Time
// TargetInfo contains the details of how to connect to the target
// controller.
TargetInfo TargetInfo
}
// SerializedModel wraps a buffer containing a serialised Juju model, as
// well as metadata about the charms and tools used by the model.
type SerializedModel struct {
// Bytes contains the serialized data for the model.
Bytes []byte
// Charms lists the charm URLs in use in the model.
Charms []string
// Tools lists the tools versions in use with the model along with
// their URIs. The URIs can be used to download the tools from the
// source controller.
Tools map[version.Binary]string // version -> tools URI
// Resources represents all the resources in use in the model.
Resources []resource.Resource
}
// ModelInfo is used to report basic details about a model.
type ModelInfo struct {
UUID string
Owner names.UserTag
Name string
AgentVersion version.Number
ControllerAgentVersion version.Number
ModelDescription description.Model
}
// SourceControllerInfo holds the details required to connect
// to a migration's source controller.
type SourceControllerInfo struct {
ControllerTag names.ControllerTag
ControllerAlias string
Addrs []string
CACert string
}
func (i *ModelInfo) Validate() error {
if i.UUID == "" {
return errors.NotValidf("empty UUID")
}
if i.Owner.Name() == "" {
return errors.NotValidf("empty Owner")
}
if i.Name == "" {
return errors.NotValidf("empty Name")
}
if i.AgentVersion.Compare(version.Number{}) == 0 {
return errors.NotValidf("empty Version")
}
return nil
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package migration
// MinionReports returns information about the migration minion
// reports received so far for a given migration phase.
type MinionReports struct {
// MigrationId holds the unique identifier for the model migration.
MigrationId string
// Phase indicates the migration phase the reports relate to.
Phase Phase
// SuccessCount indicates how many agents have successfully
// completed the migration phase.
SuccessCount int
// UnknownCount indicates how many agents are yet to report
// regarding the migration phase.
UnknownCount int
// SomeUnknownMachines holds the ids of some of the machines which
// have not yet reported in.
SomeUnknownMachines []string
// SomeUnknownUnits holds the names of some of the units which
// have not yet reported in.
SomeUnknownUnits []string
// SomeUnknownApplications holds the names of some of the applications which
// have not yet reported in.
SomeUnknownApplications []string
// FailedMachines holds the ids of machines which have failed to
// complete the migration phase.
FailedMachines []string
// FailedUnits holds the names of units which have failed to
// complete the migration phase.
FailedUnits []string
// FailedApplications holds the names of applications which have failed to
// complete the migration phase.
FailedApplications []string
}
// IsZero returns true if the MinionReports instance hasn't been set.
func (r *MinionReports) IsZero() bool {
return r.MigrationId == "" && r.Phase == UNKNOWN
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package migration
// Phase values specify model migration phases.
type Phase int
// Enumerate all possible migration phases.
const (
UNKNOWN Phase = iota
NONE
QUIESCE
IMPORT
PROCESSRELATIONS
VALIDATION
SUCCESS
LOGTRANSFER
REAP
REAPFAILED
DONE
ABORT
ABORTDONE
)
var phaseNames = []string{
"UNKNOWN", // To catch uninitialised fields.
"NONE", // For watchers to indicate there's never been a migration attempt.
"QUIESCE",
"IMPORT",
"PROCESSRELATIONS",
"VALIDATION",
"SUCCESS",
"LOGTRANSFER",
"REAP",
"REAPFAILED",
"DONE",
"ABORT",
"ABORTDONE",
}
// SuccessfulMigrationPhases returns the phases of a complete, successful
// migration round. It is only used for testing purposes.
func SuccessfulMigrationPhases() []Phase {
return []Phase{
IMPORT,
PROCESSRELATIONS,
VALIDATION,
SUCCESS,
LOGTRANSFER,
REAP,
DONE,
}
}
// String returns the name of a model migration phase constant.
func (p Phase) String() string {
i := int(p)
if i >= 0 && i < len(phaseNames) {
return phaseNames[i]
}
return "UNKNOWN"
}
// CanTransitionTo returns true if the given phase is a valid next
// model migration phase.
func (p Phase) CanTransitionTo(targetPhase Phase) bool {
nextPhases, exists := validTransitions[p]
if !exists {
return false
}
for _, nextPhase := range nextPhases {
if nextPhase == targetPhase {
return true
}
}
return false
}
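// examplePhaseTransitions is an illustrative sketch (not part of the original
// package) of consulting the transition table: QUIESCE may move to IMPORT,
// but never directly to DONE.
func examplePhaseTransitions() (bool, bool) {
	return QUIESCE.CanTransitionTo(IMPORT), QUIESCE.CanTransitionTo(DONE)
}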
// IsTerminal returns true if the phase is one which signifies the end
// of a migration.
func (p Phase) IsTerminal() bool {
for _, t := range terminalPhases {
if p == t {
return true
}
}
return false
}
// IsRunning returns true if the phase indicates the migration is
// active and up to or at the SUCCESS phase. It returns false if the
// phase is one of the final cleanup phases or indicates a failed
// migration.
func (p Phase) IsRunning() bool {
if p.IsTerminal() {
return false
}
switch p {
case QUIESCE, IMPORT, PROCESSRELATIONS, VALIDATION, SUCCESS:
return true
default:
return false
}
}
// Define all possible phase transitions.
//
// The keys are the "from" states and the values enumerate the
// possible "to" states.
var validTransitions = map[Phase][]Phase{
QUIESCE: {IMPORT, ABORT},
IMPORT: {PROCESSRELATIONS, ABORT},
PROCESSRELATIONS: {VALIDATION, ABORT},
VALIDATION: {SUCCESS, ABORT},
SUCCESS: {LOGTRANSFER},
LOGTRANSFER: {REAP},
REAP: {DONE, REAPFAILED},
ABORT: {ABORTDONE},
}
var terminalPhases []Phase
func init() {
// Compute the terminal phases.
for p := 0; p < len(phaseNames); p++ {
phase := Phase(p)
if _, exists := validTransitions[phase]; !exists {
terminalPhases = append(terminalPhases, phase)
}
}
}
// ParsePhase converts a string model migration phase name
// to its constant value.
func ParsePhase(target string) (Phase, bool) {
for p, name := range phaseNames {
if target == name {
return Phase(p), true
}
}
return UNKNOWN, false
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package migration
import (
"github.com/juju/errors"
"github.com/juju/names/v6"
"gopkg.in/macaroon.v2"
"github.com/juju/juju/core/network"
)
// TargetInfo holds the details required to connect to a
// migration's target controller.
//
// TODO(mjs) - Note the similarity to api.Info. It would be nice to be
// able to use api.Info here but state can't import api and moving
// api.Info to live under the core package is too big a project to be
// done right now.
type TargetInfo struct {
// ControllerTag holds tag for the target controller.
ControllerTag names.ControllerTag
// ControllerAlias holds an optional alias for the target controller.
ControllerAlias string
// Addrs holds the addresses and ports of the target controller's
// API servers.
Addrs []string
// CACert holds the CA certificate that will be used to validate
// the target API server's certificate, in PEM format.
CACert string
// AuthTag holds the user tag to authenticate with to the target
// controller.
AuthTag names.UserTag
// Password holds the password to use with AuthTag.
Password string
// Macaroons holds macaroons to use with AuthTag. At least one of
// Password or Macaroons must be set.
Macaroons []macaroon.Slice
}
// Validate returns an error if the TargetInfo contains bad data. Nil
// is returned otherwise.
func (info *TargetInfo) Validate() error {
if !names.IsValidModel(info.ControllerTag.Id()) {
return errors.NotValidf("ControllerTag")
}
if len(info.Addrs) < 1 {
return errors.NotValidf("empty Addrs")
}
for _, addr := range info.Addrs {
_, err := network.ParseMachineHostPort(addr)
if err != nil {
return errors.NotValidf("%q in Addrs", addr)
}
}
if info.AuthTag.Id() == "" && len(info.Macaroons) == 0 {
return errors.NotValidf("empty AuthTag")
}
if info.Password == "" && len(info.Macaroons) == 0 {
return errors.NotValidf("missing Password & Macaroons")
}
return nil
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package model
import "context"
type contextKey string
const (
// ContextKeyModelUUID is the key used to store the model UUID in the
// context.
ContextKeyModelUUID contextKey = "model-uuid"
)
// WithContextModelUUID returns a new context with the model UUID set.
func WithContextModelUUID(ctx context.Context, modelUUID UUID) context.Context {
return context.WithValue(ctx, ContextKeyModelUUID, modelUUID)
}
// ModelUUIDFromContext returns the model UUID from the context.
func ModelUUIDFromContext(ctx context.Context) (UUID, bool) {
modelUUID, ok := ctx.Value(ContextKeyModelUUID).(UUID)
return modelUUID, ok
}
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package model
// MachineJob values define responsibilities that machines may be
// expected to fulfil.
type MachineJob string
const (
JobHostUnits MachineJob = "JobHostUnits"
JobManageModel MachineJob = "JobManageModel"
)
// NeedsState returns true if the job requires a state connection.
func (job MachineJob) NeedsState() bool {
return job == JobManageModel
}
// AnyJobNeedsState returns true if any of the provided jobs
// require a state connection.
func AnyJobNeedsState(jobs ...MachineJob) bool {
for _, j := range jobs {
if j.NeedsState() {
return true
}
}
return false
}
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package model
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/version/v2"
"github.com/juju/juju/core/credential"
"github.com/juju/juju/core/life"
"github.com/juju/juju/core/user"
"github.com/juju/juju/internal/uuid"
)
// ModelType indicates a model type.
type ModelType string
const (
// IAAS is the type for IAAS models.
IAAS ModelType = "iaas"
// CAAS is the type for CAAS models.
CAAS ModelType = "caas"
)
// String returns m as a string.
func (m ModelType) String() string {
return string(m)
}
// IsValid returns true if the value of Type is a known valid type.
// Currently supported values are:
// - CAAS
// - IAAS
func (m ModelType) IsValid() bool {
switch m {
case CAAS, IAAS:
return true
}
return false
}
// Model represents the state of a model.
type Model struct {
// Name is the human friendly name of the model.
Name string
// Life is the current state of the model.
// Options are alive, dying, dead. Every model starts as alive, only
// during the destruction of the model it transitions to dying and then
// dead.
Life life.Value
// UUID is the universally unique identifier of the model.
UUID UUID
// ModelType is the type of model.
ModelType ModelType
// AgentVersion is the target version for agents running under this model.
AgentVersion version.Number
// Cloud is the name of the cloud to associate with the model.
// Must not be empty for a valid struct.
Cloud string
// CloudType is the type of the underlying cloud (e.g. lxd, azure, ...)
CloudType string
// CloudRegion is the region that the model will use in the cloud.
CloudRegion string
// Credential is the id attributes for the credential to be associated with
// model. Credential must be for the same cloud as that of the model.
// Credential can be the zero value of the struct to not have a credential
// associated with the model.
Credential credential.Key
// Owner is the uuid of the user that owns this model in the Juju controller.
Owner user.UUID
// OwnerName is the name of the owner in the Juju controller.
OwnerName user.Name
}
// UUID represents a model unique identifier.
type UUID string
// NewUUID is a convenience function for generating a new model uuid.
func NewUUID() (UUID, error) {
uuid, err := uuid.NewUUID()
if err != nil {
return UUID(""), err
}
return UUID(uuid.String()), nil
}
// String implements the stringer interface for UUID.
func (u UUID) String() string {
return string(u)
}
// Validate ensures the consistency of the UUID. If the uuid is invalid an error
// satisfying [errors.NotValid] will be returned.
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("%wuuid cannot be empty", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("uuid %q %w", u, errors.NotValid)
}
return nil
}
// Copyright 2025 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package model
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/juju/names/v6"
)
const (
// DefaultSuffixDigits defines how many of the uuid digits to use.
// Since the suffix function asserts that the modelUUID is valid, we know
// it follows the UUID string format that ends with eight hex digits.
DefaultSuffixDigits = uint(6)
// maxSuffixLength is the maximum number of UUID digits to use.
maxSuffixLength = 32
// minMaxNameLength is the minimum allowed maxNameLength value.
minMaxNameLength = 16
// minResourceNameComponentLength is the minimum length, including separator,
// of the resource name component of the disambiguated name.
minResourceNameComponentLength = 5
)
func suffix(modelUUID string, suffixLength uint) (string, error) {
if !names.IsValidModel(modelUUID) {
return "", errors.NotValidf("model UUID %q", modelUUID)
}
// The suffix is the last suffixLength hex digits of the model uuid.
modelUUIDDigitsOnly := strings.ReplaceAll(modelUUID, "-", "")
return modelUUIDDigitsOnly[len(modelUUIDDigitsOnly)-int(suffixLength):], nil
}
// DisambiguateResourceName creates a unique resource name from the supplied name by
// appending a suffix derived from the model UUID.
// The maximum length of the entire resulting resource name is maxLength.
// To achieve maxLength, the name is right trimmed.
// The default suffix length [DefaultSuffixDigits] is used.
func DisambiguateResourceName(modelUUID string, name string, maxLength uint) (string, error) {
return DisambiguateResourceNameWithSuffixLength(modelUUID, name, maxLength, DefaultSuffixDigits)
}
// DisambiguateResourceNameWithSuffixLength creates a unique resource name from the supplied name by
// appending a suffix derived from the model UUID, using the specified suffix length.
// The maximum length of the entire resulting resource name is maxLength.
// To achieve maxLength, the name is right trimmed.
// The suffix length must be at least [DefaultSuffixDigits] and must leave
// room for the resource name component within maxNameLength.
func DisambiguateResourceNameWithSuffixLength(modelUUID string, name string, maxNameLength, suffixLength uint) (string, error) {
if maxNameLength < minMaxNameLength {
return "", fmt.Errorf("maxNameLength (%d) must be greater than %d", maxNameLength, minMaxNameLength)
}
var maxAllowedSuffixLength uint = maxSuffixLength
if maxAllowedSuffixLength > maxNameLength-minResourceNameComponentLength {
maxAllowedSuffixLength = maxNameLength - minResourceNameComponentLength
}
if suffixLength < DefaultSuffixDigits || suffixLength > maxAllowedSuffixLength {
return "", fmt.Errorf("suffixLength (%d) must be between %d and %d", suffixLength, DefaultSuffixDigits, maxAllowedSuffixLength)
}
if overflow := len(name) + 1 + int(suffixLength) - int(maxNameLength); overflow > 0 {
name = name[0 : len(name)-overflow]
}
suffix, err := suffix(modelUUID, suffixLength)
if err != nil {
return "", err
}
return fmt.Sprintf("%s-%s", name, suffix), nil
}
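// exampleDisambiguateResourceName is an illustrative sketch (not part of the
// original package); the model UUID and resource name are hypothetical. With
// the default 6-digit suffix and a 63-character limit, the result is the
// (possibly right-trimmed) name followed by "-" and the last six hex digits
// of the UUID, e.g. "my-application-storage-06f00d".
func exampleDisambiguateResourceName() (string, error) {
	return DisambiguateResourceName(
		"deadbeef-0bad-400d-8000-4b1d0d06f00d", "my-application-storage", 63,
	)
}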
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package modelconfig
import (
"fmt"
"github.com/juju/errors"
)
// ContainerNetworkingMethod defines a strong type for setting and reading the
// model config value for container networking method.
type ContainerNetworkingMethod string
const (
// ContainerNetworkingMethodLocal sets and indicates that the operator of
// the model has deemed that the local method be used for all container
// networking within the model.
ContainerNetworkingMethodLocal = ContainerNetworkingMethod("local")
// ContainerNetworkingMethodProvider sets and indicates that the operator of
// the model has deemed that the provider method be used for all
// container networking within the model.
ContainerNetworkingMethodProvider = ContainerNetworkingMethod("provider")
// ContainerNetworkingMethodAuto sets and indicates that the operator of
// the model has deemed that the Juju controller should determine the best
// container networking method for the model based on the cloud
// that is in use.
ContainerNetworkingMethodAuto = ContainerNetworkingMethod("")
)
// String implements the stringer interface returning a human readable string
// representation of the container networking method.
func (c ContainerNetworkingMethod) String() string {
return string(c)
}
// Validate checks that the value of [ContainerNetworkingMethod] is an
// understood value by the system. If the value is not valid an error satisfying
// [errors.NotValid] will be returned.
func (c ContainerNetworkingMethod) Validate() error {
switch c {
case ContainerNetworkingMethodAuto,
ContainerNetworkingMethodProvider,
ContainerNetworkingMethodLocal:
return nil
default:
return fmt.Errorf("container networking method value %q %w", c, errors.NotValid)
}
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"bytes"
"context"
"fmt"
"net"
"sort"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
// Private and special use network ranges for IPv4 and IPv6.
// See: http://tools.ietf.org/html/rfc1918
// Also: http://tools.ietf.org/html/rfc4193
// And: https://tools.ietf.org/html/rfc6890
var (
classAPrivate = mustParseCIDR("10.0.0.0/8")
classBPrivate = mustParseCIDR("172.16.0.0/12")
classCPrivate = mustParseCIDR("192.168.0.0/16")
ipv6UniqueLocal = mustParseCIDR("fc00::/7")
classEReserved = mustParseCIDR("240.0.0.0/4")
)
func mustParseCIDR(s string) *net.IPNet {
_, ipNet, err := net.ParseCIDR(s)
if err != nil {
panic(err)
}
return ipNet
}
// AddressConfigType defines valid network link configuration types.
// See interfaces(5) for details.
type AddressConfigType string
const (
ConfigUnknown AddressConfigType = ""
ConfigDHCP AddressConfigType = "dhcp"
ConfigStatic AddressConfigType = "static"
ConfigManual AddressConfigType = "manual"
ConfigLoopback AddressConfigType = "loopback"
)
// IsValidAddressConfigType returns whether the given value is a valid
// method to configure a link-layer network device's IP address.
// TODO (manadart 2021-05-04): There is an issue with the usage of this
// method in state where we have denormalised the config method so it is
// against device addresses. This is because "manual" indicates a device that
// has no configuration by default. This could never apply to an address.
func IsValidAddressConfigType(value string) bool {
switch AddressConfigType(value) {
case ConfigLoopback, ConfigStatic, ConfigDHCP, ConfigManual:
return true
}
return false
}
// AddressType represents the possible ways of specifying a machine location by
// either a hostname resolvable by dns lookup, or IPv4 or IPv6 address.
type AddressType string
const (
HostName AddressType = "hostname"
IPv4Address AddressType = "ipv4"
IPv6Address AddressType = "ipv6"
)
// Scope denotes the context a location may apply to. If a name or address can
// be reached from the wider internet, it is considered public.
// A private network address is either specific to the cloud or cloud subnet a
// machine belongs to, or to the machine itself for containers.
type Scope string
const (
ScopeUnknown Scope = ""
ScopePublic Scope = "public"
ScopeCloudLocal Scope = "local-cloud"
ScopeFanLocal Scope = "local-fan"
ScopeMachineLocal Scope = "local-machine"
ScopeLinkLocal Scope = "link-local"
)
// ScopeMatch is a numeric designation of how well the requirement
// for satisfying a scope is met.
type ScopeMatch int
const (
invalidScope ScopeMatch = iota
exactScopeIPv4
exactScope
firstFallbackScopeIPv4
firstFallbackScope
secondFallbackScopeIPv4
secondFallbackScope
)
// Address describes methods for returning details
// about an IP address or host name.
type Address interface {
// Host returns the value for the host-name/IP address.
Host() string
// AddressType returns the type of the address.
AddressType() AddressType
// AddressScope returns the scope of the address.
AddressScope() Scope
// AddressCIDR returns the subnet CIDR of the address.
AddressCIDR() string
// AddressConfigType returns the configuration method of the address.
AddressConfigType() AddressConfigType
// AddressIsSecondary returns whether this address is not the
// primary address associated with the network device.
AddressIsSecondary() bool
}
// ScopeMatchFunc is an alias for a function that accepts an Address,
// and returns what kind of scope match is determined by the body.
type ScopeMatchFunc = func(addr Address) ScopeMatch
// ExactScopeMatch checks if an address exactly
// matches any of the specified scopes.
func ExactScopeMatch(addr Address, addrScopes ...Scope) bool {
for _, scope := range addrScopes {
if addr.AddressScope() == scope {
return true
}
}
return false
}
// SortOrderMostPublic calculates the "weight" of the address to use when
// sorting such that the most accessible addresses will appear first:
// - public IPs first;
// - hostnames after that, but "localhost" will be last if present;
// - cloud-local next;
// - fan-local next;
// - machine-local next;
// - link-local next;
// - non-hostnames with unknown scope last.
// Secondary addresses with otherwise equal weight will be sorted to come after
// primary addresses, including host names *except* localhost.
func SortOrderMostPublic(a Address) int {
order := 100
switch a.AddressScope() {
case ScopePublic:
order = 0
// Special case to ensure that these follow non-localhost host names.
if a.AddressIsSecondary() {
order = 10
}
case ScopeCloudLocal:
order = 30
case ScopeFanLocal:
order = 50
case ScopeMachineLocal:
order = 70
case ScopeLinkLocal:
order = 90
}
switch a.AddressType() {
case HostName:
order = 10
if a.Host() == "localhost" {
order = 20
}
case IPv6Address:
order++
case IPv4Address:
}
if a.AddressIsSecondary() {
order += 2
}
return order
}
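// exampleSortByAccessibility is an illustrative sketch (not part of the
// original package): ordering addresses so the most publicly accessible come
// first, using the weights computed by SortOrderMostPublic.
func exampleSortByAccessibility(addrs []SpaceAddress) {
	sort.Slice(addrs, func(i, j int) bool {
		return SortOrderMostPublic(addrs[i]) < SortOrderMostPublic(addrs[j])
	})
}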
// MachineAddress represents an address without associated space or provider
// information. Addresses of this form will be supplied by an agent running
// directly on a machine or container, or returned for requests where space
// information is irrelevant to usage.
type MachineAddress struct {
// Value is an IP address or hostname.
Value string
// Type indicates the form of the address value;
// IPv4, IPv6 or host-name.
Type AddressType
// Scope indicates the visibility of this address.
Scope Scope
// CIDR is used for IP addresses to indicate
// the subnet that they are part of.
CIDR string
// ConfigType denotes how this address was configured.
ConfigType AddressConfigType
// IsSecondary if true, indicates that this address is not the primary
// address associated with the network device.
IsSecondary bool
}
// Host returns the value for the host-name/IP address.
func (a MachineAddress) Host() string {
return a.Value
}
// AddressType returns the type of the address.
func (a MachineAddress) AddressType() AddressType {
return a.Type
}
// AddressScope returns the scope of the address.
func (a MachineAddress) AddressScope() Scope {
return a.Scope
}
// AddressCIDR returns the subnet CIDR of the address.
func (a MachineAddress) AddressCIDR() string {
return a.CIDR
}
// AddressConfigType returns the configuration method of the address.
func (a MachineAddress) AddressConfigType() AddressConfigType {
return a.ConfigType
}
// AddressIsSecondary returns whether this address is not the
// primary address associated with the network device.
func (a MachineAddress) AddressIsSecondary() bool {
return a.IsSecondary
}
// GoString implements fmt.GoStringer.
func (a MachineAddress) GoString() string {
return a.String()
}
// String returns the address value, prefixed with the scope if known.
func (a MachineAddress) String() string {
var prefix string
if a.Scope != ScopeUnknown {
prefix = string(a.Scope) + ":"
}
return prefix + a.Value
}
// IP returns the net.IP representation of this address.
func (a MachineAddress) IP() net.IP {
return net.ParseIP(a.Value)
}
// ValueWithMask returns the value of the address combined
// with the subnet mask indicated by its CIDR.
func (a MachineAddress) ValueWithMask() (string, error) {
// Returning a NotFound error preserves prior behaviour from when
// CIDRAddress was a method on InterfaceInfo.
// TODO (manadart 2021-03-16): Rethink this as we clean up InterfaceInfos
// and its corresponding wire type.
if a.Value == "" || a.CIDR == "" {
return "", errors.NotFoundf("address and CIDR pair (%q, %q)", a.Value, a.CIDR)
}
_, ipNet, err := net.ParseCIDR(a.CIDR)
if err != nil {
return "", errors.Trace(err)
}
ip := a.IP()
if ip == nil {
return "", errors.Errorf("cannot parse IP address %q", a.Value)
}
ipNet.IP = ip
return ipNet.String(), nil
}
// AsProviderAddress is used to construct a ProviderAddress
// from a MachineAddress
func (a MachineAddress) AsProviderAddress(options ...func(mutator ProviderAddressMutator)) ProviderAddress {
addr := ProviderAddress{MachineAddress: a}
for _, option := range options {
option(&addr)
}
return addr
}
// NewMachineAddress creates a new MachineAddress,
// applying any supplied options to the result.
func NewMachineAddress(value string, options ...func(AddressMutator)) MachineAddress {
addr := MachineAddress{
Value: value,
Type: DeriveAddressType(value),
Scope: ScopeUnknown,
}
for _, option := range options {
option(&addr)
}
if addr.Scope == ScopeUnknown {
addr.Scope = deriveScope(addr)
}
return addr
}
// MachineAddresses is a slice of MachineAddress
type MachineAddresses []MachineAddress
// NewMachineAddresses is a convenience function to create addresses
// from a variable number of string arguments, applying any supplied
// options to each address
func NewMachineAddresses(values []string, options ...func(AddressMutator)) MachineAddresses {
if len(values) == 0 {
return nil
}
addrs := make(MachineAddresses, len(values))
for i, value := range values {
addrs[i] = NewMachineAddress(value, options...)
}
return addrs
}
// AsProviderAddresses is used to construct ProviderAddresses
// element-wise from MachineAddresses
func (as MachineAddresses) AsProviderAddresses(options ...func(mutator ProviderAddressMutator)) ProviderAddresses {
if len(as) == 0 {
return nil
}
addrs := make(ProviderAddresses, len(as))
for i, addr := range as {
addrs[i] = addr.AsProviderAddress(options...)
}
return addrs
}
// AllMatchingScope returns the addresses that satisfy
// the input scope matching function.
func (as MachineAddresses) AllMatchingScope(getMatcher ScopeMatchFunc) MachineAddresses {
return allMatchingScope(as, getMatcher)
}
// Values transforms the MachineAddresses to a string slice
// containing their raw IP values.
func (as MachineAddresses) Values() []string {
return toStrings(as)
}
// deriveScope attempts to derive the network scope from an address'
// type and value, returning the original network scope if no
// deduction can be made.
func deriveScope(addr MachineAddress) Scope {
if addr.Type == HostName {
return addr.Scope
}
ip := net.ParseIP(addr.Value)
if ip == nil {
return addr.Scope
}
if ip.IsLoopback() {
return ScopeMachineLocal
}
if isIPv4PrivateNetworkAddress(addr.Type, ip) ||
isIPv6UniqueLocalAddress(addr.Type, ip) {
return ScopeCloudLocal
}
if isIPv4ReservedEAddress(addr.Type, ip) {
return ScopeFanLocal
}
if ip.IsLinkLocalMulticast() ||
ip.IsLinkLocalUnicast() ||
ip.IsInterfaceLocalMulticast() {
return ScopeLinkLocal
}
if ip.IsGlobalUnicast() {
return ScopePublic
}
return addr.Scope
}
func isIPv4PrivateNetworkAddress(addrType AddressType, ip net.IP) bool {
if addrType != IPv4Address {
return false
}
return classAPrivate.Contains(ip) ||
classBPrivate.Contains(ip) ||
classCPrivate.Contains(ip)
}
func isIPv4ReservedEAddress(addrType AddressType, ip net.IP) bool {
if addrType != IPv4Address {
return false
}
return classEReserved.Contains(ip)
}
func isIPv6UniqueLocalAddress(addrType AddressType, ip net.IP) bool {
if addrType != IPv6Address {
return false
}
return ipv6UniqueLocal.Contains(ip)
}
// InterfaceAddrs is patched for tests.
var InterfaceAddrs = func() ([]net.Addr, error) {
return net.InterfaceAddrs()
}
// IsLocalAddress returns true if the provided IP address equals to one of the
// local IP addresses.
func IsLocalAddress(ip net.IP) (bool, error) {
addrs, err := InterfaceAddrs()
if err != nil {
return false, errors.Trace(err)
}
for _, addr := range addrs {
localIP, _, err := net.ParseCIDR(addr.String())
if err != nil {
continue
}
if localIP.To4() != nil || localIP.To16() != nil {
if ip.Equal(localIP) {
return true, nil
}
}
}
return false, nil
}
// ProviderAddress represents an address supplied by provider logic.
// It can include the provider's knowledge of the space in which the
// address resides.
type ProviderAddress struct {
MachineAddress
// SpaceName is the space in which this address resides
SpaceName SpaceName
// ProviderSpaceID is the provider's ID for the space this address is in
ProviderSpaceID Id
// ProviderID is the ID of this address's provider
ProviderID Id
// ProviderSubnetID is the provider's ID for the subnet this address is in
ProviderSubnetID Id
// ProviderVLANID is the provider's ID for the VLAN this address is in
ProviderVLANID Id
// VLANTag is the tag associated with this address's VLAN
VLANTag int
}
// GoString implements fmt.GoStringer.
func (a ProviderAddress) GoString() string {
return a.String()
}
// String returns a string representation of the address, in the form:
// `<scope>:<address-value>@<space-name>(id:<space-provider-id>)`; for example:
//
// public:c2-54-226-162-124.compute-1.amazonaws.com@public-api(id:42)
//
// If the SpaceName is blank, the "@<space-name>" suffix will be omitted.
// Finally, if the ProviderSpaceID is empty the suffix
// "(id:<space-provider-id>)" part will be omitted as well.
func (a ProviderAddress) String() string {
var buf bytes.Buffer
buf.WriteString(a.MachineAddress.String())
var spaceFound bool
if a.SpaceName != "" {
spaceFound = true
buf.WriteByte('@')
buf.WriteString(string(a.SpaceName))
}
if a.ProviderSpaceID != "" {
if !spaceFound {
buf.WriteByte('@')
}
buf.WriteString(fmt.Sprintf("(id:%v)", string(a.ProviderSpaceID)))
}
return buf.String()
}
// ProviderAddresses is a slice of ProviderAddress
// supporting conversion to SpaceAddresses.
type ProviderAddresses []ProviderAddress
// Values transforms the ProviderAddresses to a string slice containing
// their raw IP values.
func (pas ProviderAddresses) Values() []string {
return toStrings(pas)
}
// ToSpaceAddresses transforms the ProviderAddresses to SpaceAddresses by using
// the input lookup to get a space ID from the name or the CIDR.
func (pas ProviderAddresses) ToSpaceAddresses(spaceInfos SpaceInfos) (SpaceAddresses, error) {
if pas == nil {
return nil, nil
}
sas := make(SpaceAddresses, len(pas))
for i, pa := range pas {
sas[i] = SpaceAddress{MachineAddress: pa.MachineAddress}
// If the provider explicitly sets the space, i.e. MAAS, prefer the name.
if pa.SpaceName != "" {
info := spaceInfos.GetByName(string(pa.SpaceName))
if info == nil {
return nil, errors.NotFoundf("space with name %q", pa.SpaceName)
}
sas[i].SpaceID = info.ID
continue
}
// Otherwise attempt to look up the CIDR.
sInfo, err := spaceInfos.InferSpaceFromCIDRAndSubnetID(pa.CIDR, string(pa.ProviderSubnetID))
if err != nil {
logger.Debugf(context.TODO(), "no matching subnet for CIDR %q and provider ID %q", pa.CIDR, pa.ProviderSubnetID)
continue
}
sas[i].SpaceID = sInfo.ID
}
return sas, nil
}
// OneMatchingScope returns the address that best satisfies the input scope
// matching function. The boolean return indicates if a match was found.
func (pas ProviderAddresses) OneMatchingScope(getMatcher ScopeMatchFunc) (ProviderAddress, bool) {
indexes := indexesForScope(pas, getMatcher)
if len(indexes) == 0 {
return ProviderAddress{}, false
}
addr := pas[indexes[0]]
logger.Debugf(context.TODO(), "selected %q as address, using scope %q", addr.Value, addr.Scope)
return addr, true
}
// SpaceAddress represents the location of a machine, including metadata
// about what kind of location the address describes.
// This is a server-side type that may include a space reference.
// It is used in logic for filtering addresses by space.
type SpaceAddress struct {
MachineAddress
SpaceID string
}
// GoString implements fmt.GoStringer.
func (a SpaceAddress) GoString() string {
return a.String()
}
// String returns a string representation of the address, in the form:
// `<scope>:<address-value>@space:<space-id>`; for example:
//
// public:c2-54-226-162-124.compute-1.amazonaws.com@space:1
//
// If the Space ID is empty, the @space:<space-id> suffix will be omitted.
func (a SpaceAddress) String() string {
var buf bytes.Buffer
buf.WriteString(a.MachineAddress.String())
if a.SpaceID != "" {
buf.WriteString("@space:")
buf.WriteString(a.SpaceID)
}
return buf.String()
}
// NewSpaceAddress creates a new SpaceAddress,
// applying any supplied options to the result.
func NewSpaceAddress(value string, options ...func(mutator AddressMutator)) SpaceAddress {
return SpaceAddress{MachineAddress: NewMachineAddress(value, options...)}
}
// SpaceAddresses is a slice of SpaceAddress
// supporting conversion to ProviderAddresses.
type SpaceAddresses []SpaceAddress
// NewSpaceAddresses is a convenience function to create addresses
// from a variable number of string arguments.
func NewSpaceAddresses(inAddresses ...string) (outAddresses SpaceAddresses) {
outAddresses = make(SpaceAddresses, len(inAddresses))
for i, address := range inAddresses {
outAddresses[i] = NewSpaceAddress(address)
}
return outAddresses
}
// Values returns a slice of strings containing the IP/host-name of each of
// the receiver addresses.
func (sas SpaceAddresses) Values() []string {
return toStrings(sas)
}
// ToProviderAddresses transforms the SpaceAddresses to ProviderAddresses by using
// the input lookup for conversion of space ID to space info.
func (sas SpaceAddresses) ToProviderAddresses(spaceInfos SpaceInfos) (ProviderAddresses, error) {
if sas == nil {
return nil, nil
}
pas := make(ProviderAddresses, len(sas))
for i, sa := range sas {
pas[i] = ProviderAddress{MachineAddress: sa.MachineAddress}
if sa.SpaceID != "" {
info := spaceInfos.GetByID(sa.SpaceID)
if info == nil {
return nil, errors.NotFoundf("space with ID %q", sa.SpaceID)
}
pas[i].SpaceName = info.Name
pas[i].ProviderSpaceID = info.ProviderId
}
}
return pas, nil
}
// InSpaces returns the SpaceAddresses that are in the input spaces.
func (sas SpaceAddresses) InSpaces(spaces ...SpaceInfo) (SpaceAddresses, bool) {
if len(spaces) == 0 {
logger.Errorf(context.TODO(), "addresses not filtered - no spaces given.")
return sas, false
}
spaceInfos := SpaceInfos(spaces)
var selectedAddresses SpaceAddresses
for _, addr := range sas {
if space := spaceInfos.GetByID(addr.SpaceID); space != nil {
logger.Debugf(context.TODO(), "selected %q as an address in space %q", addr.Value, space.Name)
selectedAddresses = append(selectedAddresses, addr)
}
}
if len(selectedAddresses) > 0 {
return selectedAddresses, true
}
logger.Errorf(context.TODO(), "no addresses found in spaces %s", spaceInfos)
return sas, false
}
// OneMatchingScope returns the address that best satisfies the input scope
// matching function. The boolean return indicates if a match was found.
func (sas SpaceAddresses) OneMatchingScope(getMatcher ScopeMatchFunc) (SpaceAddress, bool) {
addrs := sas.AllMatchingScope(getMatcher)
if len(addrs) == 0 {
return SpaceAddress{}, false
}
return addrs[0], true
}
// AllMatchingScope returns the addresses that satisfy
// the input scope matching function.
func (sas SpaceAddresses) AllMatchingScope(getMatcher ScopeMatchFunc) SpaceAddresses {
return allMatchingScope(sas, getMatcher)
}
// EqualTo returns true if this set of SpaceAddresses is equal to other.
func (sas SpaceAddresses) EqualTo(other SpaceAddresses) bool {
if len(sas) != len(other) {
return false
}
sort.Sort(sas)
sort.Sort(other)
for i := 0; i < len(sas); i++ {
if sas[i].String() != other[i].String() {
return false
}
}
return true
}
func (sas SpaceAddresses) Len() int { return len(sas) }
func (sas SpaceAddresses) Swap(i, j int) { sas[i], sas[j] = sas[j], sas[i] }
func (sas SpaceAddresses) Less(i, j int) bool {
addr1 := sas[i]
addr2 := sas[j]
order1 := SortOrderMostPublic(addr1)
order2 := SortOrderMostPublic(addr2)
if order1 == order2 {
return addr1.Value < addr2.Value
}
return order1 < order2
}
// DeriveAddressType attempts to detect the type of address given.
func DeriveAddressType(value string) AddressType {
ip := net.ParseIP(value)
switch {
case ip == nil:
// TODO(gz): Check value is a valid hostname
return HostName
case ip.To4() != nil:
return IPv4Address
case ip.To16() != nil:
return IPv6Address
default:
panic("Unknown form of IP address")
}
}
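// Illustrative behaviour (a sketch, not part of the original source):
//
//	DeriveAddressType("10.0.0.1")         // IPv4Address
//	DeriveAddressType("2001:db8::1")      // IPv6Address
//	DeriveAddressType("host.example.com") // HostName (not yet validated as a hostname)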
// ScopeMatchPublic is an address scope matching function for determining the
// extent to which the input address' scope satisfies a requirement for public
// accessibility.
func ScopeMatchPublic(addr Address) ScopeMatch {
switch addr.AddressScope() {
case ScopePublic:
if addr.AddressType() == IPv4Address {
return exactScopeIPv4
}
return exactScope
case ScopeCloudLocal:
if addr.AddressType() == IPv4Address {
return firstFallbackScopeIPv4
}
return firstFallbackScope
case ScopeFanLocal, ScopeUnknown:
if addr.AddressType() == IPv4Address {
return secondFallbackScopeIPv4
}
return secondFallbackScope
}
return invalidScope
}
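// ScopeMatchMachineOrCloudLocal is an address scope matching function that
// treats machine-local addresses as an exact match and otherwise defers to
// ScopeMatchCloudLocal.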
func ScopeMatchMachineOrCloudLocal(addr Address) ScopeMatch {
if addr.AddressScope() == ScopeMachineLocal {
if addr.AddressType() == IPv4Address {
return exactScopeIPv4
}
return exactScope
}
return ScopeMatchCloudLocal(addr)
}
// ScopeMatchCloudLocal is an address scope matching function for determining
// the extent to which the input address' scope satisfies a requirement for
// accessibility from within the local cloud.
// Machine-only addresses do not satisfy this matcher.
func ScopeMatchCloudLocal(addr Address) ScopeMatch {
switch addr.AddressScope() {
case ScopeCloudLocal:
if addr.AddressType() == IPv4Address {
return exactScopeIPv4
}
return exactScope
case ScopeFanLocal:
if addr.AddressType() == IPv4Address {
return firstFallbackScopeIPv4
}
return firstFallbackScope
case ScopePublic, ScopeUnknown:
if addr.AddressType() == IPv4Address {
return secondFallbackScopeIPv4
}
return secondFallbackScope
}
return invalidScope
}
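// Illustrative usage of the scope matchers (a sketch, not part of the
// original source; the addresses are hypothetical):
//
//	addrs := NewSpaceAddresses("10.0.0.2", "54.32.10.4")
//	if addr, ok := addrs.OneMatchingScope(ScopeMatchPublic); ok {
//		// addr is expected to be the publicly routable 54.32.10.4 address.
//		_ = addr
//	}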
// MergedAddresses provides a single list of addresses without duplicates
// suitable for returning as an address list for a machine.
// TODO (cherylj) Add explicit unit tests - tracked with bug #1544158
func MergedAddresses(machineAddresses, providerAddresses []SpaceAddress) []SpaceAddress {
merged := make([]SpaceAddress, 0, len(providerAddresses)+len(machineAddresses))
providerValues := set.NewStrings()
for _, address := range providerAddresses {
// Older versions of Juju may have stored an empty address so ignore it here.
if address.Value == "" || providerValues.Contains(address.Value) {
continue
}
providerValues.Add(address.Value)
merged = append(merged, address)
}
for _, address := range machineAddresses {
if !providerValues.Contains(address.Value) {
merged = append(merged, address)
}
}
return merged
}
// CIDRAddressType returns back an AddressType to indicate whether the supplied
// CIDR corresponds to an IPV4 or IPV6 range. An error will be returned if a
// non-valid CIDR is provided.
//
// Caveat: if the provided CIDR corresponds to an IPV6 range with a 4in6
// prefix, the function will classify it as an IPV4 address. This is a known
// limitation of the go stdlib IP parsing code but it's not something that we
// are likely to encounter in the wild so there is no need to add extra logic
// to work around it.
func CIDRAddressType(cidr string) (AddressType, error) {
_, netIP, err := net.ParseCIDR(cidr)
if err != nil {
return "", err
}
if netIP.IP.To4() != nil {
return IPv4Address, nil
}
return IPv6Address, nil
}
// NetworkCIDRFromIPAndMask constructs a CIDR for a network by applying the
// provided netmask to the specified address (can be either a host or network
// address) and formatting the result as a CIDR.
//
// For example, passing 10.0.0.4 and a /24 mask yields 10.0.0.0/24.
func NetworkCIDRFromIPAndMask(ip net.IP, netmask net.IPMask) string {
if ip == nil || netmask == nil {
return ""
}
prefixLen, _ := netmask.Size()
return fmt.Sprintf("%s/%d", ip.Mask(netmask), prefixLen)
}
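// Illustrative usage (a sketch, not part of the original source):
//
//	ip := net.ParseIP("10.0.0.4")
//	mask := net.CIDRMask(24, 32)
//	_ = NetworkCIDRFromIPAndMask(ip, mask) // "10.0.0.0/24"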
// SpaceAddressCandidate describes property methods required
// for conversion to sortable space addresses.
type SpaceAddressCandidate interface {
Value() string
ConfigMethod() AddressConfigType
SubnetCIDR() string
IsSecondary() bool
}
// ConvertToSpaceAddress returns a SpaceAddress representing the
// input candidate address, using the input subnet lookup to
// associate the address with a space.
func ConvertToSpaceAddress(addr SpaceAddressCandidate, lookup SubnetLookup) (SpaceAddress, error) {
subnets, err := lookup.AllSubnetInfos()
if err != nil {
return SpaceAddress{}, errors.Trace(err)
}
cidr := addr.SubnetCIDR()
spaceAddr := SpaceAddress{
MachineAddress: NewMachineAddress(
addr.Value(),
WithCIDR(cidr),
WithConfigType(addr.ConfigMethod()),
WithSecondary(addr.IsSecondary()),
),
}
// Attempt to set the space ID based on the subnet.
if cidr != "" {
allMatching, err := subnets.GetByCIDR(cidr)
if err != nil {
return SpaceAddress{}, errors.Trace(err)
}
// This only holds true while CIDRs uniquely identify subnets.
if len(allMatching) != 0 {
spaceAddr.SpaceID = allMatching[0].SpaceID
}
}
return spaceAddr, nil
}
// noAddress represents an error when an address is requested but not available.
type noAddress struct {
errors.Err
}
// NoAddressError returns an error which satisfies IsNoAddressError(). The given
// addressKind specifies what kind of address(es) is missing, usually
// "private" or "public".
func NoAddressError(addressKind string) error {
newErr := errors.NewErr("no %s address(es)", addressKind)
newErr.SetLocation(1)
return &noAddress{newErr}
}
// IsNoAddressError reports whether err was created with NoAddressError().
func IsNoAddressError(err error) bool {
err = errors.Cause(err)
_, ok := err.(*noAddress)
return ok
}
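// Illustrative usage (a sketch, not part of the original source):
//
//	err := NoAddressError("public")
//	IsNoAddressError(err)                // true
//	IsNoAddressError(errors.New("boom")) // false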
// toStrings returns the IP addresses in string form for input
// that is a slice of types implementing the Address interface.
func toStrings[T Address](addrs []T) []string {
if addrs == nil {
return nil
}
ips := make([]string, len(addrs))
for i, addr := range addrs {
ips[i] = addr.Host()
}
return ips
}
func allMatchingScope[T Address](addrs []T, getMatcher ScopeMatchFunc) []T {
indexes := indexesForScope(addrs, getMatcher)
if len(indexes) == 0 {
return nil
}
out := make([]T, len(indexes))
for i, index := range indexes {
out[i] = addrs[index]
}
return out
}
// indexesForScope returns the indexes of the addresses with the best
// matching scope and type (according to the matchFunc).
// An empty slice is returned if there were no suitable addresses.
func indexesForScope[T Address](addrs []T, matchFunc ScopeMatchFunc) []int {
matches := filterAndCollateAddressIndexes(addrs, matchFunc)
for _, matchType := range scopeMatchHierarchy() {
indexes, ok := matches[matchType]
if ok && len(indexes) > 0 {
return indexes
}
}
return nil
}
// indexesByScopeMatch filters address indexes by matching scope,
// then returns them in descending order of best match.
func indexesByScopeMatch[T Address](addrs []T, matchFunc ScopeMatchFunc) []int {
matches := filterAndCollateAddressIndexes(addrs, matchFunc)
var prioritized []int
for _, matchType := range scopeMatchHierarchy() {
indexes, ok := matches[matchType]
if ok && len(indexes) > 0 {
prioritized = append(prioritized, indexes...)
}
}
return prioritized
}
// filterAndCollateAddressIndexes filters address indexes using the input scope
// matching function, then returns the results grouped by scope match quality.
// Invalid results are omitted.
func filterAndCollateAddressIndexes[T Address](addrs []T, matchFunc ScopeMatchFunc) map[ScopeMatch][]int {
matches := make(map[ScopeMatch][]int)
for i, addr := range addrs {
matchType := matchFunc(addr)
if matchType != invalidScope {
matches[matchType] = append(matches[matchType], i)
}
}
return matches
}
func scopeMatchHierarchy() []ScopeMatch {
return []ScopeMatch{
exactScopeIPv4, exactScope,
firstFallbackScopeIPv4, firstFallbackScope,
secondFallbackScopeIPv4, secondFallbackScope,
}
}
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
// AddressMutator describes setter methods for an address.
type AddressMutator interface {
// SetScope sets the scope property of the address.
SetScope(Scope)
// SetCIDR sets the CIDR property of the address.
SetCIDR(string)
// SetSecondary indicates whether this address is not the
// primary address of the device it is associated with.
SetSecondary(bool)
// SetConfigType indicates how this address was configured.
SetConfigType(AddressConfigType)
}
// SetScope (AddressMutator) sets the input
// scope on the address receiver.
func (a *MachineAddress) SetScope(scope Scope) {
a.Scope = scope
}
// SetCIDR (AddressMutator) sets the input
// CIDR on the address receiver.
func (a *MachineAddress) SetCIDR(cidr string) {
a.CIDR = cidr
}
// SetSecondary (AddressMutator) sets the IsSecondary
// member to true on the address receiver.
func (a *MachineAddress) SetSecondary(isSecondary bool) {
a.IsSecondary = isSecondary
}
// SetConfigType (AddressMutator) sets the input
// AddressConfigType on the address receiver.
func (a *MachineAddress) SetConfigType(configType AddressConfigType) {
a.ConfigType = configType
}
// WithScope returns a functional option that can
// be used to set the input scope on an address.
func WithScope(scope Scope) func(AddressMutator) {
return func(a AddressMutator) {
a.SetScope(scope)
}
}
// WithCIDR returns a functional option that can
// be used to set the input CIDR on an address.
func WithCIDR(cidr string) func(AddressMutator) {
return func(a AddressMutator) {
a.SetCIDR(cidr)
}
}
// WithSecondary returns a functional option that can be used to
// indicate whether an address is not the primary for its NIC.
func WithSecondary(isSecondary bool) func(AddressMutator) {
return func(a AddressMutator) {
a.SetSecondary(isSecondary)
}
}
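// WithConfigType returns a functional option that can be
// used to set the input AddressConfigType on an address.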
func WithConfigType(configType AddressConfigType) func(AddressMutator) {
return func(a AddressMutator) {
a.SetConfigType(configType)
}
}
// ProviderAddressMutator describes setter methods for a ProviderAddress
type ProviderAddressMutator interface {
AddressMutator
// SetSpaceName sets the SpaceName property of the provider address
SetSpaceName(string)
// SetProviderSpaceID sets the ProviderSpaceID property of the provider address
SetProviderSpaceID(Id)
// SetProviderID sets the ProviderID property of the provider address
SetProviderID(Id)
// SetProviderSubnetID sets the ProviderSubnetID property of the provider address
SetProviderSubnetID(Id)
// SetProviderVLANID sets the ProviderVLANID property of the provider address
SetProviderVLANID(Id)
// SetVLANTag sets the VLANTag property of the provider address
SetVLANTag(int)
}
// SetSpaceName (ProviderAddressMutator) sets the input
// space name on the provider address receiver
func (a *ProviderAddress) SetSpaceName(spaceName string) {
a.SpaceName = SpaceName(spaceName)
}
// SetProviderSpaceID (ProviderAddressMutator) sets the input
// provider space id on the provider address receiver
func (a *ProviderAddress) SetProviderSpaceID(id Id) {
a.ProviderSpaceID = id
}
// SetProviderID (ProviderAddressMutator) sets the input
// provider id on the provider address receiver
func (a *ProviderAddress) SetProviderID(id Id) {
a.ProviderID = id
}
// SetProviderSubnetID (ProviderAddressMutator) sets the input
// provider subnet id on the provider address receiver.
func (a *ProviderAddress) SetProviderSubnetID(id Id) {
a.ProviderSubnetID = id
}
// SetProviderVLANID (ProviderAddressMutator) sets the input
// provider VLAN id on the provider address receiver.
func (a *ProviderAddress) SetProviderVLANID(id Id) {
a.ProviderVLANID = id
}
// SetVLANTag (ProviderAddressMutator) sets the input
// VLAN tag on the provider address receiver.
func (a *ProviderAddress) SetVLANTag(tag int) {
a.VLANTag = tag
}
// WithSpaceName returns a functional option that can
// be used to set the input space name on a provider address.
func WithSpaceName(space string) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetSpaceName(space)
}
}
// WithProviderSpaceID returns a functional option that can
// be used to set the input provider space id on a provider address
func WithProviderSpaceID(id Id) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetProviderSpaceID(id)
}
}
// WithProviderID returns a functional option that can
// be used to set the input provider id on a provider address
func WithProviderID(id Id) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetProviderID(id)
}
}
// WithProviderSubnetID returns a functional option that can
// be used to set the input provider subnet id on a provider address
func WithProviderSubnetID(id Id) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetProviderSubnetID(id)
}
}
// WithProviderVLANID returns a functional option that can
// be used to set the input provider VLAN id on a provider address
func WithProviderVLANID(id Id) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetProviderVLANID(id)
}
}
// WithVLANTag returns a functional option that can
// be used to set the input VLAN tag on a provider address
func WithVLANTag(tag int) func(ProviderAddressMutator) {
return func(a ProviderAddressMutator) {
a.SetVLANTag(tag)
}
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"strings"
"github.com/juju/errors"
)
// GetObservedNetworkConfig uses the given source to find all available network
// interfaces and their assigned addresses, and returns the result as
// []params.NetworkConfig. In addition to what the source returns, a few
// additional transformations are done:
//
// - On any OS, the state (UP/DOWN) of each interface and the DeviceIndex field
// will be correctly populated. Loopback interfaces are also properly detected
// and will have InterfaceType set to LoopbackDevice.
// - On Linux only, the InterfaceType field will be reliably detected for a few
// types: BondDevice, BridgeDevice, VLAN8021QDevice.
// - Also on Linux, for interfaces that are discovered to be ports on a bridge,
// the ParentInterfaceName will be populated with the name of the bridge.
// - ConfigType fields will be set to ConfigManual when no address is detected,
// or ConfigStatic when it is.
// - NICs that correspond to the internal port of an OVS-managed switch will
// have their type forced to bridge and their virtual port type set to
// OvsPort.
// - TODO: IPv6 link-local addresses are ignored and treated as empty at the moment.
func GetObservedNetworkConfig(source ConfigSource) (InterfaceInfos, error) {
logger.Tracef(context.TODO(), "discovering observed machine network config...")
interfaces, err := source.Interfaces()
if err != nil {
return nil, errors.Annotate(err, "detecting network interfaces")
}
if len(interfaces) == 0 {
logger.Tracef(context.TODO(), "no network interfaces")
return nil, nil
}
knownOVSBridges, err := source.OvsManagedBridges()
if err != nil {
// NOTE(achilleasa): we will only get an error here if we do
// locate the OVS cli tools and get an error executing them.
return nil, errors.Annotate(err, "querying OVS bridges")
}
defaultRoute, defaultRouteDevice, err := source.DefaultRoute()
if err != nil {
return nil, errors.Annotate(err, "retrieving default route")
}
var configs InterfaceInfos
var bridgeNames []string
var noAddressesNics []string
for _, nic := range interfaces {
virtualPortType := NonVirtualPort
if knownOVSBridges.Contains(nic.Name()) {
virtualPortType = OvsPort
}
nicConfig := createInterfaceInfo(nic, virtualPortType)
if nicConfig.InterfaceName == defaultRouteDevice {
nicConfig.IsDefaultGateway = true
nicConfig.GatewayAddress = NewMachineAddress(defaultRoute.String()).AsProviderAddress()
}
// Collect all the bridge device names. We will use these to update all
// the parent device names for the bridge's port devices at the end.
if nic.Type() == BridgeDevice {
bridgeNames = append(bridgeNames, nic.Name())
}
nicAddrs, err := nic.Addresses()
if err != nil {
return nil, errors.Annotatef(err, "detecting addresses for %q", nic.Name())
}
if len(nicAddrs) > 0 {
// TODO (manadart 2021-05-07): This preserves prior behaviour,
// but is incorrect for DHCP configured devices.
// At present we do not store a config type against the device,
// only the addresses (which incorrectly default to static too).
// This could be corrected by interrogating the DHCP leases for
// the device, should we ever need that detail.
// At present we do not - we only use it to determine if an address
// has a configuration method of "loopback".
if nic.Type() != LoopbackDevice {
nicConfig.ConfigType = ConfigStatic
}
nicConfig.Addresses, err = addressesToConfig(nicConfig, nicAddrs)
if err != nil {
return nil, errors.Trace(err)
}
} else {
noAddressesNics = append(noAddressesNics, nic.Name())
}
configs = append(configs, nicConfig)
}
if len(noAddressesNics) > 0 {
logger.Debugf(context.TODO(), "no addresses observed on interfaces %q", strings.Join(noAddressesNics, ", "))
}
updateParentsForBridgePorts(configs, bridgeNames, source)
return configs, nil
}
func createInterfaceInfo(nic ConfigSourceNIC,
virtualPortType VirtualPortType,
) InterfaceInfo {
configType := ConfigManual
if nic.Type() == LoopbackDevice {
configType = ConfigLoopback
}
isUp := nic.IsUp()
// TODO (dimitern): Add DNS servers and search domains.
return InterfaceInfo{
DeviceIndex: nic.Index(),
MACAddress: nic.HardwareAddr().String(),
ConfigType: configType,
MTU: nic.MTU(),
InterfaceName: nic.Name(),
InterfaceType: nic.Type(),
NoAutoStart: !isUp,
Disabled: !isUp,
VirtualPortType: virtualPortType,
Origin: OriginMachine,
}
}
func addressesToConfig(nic InterfaceInfo, nicAddrs []ConfigSourceAddr) ([]ProviderAddress, error) {
var res ProviderAddresses
for _, nicAddr := range nicAddrs {
if nicAddr == nil {
return nil, errors.Errorf("cannot parse nil address on interface %q", nic.InterfaceName)
}
ip := nicAddr.IP()
// TODO (macgreagoir): Skip IPv6 link-local until we decide how to handle them.
if ip.To4() == nil && ip.IsLinkLocalUnicast() {
logger.Tracef(context.TODO(), "skipping observed IPv6 link-local address %q on %q", ip, nic.InterfaceName)
continue
}
opts := []func(mutator AddressMutator){
WithConfigType(nic.ConfigType),
WithSecondary(nicAddr.IsSecondary()),
}
if ipNet := nicAddr.IPNet(); ipNet != nil && ipNet.Mask != nil {
opts = append(opts, WithCIDR(NetworkCIDRFromIPAndMask(ip, ipNet.Mask)))
}
// Constructing a core Address like this first,
// then converting, populates the scope and type.
res = append(res, NewMachineAddress(ip.String(), opts...).AsProviderAddress())
}
return res, nil
}
func updateParentsForBridgePorts(config []InterfaceInfo, bridgeNames []string, source ConfigSource) {
for _, bridgeName := range bridgeNames {
for _, portName := range source.GetBridgePorts(bridgeName) {
for i := range config {
if config[i].InterfaceName == portName {
config[i].ParentInterfaceName = bridgeName
break
}
}
}
}
}
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"bufio"
"context"
"os"
"strings"
"github.com/juju/errors"
)
// DNSConfig holds a list of DNS nameserver addresses
// and default search domains.
type DNSConfig struct {
Nameservers []ProviderAddress
SearchDomains []string
}
// ParseResolvConf parses a resolv.conf(5) file at the given path (usually
// "/etc/resolv.conf"), if present. Returns the values of any 'nameserver'
// stanzas, and the last 'search' stanza found. Values in the result will
// appear in the order found, including duplicates.
// Parsing errors will be returned in these cases:
//
// 1. a 'nameserver' or 'search' stanza without a value is found;
// 2. a 'nameserver' stanza has more than one value (trailing comments starting
// with '#' or ';' after the value are allowed);
// 3. any value contains '#' or ';' (e.g. 'nameserver 8.8.8.8#bad'), because
// values and comments following them must be separated by whitespace.
//
// No error is returned if the file is missing.
// See resolv.conf(5) man page for details.
func ParseResolvConf(path string) (*DNSConfig, error) {
file, err := os.Open(path)
if os.IsNotExist(err) {
logger.Debugf(context.TODO(), "%q does not exist - not parsing", path)
return nil, nil
} else if err != nil {
return nil, errors.Trace(err)
}
defer file.Close()
var (
nameservers []string
searchDomains []string
)
scanner := bufio.NewScanner(file)
lineNum := 0
for scanner.Scan() {
line := scanner.Text()
lineNum++
values, err := parseResolvStanza(line, "nameserver")
if err != nil {
return nil, errors.Annotatef(err, "parsing %q, line %d", path, lineNum)
}
if numValues := len(values); numValues > 1 {
return nil, errors.Errorf(
"parsing %q, line %d: one value expected for \"nameserver\", got %d",
path, lineNum, numValues,
)
} else if numValues == 1 {
nameservers = append(nameservers, values[0])
continue
}
values, err = parseResolvStanza(line, "search")
if err != nil {
return nil, errors.Annotatef(err, "parsing %q, line %d", path, lineNum)
}
if len(values) > 0 {
// Last 'search' found wins.
searchDomains = values
}
}
if err := scanner.Err(); err != nil {
return nil, errors.Annotatef(err, "reading %q", path)
}
return &DNSConfig{
Nameservers: NewMachineAddresses(nameservers).AsProviderAddresses(),
SearchDomains: searchDomains,
}, nil
}
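// Illustrative usage (a sketch, not part of the original source; the file
// contents shown are hypothetical):
//
//	// Given /etc/resolv.conf containing:
//	//   nameserver 8.8.8.8
//	//   search example.com internal
//	cfg, err := ParseResolvConf("/etc/resolv.conf")
//	if err == nil && cfg != nil {
//		_ = cfg.Nameservers   // one ProviderAddress for 8.8.8.8
//		_ = cfg.SearchDomains // []string{"example.com", "internal"}
//	}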
// parseResolvStanza parses a single line from a resolv.conf(5) file, beginning
// with the given stanza ('nameserver' or 'search'). If the line does not
// contain the stanza, no results and no error are returned. Leading and trailing
// whitespace is removed first, then lines starting with ";" or "#" are treated
// as comments.
//
// Examples:
// parseResolvStanza(` # nothing ;to see here`, "doesn't matter")
// will return (nil, nil) - comments and whitespace are ignored, nothing left.
//
// parseResolvStanza(` nameserver ns1.example.com # preferred`, "nameserver")
// will return ([]string{"ns1.example.com"}, nil).
//
// parseResolvStanza(`search ;; bad: no value`, "search")
// will return (nil, err: `"search": required value(s) missing`)
//
// parseResolvStanza(`search foo bar foo foo.bar bar.foo ;; try all`, "search")
// will return ([]string{"foo", "bar", "foo", "foo.bar", "bar.foo"}, nil).
//
// parseResolvStanza(`search foo#bad comment`, "nameserver")
// will return (nil, nil) - line does not start with "nameserver".
//
// parseResolvStanza(`search foo#bad comment`, "search")
// will return (nil, err: `"search": invalid value "foo#bad"`) - no whitespace
// between the value "foo" and the following comment "#bad comment".
func parseResolvStanza(line, stanza string) ([]string, error) {
const commentChars = ";#"
isComment := func(s string) bool {
return strings.IndexAny(s, commentChars) == 0
}
line = strings.TrimSpace(line)
fields := strings.Fields(line)
noFields := len(fields) == 0 // line contains only whitespace
if isComment(line) || noFields || fields[0] != stanza {
// Lines starting with ';' or '#' are comments and are ignored. Empty
// lines and those not starting with stanza are ignored.
return nil, nil
}
// Mostly for convenience, comments starting with ';' or '#' after a value
// are allowed and ignored, assuming there's whitespace between the value
// and the comment (e.g. 'search foo #bar' is OK, but 'search foo#bar'
// isn't).
var parsedValues []string
rawValues := fields[1:] // skip the stanza itself
for _, value := range rawValues {
if isComment(value) {
// We're done parsing as the rest of the line is still part of the
// same comment.
break
}
if strings.ContainsAny(value, commentChars) {
// This will catch cases like 'nameserver 8.8.8.8#foo', because
// fields[1] will be '8.8.8.8#foo'.
return nil, errors.Errorf("%q: invalid value %q", stanza, value)
}
parsedValues = append(parsedValues, value)
}
// resolv.conf(5) states that to be recognized as valid, the line must begin
// with the stanza, followed by whitespace, then at least one value (for
// 'nameserver', more values separated by whitespace are allowed for
// 'search').
if len(parsedValues) == 0 {
return nil, errors.Errorf("%q: required value(s) missing", stanza)
}
return parsedValues, nil
}
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"net"
"os/exec"
"runtime"
"strconv"
"strings"
)
var goos = func() string {
return runtime.GOOS
}
var runIPRouteShow = func() (string, error) {
output, err := exec.Command("ip", "route", "show").CombinedOutput()
if err != nil {
return "", err
}
return string(output), nil
}
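// GetDefaultRoute returns the gateway IP and device name of the default route
// with the lowest metric, as parsed from "ip route show" output. Either return
// value may be empty when the route specifies only a device or only a gateway.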
func GetDefaultRoute() (net.IP, string, error) {
output, err := runIPRouteShow()
if err != nil {
return nil, "", err
}
logger.Tracef(context.TODO(), "ip route show output:\n%s", output)
var defaultRouteMetric = ^uint64(0)
var defaultRoute string
var defaultRouteDevice string
for _, line := range strings.Split(output, "\n") {
to, values := parseIpRouteShowLine(line)
logger.Tracef(context.TODO(), "parsing ip r s line to %q, values %+v ", to, values)
if to == "default" {
var metric = uint64(0)
if v, ok := values["metric"]; ok {
if i, err := strconv.ParseUint(v, 10, 64); err == nil {
metric = i
} else {
return nil, "", err
}
}
if metric < defaultRouteMetric {
// We want to replace our current default route if it's valid.
via, hasVia := values["via"]
dev, hasDev := values["dev"]
if hasVia || hasDev {
defaultRouteMetric = metric
if hasVia {
defaultRoute = via
} else {
defaultRoute = ""
}
if hasDev {
defaultRouteDevice = dev
} else {
defaultRouteDevice = ""
}
}
}
}
}
return net.ParseIP(defaultRoute), defaultRouteDevice, nil
}
func parseIpRouteShowLine(line string) (string, map[string]string) {
values := make(map[string]string)
fields := strings.Fields(line)
if len(fields) < 2 {
return "", values
}
to, fields := fields[0], fields[1:]
for ; len(fields) >= 2; fields = fields[2:] {
values[fields[0]] = fields[1]
}
return to, values
}
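// Illustrative behaviour (a sketch, not part of the original source; the
// route line is hypothetical):
//
//	to, values := parseIpRouteShowLine("default via 192.168.1.1 dev eth0 metric 100")
//	// to == "default"
//	// values == map[string]string{"via": "192.168.1.1", "dev": "eth0", "metric": "100"}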
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"net"
"strconv"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
// HostPort describes methods on an object that
// represents a network connection endpoint.
type HostPort interface {
Address
Port() int
}
// HostPorts derives from a slice of HostPort
// and allows bulk operations on its members.
type HostPorts []HostPort
// FilterUnusable returns a copy of the receiver HostPorts after removing
// any addresses unlikely to be usable (ScopeMachineLocal or ScopeLinkLocal).
func (hps HostPorts) FilterUnusable() HostPorts {
filtered := make(HostPorts, 0, len(hps))
for _, addr := range hps {
switch addr.AddressScope() {
case ScopeMachineLocal, ScopeLinkLocal:
continue
}
filtered = append(filtered, addr)
}
return filtered
}
// Strings returns the HostPorts as a slice of
// strings suitable for passing to net.Dial.
func (hps HostPorts) Strings() []string {
result := make([]string, len(hps))
for i, addr := range hps {
result[i] = DialAddress(addr)
}
return result
}
// Unique returns a copy of the receiver HostPorts with duplicate endpoints
// removed. Note that this only applies to dial addresses; spaces are ignored.
func (hps HostPorts) Unique() HostPorts {
results := make([]HostPort, 0, len(hps))
seen := set.NewStrings()
for _, addr := range hps {
da := DialAddress(addr)
if seen.Contains(da) {
continue
}
seen.Add(da)
results = append(results, addr)
}
return results
}
// PrioritizedForScope orders the HostPorts by best match for the input scope
// matching function and returns them as dial-address ("host:port") strings.
// If there are no suitable addresses then an empty slice is returned.
func (hps HostPorts) PrioritizedForScope(getMatcher ScopeMatchFunc) []string {
indexes := indexesByScopeMatch(hps, getMatcher)
out := make([]string, len(indexes))
for i, index := range indexes {
out[i] = DialAddress(hps[index])
}
return out
}
// DialAddress returns a string value for the input HostPort,
// suitable for passing as an argument to net.Dial.
func DialAddress(a HostPort) string {
return net.JoinHostPort(a.Host(), strconv.Itoa(a.Port()))
}
// NetPort represents a network port.
// TODO (manadart 2019-08-15): Finish deprecation of `Port` and use that name.
type NetPort int
// Port returns the port number.
func (p NetPort) Port() int {
return int(p)
}
// MachineHostPort associates a space-unaware address with a port.
type MachineHostPort struct {
MachineAddress
NetPort
}
var _ HostPort = MachineHostPort{}
// String implements Stringer.
func (hp MachineHostPort) String() string {
return DialAddress(hp)
}
// GoString implements fmt.GoStringer.
func (hp MachineHostPort) GoString() string {
return hp.String()
}
// MachineHostPorts is a slice of MachineHostPort
// allowing use as a receiver for bulk operations.
type MachineHostPorts []MachineHostPort
// HostPorts returns the slice as a new slice of the HostPort indirection.
func (hp MachineHostPorts) HostPorts() HostPorts {
addrs := make(HostPorts, len(hp))
for i, hp := range hp {
addrs[i] = hp
}
return addrs
}
// NewMachineHostPorts creates a list of MachineHostPorts
// from each given string address and port.
func NewMachineHostPorts(port int, addresses ...string) MachineHostPorts {
hps := make(MachineHostPorts, len(addresses))
for i, addr := range addresses {
hps[i] = MachineHostPort{
MachineAddress: NewMachineAddress(addr),
NetPort: NetPort(port),
}
}
return hps
}
// ParseMachineHostPort converts a string containing a
// single host and port value to a MachineHostPort.
func ParseMachineHostPort(hp string) (*MachineHostPort, error) {
host, port, err := net.SplitHostPort(hp)
if err != nil {
return nil, errors.Annotatef(err, "cannot parse %q as address:port", hp)
}
numPort, err := strconv.Atoi(port)
if err != nil {
return nil, errors.Annotatef(err, "cannot parse %q port", hp)
}
return &MachineHostPort{
MachineAddress: NewMachineAddress(host),
NetPort: NetPort(numPort),
}, nil
}
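// Illustrative usage (a sketch, not part of the original source):
//
//	hp, err := ParseMachineHostPort("10.0.0.1:17070")
//	if err == nil {
//		_ = hp.Host()   // "10.0.0.1"
//		_ = hp.Port()   // 17070
//		_ = hp.String() // "10.0.0.1:17070"
//	}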
// CollapseToHostPorts returns the input nested slice of MachineHostPort
// as a flat slice of HostPort, preserving the order.
func CollapseToHostPorts(serversHostPorts []MachineHostPorts) HostPorts {
var collapsed HostPorts
for _, hps := range serversHostPorts {
for _, hp := range hps {
collapsed = append(collapsed, hp)
}
}
return collapsed
}
// ProviderHostPort associates a provider/space aware address with a port.
type ProviderHostPort struct {
ProviderAddress
NetPort
}
var _ HostPort = ProviderHostPort{}
// String implements Stringer.
func (hp ProviderHostPort) String() string {
return DialAddress(hp)
}
// GoString implements fmt.GoStringer.
func (hp ProviderHostPort) GoString() string {
return hp.String()
}
// ProviderHostPorts is a slice of ProviderHostPort
// allowing use as a receiver for bulk operations.
type ProviderHostPorts []ProviderHostPort
// Addresses extracts the ProviderAddress from each member of the collection,
// then returns them as a new collection, effectively discarding the port.
func (hp ProviderHostPorts) Addresses() ProviderAddresses {
addrs := make(ProviderAddresses, len(hp))
for i, hp := range hp {
addrs[i] = hp.ProviderAddress
}
return addrs
}
// HostPorts returns the slice as a new slice of the HostPort indirection.
func (hp ProviderHostPorts) HostPorts() HostPorts {
addrs := make(HostPorts, len(hp))
for i, hp := range hp {
addrs[i] = hp
}
return addrs
}
// ParseProviderHostPorts creates a slice of MachineHostPorts parsing
// each given string containing address:port.
// An error is returned if any string cannot be parsed as a MachineHostPort.
func ParseProviderHostPorts(hostPorts ...string) (ProviderHostPorts, error) {
hps := make(ProviderHostPorts, len(hostPorts))
for i, hp := range hostPorts {
mhp, err := ParseMachineHostPort(hp)
if err != nil {
return nil, errors.Trace(err)
}
hps[i] = ProviderHostPort{
ProviderAddress: ProviderAddress{MachineAddress: mhp.MachineAddress},
NetPort: mhp.NetPort,
}
}
return hps, nil
}
// SpaceHostPort associates a space ID decorated address with a port.
type SpaceHostPort struct {
SpaceAddress
NetPort
}
var _ HostPort = SpaceHostPort{}
// String implements Stringer.
func (hp SpaceHostPort) String() string {
return DialAddress(hp)
}
// GoString implements fmt.GoStringer.
func (hp SpaceHostPort) GoString() string {
return hp.String()
}
// Less reports whether hp is ordered before hp2
// according to the criteria used by SortHostPorts.
func (hp SpaceHostPort) Less(hp2 SpaceHostPort) bool {
order1 := SortOrderMostPublic(hp)
order2 := SortOrderMostPublic(hp2)
if order1 == order2 {
if hp.SpaceAddress.Value == hp2.SpaceAddress.Value {
return hp.Port() < hp2.Port()
}
return hp.SpaceAddress.Value < hp2.SpaceAddress.Value
}
return order1 < order2
}
// SpaceHostPorts is a slice of SpaceHostPort
// allowing use as a receiver for bulk operations.
type SpaceHostPorts []SpaceHostPort
// NewSpaceHostPorts creates a list of SpaceHostPorts
// from each input string address and port.
func NewSpaceHostPorts(port int, addresses ...string) SpaceHostPorts {
hps := make(SpaceHostPorts, len(addresses))
for i, addr := range addresses {
hps[i] = SpaceHostPort{
SpaceAddress: NewSpaceAddress(addr),
NetPort: NetPort(port),
}
}
return hps
}
// HostPorts returns the slice as a new slice of the HostPort indirection.
func (hps SpaceHostPorts) HostPorts() HostPorts {
addrs := make(HostPorts, len(hps))
for i, hp := range hps {
addrs[i] = hp
}
return addrs
}
// InSpaces returns the SpaceHostPorts that are in the input spaces.
func (hps SpaceHostPorts) InSpaces(spaces ...SpaceInfo) (SpaceHostPorts, bool) {
if len(spaces) == 0 {
logger.Errorf(context.TODO(), "host ports not filtered - no spaces given.")
return hps, false
}
spaceInfos := SpaceInfos(spaces)
var selectedHostPorts SpaceHostPorts
for _, hp := range hps {
if space := spaceInfos.GetByID(hp.SpaceID); space != nil {
logger.Debugf(context.TODO(), "selected %q as a hostPort in space %q", hp.Value, space.Name)
selectedHostPorts = append(selectedHostPorts, hp)
}
}
if len(selectedHostPorts) > 0 {
return selectedHostPorts, true
}
logger.Errorf(context.TODO(), "no hostPorts found in spaces %s", spaceInfos)
return hps, false
}
// AllMatchingScope returns the HostPorts that best satisfy the input scope
// matching function, as strings usable as arguments to net.Dial.
func (hps SpaceHostPorts) AllMatchingScope(getMatcher ScopeMatchFunc) []string {
indexes := indexesForScope(hps, getMatcher)
out := make([]string, 0, len(indexes))
for _, index := range indexes {
out = append(out, DialAddress(hps[index]))
}
return out
}
func (hps SpaceHostPorts) Len() int { return len(hps) }
func (hps SpaceHostPorts) Swap(i, j int) { hps[i], hps[j] = hps[j], hps[i] }
func (hps SpaceHostPorts) Less(i, j int) bool {
return hps[i].Less(hps[j])
}
// SpaceAddressesWithPort returns the input SpaceAddresses
// all associated with the given port.
func SpaceAddressesWithPort(addrs SpaceAddresses, port int) SpaceHostPorts {
hps := make(SpaceHostPorts, len(addrs))
for i, addr := range addrs {
hps[i] = SpaceHostPort{
SpaceAddress: addr,
NetPort: NetPort(port),
}
}
return hps
}
// APIHostPortsToNoProxyString converts a list of SpaceHostPorts lists to a
// NoProxy-like, comma-separated string, ignoring machine-local and link-local
// addresses.
func APIHostPortsToNoProxyString(ahp []SpaceHostPorts) string {
noProxySet := set.NewStrings()
for _, host := range ahp {
for _, hp := range host {
if hp.SpaceAddress.Scope == ScopeMachineLocal || hp.SpaceAddress.Scope == ScopeLinkLocal {
continue
}
noProxySet.Add(hp.SpaceAddress.Value)
}
}
return strings.Join(noProxySet.SortedValues(), ",")
}
// EnsureFirstHostPort scans the given list of SpaceHostPorts and if
// "first" is found, it is moved to index 0. Otherwise, if "first" is not
// in the list, it is inserted at index 0.
func EnsureFirstHostPort(first SpaceHostPort, hps SpaceHostPorts) SpaceHostPorts {
var result []SpaceHostPort
found := false
for _, hp := range hps {
if hp.String() == first.String() && !found {
// Found, so skip it.
found = true
continue
}
result = append(result, hp)
}
// Insert it at the top.
result = append(SpaceHostPorts{first}, result...)
return result
}
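// Illustrative usage (a sketch, not part of the original source; the
// addresses are hypothetical):
//
//	hps := NewSpaceHostPorts(17070, "10.0.0.2", "10.0.0.3")
//	first := SpaceHostPort{SpaceAddress: NewSpaceAddress("10.0.0.3"), NetPort: 17070}
//	ordered := EnsureFirstHostPort(first, hps)
//	// ordered now has "10.0.0.3:17070" at index 0, with no duplicate entry.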
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"runtime"
"strings"
)
// LinkLayerDeviceType defines the type of a link-layer network device.
type LinkLayerDeviceType string
const (
// UnknownDevice indicates that the type of this device is not known.
UnknownDevice LinkLayerDeviceType = ""
// LoopbackDevice is used for loopback devices.
LoopbackDevice LinkLayerDeviceType = "loopback"
// EthernetDevice is used for Ethernet (IEEE 802.3) devices.
EthernetDevice LinkLayerDeviceType = "ethernet"
// VLAN8021QDevice is used for IEEE 802.1Q VLAN devices.
VLAN8021QDevice LinkLayerDeviceType = "802.1q"
// BondDevice is used for bonding devices.
BondDevice LinkLayerDeviceType = "bond"
// BridgeDevice is used for OSI layer-2 bridge devices.
BridgeDevice LinkLayerDeviceType = "bridge"
// VXLANDevice is used for Virtual Extensible LAN devices.
VXLANDevice LinkLayerDeviceType = "vxlan"
)
// IsValidLinkLayerDeviceType returns whether the given value is a valid
// link-layer network device type.
func IsValidLinkLayerDeviceType(value string) bool {
switch LinkLayerDeviceType(value) {
case LoopbackDevice, EthernetDevice, VLAN8021QDevice, BondDevice, BridgeDevice, VXLANDevice:
return true
}
return false
}
// IsValidLinkLayerDeviceName returns whether the given name is a valid network
// link-layer device name, depending on the runtime.GOOS value.
func IsValidLinkLayerDeviceName(name string) bool {
return isValidLinkLayerDeviceName(name, runtime.GOOS)
}
func isValidLinkLayerDeviceName(name string, runtimeOS string) bool {
if runtimeOS == "linux" {
return isValidLinuxDeviceName(name)
}
hasHash := strings.Contains(name, "#")
return !hasHash && stringLengthBetween(name, 1, 255)
}
// isValidLinuxDeviceName returns whether the given deviceName is valid,
// using the same criteria as dev_valid_name(9) in the Linux kernel:
// - no whitespace allowed
// - length from 1 to 15 ASCII characters
// - literal "." and ".." as names are not allowed.
// Additionally, we don't allow "#" in the name.
func isValidLinuxDeviceName(name string) bool {
hasWhitespace := whitespaceReplacer.Replace(name) != name
isDot, isDoubleDot := name == ".", name == ".."
hasValidLength := stringLengthBetween(name, 1, 15)
hasHash := strings.Contains(name, "#")
return hasValidLength && !(hasHash || hasWhitespace || isDot || isDoubleDot)
}
// whitespaceReplacer strips whitespace characters from the input string.
var whitespaceReplacer = strings.NewReplacer(
" ", "",
"\t", "",
"\v", "",
"\n", "",
"\r", "",
)
func stringLengthBetween(value string, minLength, maxLength uint) bool {
length := uint(len(value))
return length >= minLength && length <= maxLength
}
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"fmt"
"math/rand"
"net"
"sort"
internallogger "github.com/juju/juju/internal/logger"
)
var logger = internallogger.GetLogger("juju.core.network")
// macAddressTemplate is suitable for generating virtual MAC addresses,
// particularly for use by container devices.
// The last 3 segments are randomised.
// TODO (manadart 2018-06-21) Depending on where this is utilised,
// ensuring MAC address uniqueness within a model might be prudent.
const macAddressTemplate = "00:16:3e:%02x:%02x:%02x"
// GenerateVirtualMACAddress creates a random MAC address within the address
// space implied by macAddressTemplate above.
var GenerateVirtualMACAddress = func() string {
digits := make([]interface{}, 3)
for i := range digits {
digits[i] = rand.Intn(256)
}
return fmt.Sprintf(macAddressTemplate, digits...)
}
// Id defines a provider-specific network ID.
type Id string
// String returns the underlying string representation of the Id.
// This method helps with formatting and type inference.
func (id Id) String() string {
return string(id)
}
// IDSet represents the classic "set" data structure, and contains Id.
// IDSet is used as a typed version to prevent string -> Id -> string
// conversion when using set.Strings
type IDSet map[Id]struct{}
// MakeIDSet creates and initializes an IDSet and populates it with
// initial values as specified in the parameters.
func MakeIDSet(values ...Id) IDSet {
set := make(map[Id]struct{}, len(values))
for _, id := range values {
set[id] = struct{}{}
}
return set
}
// Add puts a value into the set.
func (s IDSet) Add(value Id) {
s[value] = struct{}{}
}
// Size returns the number of elements in the set.
func (s IDSet) Size() int {
return len(s)
}
// IsEmpty is true for empty or uninitialized sets.
func (s IDSet) IsEmpty() bool {
return len(s) == 0
}
// Contains returns true if the value is in the set, and false otherwise.
func (s IDSet) Contains(id Id) bool {
_, exists := s[id]
return exists
}
// Difference returns a new IDSet representing all the values in the
// target that are not in the parameter.
func (s IDSet) Difference(other IDSet) IDSet {
result := make(IDSet)
// Use the internal map rather than going through the friendlier functions
// to avoid extra allocation of slices.
for value := range s {
if !other.Contains(value) {
result[value] = struct{}{}
}
}
return result
}
// Values returns an unordered slice containing all the values in the set.
func (s IDSet) Values() []Id {
result := make([]Id, len(s))
i := 0
for key := range s {
result[i] = key
i++
}
return result
}
// SortedValues returns an ordered slice containing all the values in the set.
func (s IDSet) SortedValues() []Id {
values := s.Values()
sort.Slice(values, func(i, j int) bool {
return values[i] < values[j]
})
return values
}
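// Illustrative usage (a sketch, not part of the original source; the IDs are
// hypothetical):
//
//	ids := MakeIDSet("net-1", "net-2")
//	ids.Add("net-3")
//	ids.Contains("net-2")                     // true
//	ids.Difference(MakeIDSet("net-2")).Size() // 2 (net-1 and net-3)
//	ids.SortedValues()                        // [net-1 net-2 net-3]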
// SubnetsForAddresses returns subnets corresponding to the addresses
// in the input address list.
// There can be situations (observed for CAAS) where the addresses can
// contain a FQDN.
// For these cases we log a warning and eschew subnet determination.
func SubnetsForAddresses(addrs []string) []string {
var subs []string
for _, a := range addrs {
// We don't expect this to be the case, but guard conservatively.
if _, _, err := net.ParseCIDR(a); err == nil {
subs = append(subs, a)
continue
}
if addr := net.ParseIP(a); addr != nil {
if addr.To4() != nil {
subs = append(subs, addr.String()+"/32")
} else {
subs = append(subs, addr.String()+"/128")
}
continue
}
logger.Warningf(context.TODO(), "unable to determine egress subnet for %q", a)
}
return subs
}
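// Illustrative behaviour (a sketch, not part of the original source):
//
//	SubnetsForAddresses([]string{"10.0.0.5", "2001:db8::5", "192.168.1.0/24"})
//	// returns []string{"10.0.0.5/32", "2001:db8::5/128", "192.168.1.0/24"}.
//	// A value that is neither a CIDR nor an IP (e.g. a FQDN) is skipped with a warning.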
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"fmt"
"net"
"strings"
"github.com/juju/errors"
)
// VirtualPortType defines the list of known port types for virtual NICs.
type VirtualPortType string
const (
NonVirtualPort VirtualPortType = ""
OvsPort VirtualPortType = "openvswitch"
)
// Route defines a single route to a subnet via a defined gateway.
type Route struct {
// DestinationCIDR is the subnet that we want a controlled route to.
DestinationCIDR string
// GatewayIP is the IP (v4 or v6) that should be used for traffic that is
// bound for DestinationCIDR
GatewayIP string
// Metric is the weight to apply to this route.
Metric int
}
// Validate that this Route is properly formed.
func (r Route) Validate() error {
// Make sure the CIDR is actually a CIDR not just an IP or hostname
destinationIP, _, err := net.ParseCIDR(r.DestinationCIDR)
if err != nil {
return errors.Annotate(err, "DestinationCIDR not valid")
}
// Make sure the Gateway is just an IP, not a CIDR, etc.
gatewayIP := net.ParseIP(r.GatewayIP)
if gatewayIP == nil {
return errors.Errorf("GatewayIP is not a valid IP address: %q", r.GatewayIP)
}
if r.Metric < 0 {
return errors.Errorf("Metric is negative: %d", r.Metric)
}
// Make sure that either both are IPv4 or both are IPv6, not mixed.
destIP4 := destinationIP.To4()
gatewayIP4 := gatewayIP.To4()
if destIP4 != nil {
if gatewayIP4 == nil {
return errors.Errorf("DestinationCIDR is IPv4 (%s) but GatewayIP is IPv6 (%s)", r.DestinationCIDR, r.GatewayIP)
}
} else {
if gatewayIP4 != nil {
return errors.Errorf("DestinationCIDR is IPv6 (%s) but GatewayIP is IPv4 (%s)", r.DestinationCIDR, r.GatewayIP)
}
}
return nil
}
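// Illustrative usage (a sketch, not part of the original source; the values
// are hypothetical):
//
//	r := Route{
//		DestinationCIDR: "192.168.10.0/24",
//		GatewayIP:       "192.168.1.1",
//		Metric:          100,
//	}
//	_ = r.Validate() // nil: CIDR and gateway are both IPv4, metric is non-negative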
// InterfaceInfo describes a single network interface.
//
// A note on ConfigType stored against the interface, and on members of the
// Addresses collection:
// Addresses detected for machines during discovery (on-machine or via the
// instance-poller) are denormalised for storage in that the configuration
// method (generally associated with the device) is stored for each address.
// So when incoming, ConfigType supplied with *addresses* is prioritised.
// Alternatively, when supplied to instance provisioning as network
// configuration for cloud-init, we are informing how a *device* should be
// configured for addresses and so we use the ConfigType against the interface.
type InterfaceInfo struct {
// DeviceIndex specifies the order in which the network interface
// appears on the host. The primary interface has an index of 0.
DeviceIndex int
// MACAddress is the network interface's hardware MAC address
// (e.g. "aa:bb:cc:dd:ee:ff").
MACAddress string
// ProviderId is a provider-specific NIC id.
ProviderId Id
// ProviderSubnetId is the provider-specific id for the associated
// subnet.
ProviderSubnetId Id
// ProviderNetworkId is the provider-specific id for the
// associated network.
ProviderNetworkId Id
// ProviderSpaceId is the provider-specific id for the associated space,
// if known and supported.
ProviderSpaceId Id
// ProviderVLANId is the provider-specific id of the VLAN for this
// interface.
ProviderVLANId Id
// ProviderAddressId is the provider-specific id of the assigned address.
ProviderAddressId Id
// AvailabilityZones describes the availability zones the associated
// subnet is in.
AvailabilityZones []string
// VLANTag needs to be between 1 and 4094 for VLANs and 0 for
// normal networks. It's defined by IEEE 802.1Q standard.
VLANTag int
// InterfaceName is the raw OS-specific network device name (e.g.
// "eth1", even for a VLAN eth1.42 virtual interface).
InterfaceName string
// ParentInterfaceName is the name of the parent interface to use,
// if known.
ParentInterfaceName string
// InterfaceType is the type of the interface.
InterfaceType LinkLayerDeviceType
// Disabled is true when the interface needs to be disabled on the
// machine, e.g. not to configure it.
Disabled bool
// NoAutoStart is true when the interface should not be configured
// to start automatically on boot.
// By default and for backwards-compatibility, interfaces are
// configured to auto-start.
NoAutoStart bool
// ConfigType determines whether the interface should be
// configured via DHCP, statically, manually, etc. See
// interfaces(5) for more information.
ConfigType AddressConfigType
// Addresses contains an optional list of static IP address to
// configure for this network interface. The subnet mask to set will be
// inferred from the CIDR value of the first entry which is always
// assumed to be the primary IP address for the interface.
Addresses ProviderAddresses
// ShadowAddresses contains an optional list of additional IP addresses
// that the underlying network provider associates with this network
// interface instance. These IP addresses are not typically visible
// to the machine that the interface is connected to.
ShadowAddresses ProviderAddresses
// DNSServers contains an optional list of IP addresses and/or
// host names to configure as DNS servers for this network interface.
DNSServers ProviderAddresses
// MTU is the Maximum Transmission Unit controlling the maximum size of the
// protocol packets that the interface can pass through. It is only used
// when > 0.
MTU int
// DNSSearchDomains contains the default DNS domain to use for non-FQDN
// lookups.
DNSSearchDomains []string
// Gateway address, if set, defines the default gateway to
// configure for this network interface. For containers this
// usually is (one of) the host address(es).
GatewayAddress ProviderAddress
// Routes defines a list of routes that should be added when this interface
// is brought up, and removed when this interface is stopped.
Routes []Route
// IsDefaultGateway is set if this device is a default gw on a machine.
IsDefaultGateway bool
// VirtualPortType provides additional information about the type of
// this device if it belongs to a virtual switch (e.g. when using
// open-vswitch).
VirtualPortType VirtualPortType
// Origin represents the authoritative source of the InterfaceInfo.
// It is expected that either the provider gave us this info or the
// machine gave us this info.
// Giving us this information allows us to reason about when a InterfaceInfo
// is in use.
Origin Origin
}
// ActualInterfaceName returns the raw interface name for a physical interface
// (e.g. "eth0") and the virtual interface name for a virtual interface
// (e.g. "eth0.42").
func (i *InterfaceInfo) ActualInterfaceName() string {
if i.VLANTag > 0 {
return fmt.Sprintf("%s.%d", i.InterfaceName, i.VLANTag)
}
return i.InterfaceName
}
// IsVirtual returns true when the interface is a virtual device (e.g. a VLAN,
// network alias or OVS-managed device), as opposed to a physical device.
func (i *InterfaceInfo) IsVirtual() bool {
return i.VLANTag > 0 || i.VirtualPortType != NonVirtualPort
}
// IsVLAN returns true when the interface is a VLAN interface.
func (i *InterfaceInfo) IsVLAN() bool {
return i.VLANTag > 0
}
// Validate checks that the receiver looks like a real interface.
// An error is returned if invalid members are detected.
func (i *InterfaceInfo) Validate() error {
if i.MACAddress != "" {
if _, err := net.ParseMAC(i.MACAddress); err != nil {
return errors.NotValidf("link-layer device hardware address %q", i.MACAddress)
}
}
if i.InterfaceName == "" {
return errors.NotValidf("link-layer device %q, empty name", i.MACAddress)
}
if !IsValidLinkLayerDeviceName(i.InterfaceName) {
// TODO (manadart 2020-07-07): This preserves prior behaviour.
// If we are waving invalid names through, I'm not sure of the value.
logger.Warningf(context.TODO(), "link-layer device %q has an invalid name, %q", i.MACAddress, i.InterfaceName)
}
if !IsValidLinkLayerDeviceType(string(i.InterfaceType)) {
return errors.NotValidf("link-layer device %q, type %q", i.InterfaceName, i.InterfaceType)
}
return nil
}
// PrimaryAddress returns the primary address for the interface.
func (i *InterfaceInfo) PrimaryAddress() ProviderAddress {
if len(i.Addresses) == 0 {
return ProviderAddress{}
}
// We assume that the primary IP is always listed first. The majority
// of providers only define a single IP so this will still work as
// expected. Notably, ec2 does allow multiple private IP addresses to
// be assigned to an interface but the provider ensures that the one
// flagged as primary is present at index 0.
return i.Addresses[0]
}
// InterfaceInfos is a slice of InterfaceInfo
// for a single host/machine/container.
type InterfaceInfos []InterfaceInfo
// Validate validates each interface, returning an error if any are invalid
func (s InterfaceInfos) Validate() error {
for _, dev := range s {
if err := dev.Validate(); err != nil {
return errors.Trace(err)
}
}
return nil
}
// InterfaceFilterFunc is a function that can be applied to filter a slice of
// InterfaceInfo instances. Calls to this function should return false if
// the specified InterfaceInfo should be filtered out.
type InterfaceFilterFunc func(InterfaceInfo) bool
// Filter applies predicateFn to each entry in an InterfaceInfos list and
// returns a filtered list containing the entries for which predicateFn
// returned true.
func (s InterfaceInfos) Filter(predicateFn InterfaceFilterFunc) InterfaceInfos {
var out InterfaceInfos
for _, iface := range s {
if !predicateFn(iface) {
continue
}
out = append(out, iface)
}
return out
}
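// Illustrative sketch of Filter (the interface list is hypothetical): keep
// only the VLAN devices from a collection.
//
//	var infos network.InterfaceInfos // e.g. as reported by a provider
//	vlans := infos.Filter(func(dev network.InterfaceInfo) bool {
//		return dev.IsVLAN()
//	})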
// GetByName returns a new collection containing
// any interfaces with the input device name.
func (s InterfaceInfos) GetByName(name string) InterfaceInfos {
var res InterfaceInfos
for _, dev := range s {
if dev.InterfaceName == name {
res = append(res, dev)
}
}
return res
}
// ProviderInterfaceInfo holds enough information to identify an
// interface or link layer device to a provider so that it can be
// queried or manipulated. Its initial purpose is to pass to
// provider.ReleaseContainerAddresses.
type ProviderInterfaceInfo struct {
// InterfaceName is the raw OS-specific network device name (e.g.
// "eth1", even for a VLAN eth1.42 virtual interface).
InterfaceName string
// ProviderId is a provider-specific NIC id.
ProviderId Id
// HardwareAddress is the network interface's hardware address. The
// contents of this field depend on the NIC type (a MAC address for an
// ethernet device, a GUID for an infiniband device etc.)
HardwareAddress string
}
// NormalizeMACAddress replaces dashes with colons and lowercases the MAC
// address provided as input.
func NormalizeMACAddress(mac string) string {
return strings.ToLower(
strings.Replace(mac, "-", ":", -1),
)
}
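// Illustrative sketch of NormalizeMACAddress: dashes become colons and the
// result is lower-cased.
//
//	network.NormalizeMACAddress("AA-BB-CC-DD-EE-FF") // "aa:bb:cc:dd:ee:ff"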
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"os/exec"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
// Overridden by tests
var getCommandOutput = func(cmd *exec.Cmd) ([]byte, error) { return cmd.Output() }
// OvsManagedBridgeInterfaces returns a filtered version of ifaceList that only
// contains bridge interfaces managed by openvswitch.
func OvsManagedBridgeInterfaces(ifaceList InterfaceInfos) (InterfaceInfos, error) {
ovsBridges, err := OvsManagedBridges()
if err != nil {
return nil, errors.Trace(err)
}
return ifaceList.Filter(func(iface InterfaceInfo) bool {
return ovsBridges.Contains(iface.InterfaceName)
}), nil
}
// OvsManagedBridges returns a set containing the names of all bridge
// interfaces that are managed by openvswitch.
func OvsManagedBridges() (set.Strings, error) {
haveOvsCli, err := ovsToolsAvailable()
if err != nil {
return nil, errors.Trace(err)
} else if !haveOvsCli { // nothing to do if the tools are missing
return nil, nil
}
// Query list of ovs-managed device names
res, err := getCommandOutput(exec.Command("ovs-vsctl", "list-br"))
if err != nil {
return nil, errors.Annotate(err, "querying ovs-managed bridges via ovs-vsctl")
}
ovsBridges := set.NewStrings()
for _, iface := range strings.Split(string(res), "\n") {
if iface = strings.TrimSpace(iface); iface != "" {
ovsBridges.Add(iface)
}
}
return ovsBridges, nil
}
func ovsToolsAvailable() (bool, error) {
if _, err := exec.LookPath("ovs-vsctl"); err != nil {
// OVS tools not installed
if execErr, isExecErr := err.(*exec.Error); isExecErr && execErr.Unwrap() == exec.ErrNotFound {
return false, nil
}
return false, errors.Annotate(err, "looking for ovs-vsctl")
}
return true, nil
}
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/juju/errors"
)
// GroupedPortRanges represents a list of PortRange instances grouped by a
// particular feature. (e.g. endpoint, unit name)
type GroupedPortRanges map[string][]PortRange
// MergePendingOpenPortRanges merges the provided *open* port ranges into this
// group. Ranges that already exist in the group are skipped. The method
// returns false, and the group is left unmodified, if none of the provided
// ranges were new.
func (grp GroupedPortRanges) MergePendingOpenPortRanges(pendingOpenRanges GroupedPortRanges) bool {
var modified bool
for endpointName, pendingRanges := range pendingOpenRanges {
for _, pendingRange := range pendingRanges {
if grp.rangeExistsForEndpoint(endpointName, pendingRange) {
// Exists, no op for opening.
continue
}
grp[endpointName] = append(grp[endpointName], pendingRange)
modified = true
}
}
return modified
}
// MergePendingClosePortRanges removes the provided *closed* port ranges from
// this group. Ranges that do not exist in the group are skipped. The method
// returns false, and the group is left unmodified, if none of the provided
// ranges were present.
func (grp GroupedPortRanges) MergePendingClosePortRanges(pendingCloseRanges GroupedPortRanges) bool {
var modified bool
for endpointName, pendingRanges := range pendingCloseRanges {
for _, pendingRange := range pendingRanges {
if !grp.rangeExistsForEndpoint(endpointName, pendingRange) {
// Not exists, no op for closing.
continue
}
modified = grp.removePortRange(endpointName, pendingRange)
}
}
return modified
}
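// Illustrative sketch of the merge methods (the endpoint name is
// hypothetical): opening a range that is not yet present modifies the group;
// closing a range that was never opened does not.
//
//	grp := network.GroupedPortRanges{"website": {network.MustParsePortRange("80/tcp")}}
//	grp.MergePendingOpenPortRanges(network.GroupedPortRanges{
//		"website": {network.MustParsePortRange("443/tcp")},
//	}) // true; 443/tcp is added under "website"
//	grp.MergePendingClosePortRanges(network.GroupedPortRanges{
//		"website": {network.MustParsePortRange("8080/tcp")},
//	}) // false; 8080/tcp was never open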
func (grp GroupedPortRanges) removePortRange(endpointName string, portRange PortRange) bool {
var modified bool
existingRanges := grp[endpointName]
for i, v := range existingRanges {
if v != portRange {
continue
}
existingRanges = append(existingRanges[:i], existingRanges[i+1:]...)
if len(existingRanges) == 0 {
delete(grp, endpointName)
} else {
grp[endpointName] = existingRanges
}
modified = true
}
return modified
}
func (grp GroupedPortRanges) rangeExistsForEndpoint(endpointName string, portRange PortRange) bool {
if len(grp[endpointName]) == 0 {
return false
}
for _, existingRange := range grp[endpointName] {
if existingRange == portRange {
return true
}
}
return false
}
// UniquePortRanges returns the unique set of PortRanges in this group.
func (grp GroupedPortRanges) UniquePortRanges() []PortRange {
var allPorts []PortRange
for _, portRanges := range grp {
allPorts = append(allPorts, portRanges...)
}
uniquePortRanges := UniquePortRanges(allPorts)
SortPortRanges(uniquePortRanges)
return uniquePortRanges
}
// Clone returns a copy of this port range grouping.
func (grp GroupedPortRanges) Clone() GroupedPortRanges {
if grp == nil {
return nil
}
grpCopy := make(GroupedPortRanges, len(grp))
for k, v := range grp {
grpCopy[k] = append([]PortRange(nil), v...)
}
return grpCopy
}
// EqualTo returns true if this set of grouped port ranges are equal to other.
func (grp GroupedPortRanges) EqualTo(other GroupedPortRanges) bool {
if len(grp) != len(other) {
return false
}
for groupKey, portRanges := range grp {
otherPortRanges, found := other[groupKey]
if !found || len(portRanges) != len(otherPortRanges) {
return false
}
SortPortRanges(portRanges)
SortPortRanges(otherPortRanges)
for i, pr := range portRanges {
if pr != otherPortRanges[i] {
return false
}
}
}
return true
}
// PortRange represents a single range of ports on a particular subnet.
type PortRange struct {
FromPort int
ToPort int
Protocol string
}
// Validate determines if the port range is valid.
func (p PortRange) Validate() error {
proto := strings.ToLower(p.Protocol)
if proto != "tcp" && proto != "udp" && proto != "icmp" {
return errors.Errorf(`invalid protocol %q, expected "tcp", "udp", or "icmp"`, proto)
}
if proto == "icmp" {
if p.FromPort == p.ToPort && p.FromPort == -1 {
return nil
}
return errors.Errorf(`protocol "icmp" doesn't support any ports; got "%v"`, p.FromPort)
}
if p.FromPort > p.ToPort {
return errors.Errorf("invalid port range %s", p)
} else if p.FromPort < 0 || p.FromPort > 65535 || p.ToPort < 0 || p.ToPort > 65535 {
return errors.Errorf("port range bounds must be between 0 and 65535, got %d-%d", p.FromPort, p.ToPort)
}
return nil
}
// Length returns the number of ports in the range. If the range is not valid,
// it returns 0. A valid ICMP range is represented as (-1, -1), so its length
// evaluates to 1.
func (p PortRange) Length() int {
if err := p.Validate(); err != nil {
return 0
}
return (p.ToPort - p.FromPort) + 1
}
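// Illustrative sketch of Validate and Length for a hypothetical TCP range:
//
//	pr := network.PortRange{FromPort: 8000, ToPort: 8099, Protocol: "tcp"}
//	pr.Validate() // nil
//	pr.Length()   // 100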
// ConflictsWith determines if the two port ranges conflict.
func (p PortRange) ConflictsWith(other PortRange) bool {
if p.Protocol != other.Protocol {
return false
}
return p.ToPort >= other.FromPort && other.ToPort >= p.FromPort
}
// SanitizeBounds returns a copy of the port range, which is guaranteed to have
// FromPort <= ToPort and both FromPort and ToPort fit into the valid range
// from 1 to 65535, inclusive. ICMP ranges are returned unchanged.
func (p PortRange) SanitizeBounds() PortRange {
res := p
if res.Protocol == "icmp" {
return res
}
if res.FromPort > res.ToPort {
res.FromPort, res.ToPort = res.ToPort, res.FromPort
}
for _, bound := range []*int{&res.FromPort, &res.ToPort} {
switch {
case *bound <= 0:
*bound = 1
case *bound > 65535:
*bound = 65535
}
}
return res
}
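// Illustrative sketch of SanitizeBounds: out-of-order and out-of-range bounds
// are corrected.
//
//	network.PortRange{FromPort: 70000, ToPort: 0, Protocol: "tcp"}.SanitizeBounds()
//	// PortRange{FromPort: 1, ToPort: 65535, Protocol: "tcp"}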
// String returns a formatted representation of this port range.
func (p PortRange) String() string {
protocol := strings.ToLower(p.Protocol)
if protocol == "icmp" {
return protocol
}
if p.FromPort == p.ToPort {
return fmt.Sprintf("%d/%s", p.FromPort, protocol)
}
return fmt.Sprintf("%d-%d/%s", p.FromPort, p.ToPort, protocol)
}
func (p PortRange) GoString() string {
return p.String()
}
// LessThan returns true if other should appear after p when sorting a port
// range list.
func (p PortRange) LessThan(other PortRange) bool {
if p.Protocol != other.Protocol {
return p.Protocol < other.Protocol
}
if p.FromPort != other.FromPort {
return p.FromPort < other.FromPort
}
return p.ToPort < other.ToPort
}
// SortPortRanges sorts the given ports, first by protocol, then by number.
func SortPortRanges(portRanges []PortRange) {
sort.Slice(portRanges, func(i, j int) bool {
return portRanges[i].LessThan(portRanges[j])
})
}
// UniquePortRanges removes any duplicate port ranges from the input and
// returns the de-duplicated list.
func UniquePortRanges(portRanges []PortRange) []PortRange {
var (
res []PortRange
processed = make(map[PortRange]struct{})
)
for _, pr := range portRanges {
if _, seen := processed[pr]; seen {
continue
}
res = append(res, pr)
processed[pr] = struct{}{}
}
return res
}
// ParsePortRange builds a PortRange from the provided string. If the
// string does not include a protocol then "tcp" is used. Validate()
// gets called on the result before returning. If validation fails the
// invalid PortRange is still returned.
// Example strings: "80/tcp", "443", "12345-12349/udp", "icmp".
func ParsePortRange(inPortRange string) (PortRange, error) {
// Extract the protocol.
protocol := "tcp"
parts := strings.SplitN(inPortRange, "/", 2)
if len(parts) == 2 {
inPortRange = parts[0]
protocol = parts[1]
}
// Parse the ports.
portRange, err := parsePortRange(inPortRange)
if err != nil {
return portRange, errors.Trace(err)
}
if portRange.FromPort == -1 {
protocol = "icmp"
}
portRange.Protocol = protocol
return portRange, portRange.Validate()
}
// MustParsePortRange converts a raw port-range string into a PortRange.
// If the string is invalid, the function panics.
func MustParsePortRange(portRange string) PortRange {
portrange, err := ParsePortRange(portRange)
if err != nil {
panic(err)
}
return portrange
}
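// Illustrative sketch of ParsePortRange: the protocol defaults to "tcp" when
// omitted.
//
//	pr, err := network.ParsePortRange("12345-12349/udp")
//	// pr: {FromPort: 12345, ToPort: 12349, Protocol: "udp"}, err: nil
//	pr, err = network.ParsePortRange("443")
//	// pr: {FromPort: 443, ToPort: 443, Protocol: "tcp"}, err: nil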
func parsePortRange(portRange string) (PortRange, error) {
var result PortRange
var start, end int
parts := strings.Split(portRange, "-")
if len(parts) > 2 {
return result, errors.Errorf("invalid port range %q", portRange)
}
if len(parts) == 1 {
if parts[0] == "icmp" {
start, end = -1, -1
} else {
port, err := strconv.Atoi(parts[0])
if err != nil {
return result, errors.Annotatef(err, "invalid port %q", portRange)
}
start, end = port, port
}
} else {
var err error
if start, err = strconv.Atoi(parts[0]); err != nil {
return result, errors.Annotatef(err, "invalid port %q", parts[0])
}
if end, err = strconv.Atoi(parts[1]); err != nil {
return result, errors.Annotatef(err, "invalid port %q", parts[1])
}
}
result = PortRange{
FromPort: start,
ToPort: end,
}
return result, nil
}
// CombinePortRanges groups together all port ranges according to
// protocol, and then combines them into contiguous port ranges.
// NOTE: Juju only allows its model to contain non-overlapping port ranges.
// This method operates on that assumption.
func CombinePortRanges(ranges ...PortRange) []PortRange {
SortPortRanges(ranges)
var result []PortRange
var current *PortRange
for _, pr := range ranges {
thispr := pr
if current == nil {
current = &thispr
continue
}
if pr.Protocol == current.Protocol && pr.FromPort == current.ToPort+1 {
current.ToPort = thispr.ToPort
continue
}
result = append(result, *current)
current = &thispr
}
if current != nil {
result = append(result, *current)
}
return result
}
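// Illustrative sketch of CombinePortRanges: adjacent ranges with the same
// protocol are merged into one contiguous range.
//
//	network.CombinePortRanges(
//		network.MustParsePortRange("80/tcp"),
//		network.MustParsePortRange("81-90/tcp"),
//		network.MustParsePortRange("100/udp"),
//	)
//	// [80-90/tcp 100/udp]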
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"net"
"os"
"path/filepath"
"strings"
"github.com/juju/collections/set"
)
// SysClassNetPath is the full Linux SYSFS path containing
// information about each network interface on the system.
// TODO (manadart 2021-02-12): This remains in the main "source.go" module
// because there was previously only one ConfigSource implementation,
// which presumably did not work on Windows.
// When the netlinkConfigSource was introduced for use on Linux,
// we retained the old universal config source for use on Windows.
// If there comes a time when we properly implement a Windows source,
// this should be relocated to the Linux module and an appropriate counterpart
// introduced for Windows.
const SysClassNetPath = "/sys/class/net"
// ConfigSourceNIC describes a network interface detected on the local machine
// by an implementation of ConfigSource.
type ConfigSourceNIC interface {
// Name returns the name of the network interface; E.g. "eth0".
Name() string
// Type returns the type of the interface - Ethernet, VLAN, Loopback etc.
// TODO (manadart 2021-03-03): We do not recognise device types such as
// veth, tuntap, macvtap et al. Our parsing falls back to ethernet for such
// devices, which we should change in order to have a better informed
// networking model.
Type() LinkLayerDeviceType
// Index returns the index of the interface.
Index() int
// HardwareAddr returns the hardware address of the interface.
// It is the MAC address for ethernet devices.
HardwareAddr() net.HardwareAddr
// Addresses returns IP addresses associated with the network interface.
Addresses() ([]ConfigSourceAddr, error)
// MTU returns the maximum transmission unit for the interface.
MTU() int
// IsUp returns true if the interface is in the "up" state.
IsUp() bool
}
// ConfigSourceAddr describes addresses detected on a network interface
// represented by an implementation of ConfigSourceAddr.
type ConfigSourceAddr interface {
// IP returns the address in net.IP form.
IP() net.IP
// IPNet returns the subnet corresponding to the address,
// provided that it can be determined.
IPNet() *net.IPNet
// IsSecondary returns true if this address can be determined not to be
// the primary address of its NIC.
// Such addresses are added by HA setups like Corosync+Pacemaker.
IsSecondary() bool
// String returns the address in string form,
// including the subnet mask if known.
String() string
}
// ConfigSource defines the necessary calls to obtain
// the network configuration of a machine.
type ConfigSource interface {
// Interfaces returns information about all
// network interfaces on the machine.
Interfaces() ([]ConfigSourceNIC, error)
// DefaultRoute returns the gateway IP address and device name of the
// default route on the machine. If no default route is known,
// zero values are returned.
DefaultRoute() (net.IP, string, error)
// OvsManagedBridges returns the names of network interfaces that
// correspond to OVS-managed bridges.
OvsManagedBridges() (set.Strings, error)
// GetBridgePorts returns the names of network interfaces that are ports of
// the bridge with the input device name.
GetBridgePorts(string) []string
}
// ParseInterfaceType parses the DEVTYPE attribute from the Linux kernel
// userspace SYSFS location "<sysPath>/<interfaceName>/uevent" and returns it as
// a LinkLayerDeviceType. SysClassNetPath should be passed as sysPath. Returns
// UnknownDevice if the type cannot be reliably determined for any reason.
// Example call: network.ParseInterfaceType(network.SysClassNetPath, "br-eth1")
// TODO (manadart 2021-02-12): As with SysClassNetPath above, specific
// implementations should be sought for this that are OS-dependent.
func ParseInterfaceType(sysPath, interfaceName string) LinkLayerDeviceType {
const deviceType = "DEVTYPE="
location := filepath.Join(sysPath, interfaceName, "uevent")
data, err := os.ReadFile(location)
if err != nil {
logger.Debugf(context.TODO(), "ignoring error reading %q: %v", location, err)
return UnknownDevice
}
var devType string
lines := strings.Fields(string(data))
for _, line := range lines {
if !strings.HasPrefix(line, deviceType) {
continue
}
devType = strings.TrimPrefix(line, deviceType)
switch devType {
case "bridge":
return BridgeDevice
case "vlan":
return VLAN8021QDevice
case "bond":
return BondDevice
case "":
// DEVTYPE is not present for some types, like Ethernet and loopback
// interfaces, so if missing do not try to guess.
break
}
}
return UnknownDevice
}
// GetBridgePorts extracts and returns the names of all interfaces configured as
// ports of the given bridgeName from the Linux kernel userspace SYSFS location
// "<sysPath/<bridgeName>/brif/*". SysClassNetPath should be passed as sysPath.
// Returns an empty result if the ports cannot be determined reliably for any
// reason, or if there are no configured ports for the bridge.
// Example call: network.GetBridgePorts(network.SysClassNetPath, "br-eth1")
// TODO (manadart 2021-02-12): As with SysClassNetPath above, specific
// implementations should be sought for this that are OS-dependent.
func GetBridgePorts(sysPath, bridgeName string) []string {
portsGlobPath := filepath.Join(sysPath, bridgeName, "brif", "*")
// Glob ignores I/O errors and can only return ErrBadPattern, which we treat
// as no results, but for debugging we're still logging the error.
paths, err := filepath.Glob(portsGlobPath)
if err != nil {
logger.Debugf(context.TODO(), "ignoring error traversing path %q: %v", portsGlobPath, err)
}
if len(paths) == 0 {
return nil
}
// We need to convert full paths like /sys/class/net/br-eth0/brif/eth0 to
// just names.
names := make([]string, len(paths))
for i := range paths {
names[i] = filepath.Base(paths[i])
}
return names
}
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
//go:build linux
package network
import (
"net"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
// netlinkAddr implements ConfigSourceAddr based on the
// netlink implementation of a network address.
type netlinkAddr struct {
addr *netlink.Addr
}
// IP (ConfigSourceAddr) is a simple property accessor.
func (a *netlinkAddr) IP() net.IP {
return a.addr.IP
}
// IPNet (ConfigSourceAddr) is a simple property accessor.
func (a *netlinkAddr) IPNet() *net.IPNet {
return a.addr.IPNet
}
// IsSecondary (ConfigSourceAddr) uses the IFA_F_SECONDARY flag to return
// whether this address is not the primary one for the NIC.
func (a *netlinkAddr) IsSecondary() bool {
return a.addr.Flags&unix.IFA_F_SECONDARY > 0
}
// String (ConfigSourceAddr) is a simple property accessor.
func (a *netlinkAddr) String() string {
return a.addr.String()
}
// netlinkNIC implements ConfigSourceNIC by wrapping a netlink Link.
type netlinkNIC struct {
nic netlink.Link
getAddrs func(netlink.Link) ([]netlink.Addr, error)
}
// Name returns the name of the device.
func (n netlinkNIC) Name() string {
return n.nic.Attrs().Name
}
// Type returns the interface type of the device.
func (n netlinkNIC) Type() LinkLayerDeviceType {
switch n.nic.Type() {
case "bridge":
return BridgeDevice
case "vlan":
return VLAN8021QDevice
case "bond":
return BondDevice
case "vxlan":
return VXLANDevice
}
if n.nic.Attrs().Flags&net.FlagLoopback > 0 {
return LoopbackDevice
}
// See comment on super-method.
// This is incorrect for veth, tuntap, macvtap et al.
return EthernetDevice
}
// Index returns the index of the device.
func (n netlinkNIC) Index() int {
return n.nic.Attrs().Index
}
// HardwareAddr returns the hardware address of the device.
func (n netlinkNIC) HardwareAddr() net.HardwareAddr {
return n.nic.Attrs().HardwareAddr
}
// Addresses returns all IP addresses associated with the device.
func (n netlinkNIC) Addresses() ([]ConfigSourceAddr, error) {
rawAddrs, err := n.getAddrs(n.nic)
if err != nil {
return nil, errors.Trace(err)
}
addrs := make([]ConfigSourceAddr, len(rawAddrs))
for i := range rawAddrs {
addrs[i] = &netlinkAddr{addr: &rawAddrs[i]}
}
return addrs, nil
}
// MTU returns the maximum transmission unit for the device.
func (n netlinkNIC) MTU() int {
return n.nic.Attrs().MTU
}
// IsUp returns true if the interface is in the "up" state.
func (n netlinkNIC) IsUp() bool {
return n.nic.Attrs().Flags&net.FlagUp > 0
}
type netlinkConfigSource struct {
sysClassNetPath string
linkList func() ([]netlink.Link, error)
}
// Interfaces returns the network interfaces on the machine.
func (s *netlinkConfigSource) Interfaces() ([]ConfigSourceNIC, error) {
links, err := s.linkList()
if err != nil {
return nil, errors.Trace(err)
}
getAddrs := func(l netlink.Link) ([]netlink.Addr, error) {
return netlink.AddrList(l, netlink.FAMILY_ALL)
}
nics := make([]ConfigSourceNIC, len(links))
for i := range links {
nics[i] = &netlinkNIC{
nic: links[i],
getAddrs: getAddrs,
}
}
return nics, nil
}
// OvsManagedBridges implements ConfigSource.
func (*netlinkConfigSource) OvsManagedBridges() (set.Strings, error) {
return OvsManagedBridges()
}
// DefaultRoute implements ConfigSource.
func (*netlinkConfigSource) DefaultRoute() (net.IP, string, error) {
return GetDefaultRoute()
}
// GetBridgePorts implements ConfigSource.
func (s *netlinkConfigSource) GetBridgePorts(bridgeName string) []string {
return GetBridgePorts(s.sysClassNetPath, bridgeName)
}
// DefaultConfigSource returns a ConfigSource backed by the
// netlink library, to be used with GetObservedNetworkConfig().
func DefaultConfigSource() ConfigSource {
return &netlinkConfigSource{
sysClassNetPath: SysClassNetPath,
linkList: netlink.LinkList,
}
}
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"fmt"
"net"
"regexp"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
const (
// AlphaSpaceId is the ID of the alpha network space.
// Application endpoints are bound to this space by default
// if no explicit binding is specified.
AlphaSpaceId = "0"
// AlphaSpaceName is the name of the alpha network space.
AlphaSpaceName = "alpha"
)
// SpaceLookup describes the ability to get a complete
// network topology, as understood by Juju.
type SpaceLookup interface {
GetAllSpaces(ctx context.Context) (SpaceInfos, error)
}
// SubnetLookup describes retrieving all subnets within a known set of spaces.
type SubnetLookup interface {
AllSubnetInfos() (SubnetInfos, error)
}
// SpaceName is the name of a network space.
type SpaceName string
// SpaceInfo defines a network space.
type SpaceInfo struct {
// ID is the unique identifier for the space.
// TODO (manadart 2020-04-10): This should be a typed ID.
ID string
// Name is the name of the space.
// It is used by operators for identifying a space and should be unique.
Name SpaceName
// ProviderId is the provider's unique identifier for the space,
// such as used by MAAS.
ProviderId Id
// Subnets are the subnets that have been grouped into this network space.
Subnets SubnetInfos
}
// SpaceInfos is a collection of spaces.
type SpaceInfos []SpaceInfo
// AllSpaceInfos satisfies the SpaceLookup interface.
// It is useful for passing to conversions where we already have the spaces
// materialised and don't need to pull them from the DB again.
func (s SpaceInfos) AllSpaceInfos() (SpaceInfos, error) {
return s, nil
}
// AllSubnetInfos returns all subnets contained in this collection of spaces.
// Since a subnet can only be in one space, we can simply accrue them all
// without the need for duplicate checking.
// As with AllSpaceInfos, it implements an interface that can be used to
// indirect state.
func (s SpaceInfos) AllSubnetInfos() (SubnetInfos, error) {
subs := make(SubnetInfos, 0)
for _, space := range s {
for _, sub := range space.Subnets {
subs = append(subs, sub)
}
}
return subs, nil
}
// MoveSubnets returns a new topology representing
// the movement of subnets to a new network space.
func (s SpaceInfos) MoveSubnets(subnetIDs IDSet, spaceName string) (SpaceInfos, error) {
newSpace := s.GetByName(spaceName)
if newSpace == nil {
return nil, errors.NotFoundf("space with name %q", spaceName)
}
// We return a copy, not mutating the original.
newSpaces := make(SpaceInfos, len(s))
var movers SubnetInfos
found := MakeIDSet()
// First accrue the moving subnets and remove them from their old spaces.
for i, space := range s {
newSpaces[i] = space
newSpaces[i].Subnets = nil
for _, sub := range space.Subnets {
if subnetIDs.Contains(sub.ID) {
// Indicate that we found the subnet,
// but don't do anything if it is already in the space.
found.Add(sub.ID)
if string(space.Name) != spaceName {
sub.SpaceID = newSpace.ID
sub.SpaceName = spaceName
sub.ProviderSpaceId = newSpace.ProviderId
movers = append(movers, sub)
}
continue
}
newSpaces[i].Subnets = append(newSpaces[i].Subnets, sub)
}
}
// Ensure that the input did not include subnets not in this collection.
if diff := subnetIDs.Difference(found); len(diff) != 0 {
return nil, errors.NotFoundf("subnet IDs %v", diff.SortedValues())
}
// Then put them against the new one.
// We have to find the space again in this collection,
// because newSpace was returned from a copy.
for i, space := range newSpaces {
if string(space.Name) == spaceName {
newSpaces[i].Subnets = append(space.Subnets, movers...)
break
}
}
return newSpaces, nil
}
// String returns the quoted, comma-delimited names of the spaces in the
// collection, or <none> if the collection is empty.
func (s SpaceInfos) String() string {
if len(s) == 0 {
return "<none>"
}
names := make([]string, len(s))
for i, v := range s {
names[i] = fmt.Sprintf("%q", string(v.Name))
}
return strings.Join(names, ", ")
}
// Names returns a string slice with each of the space names in the collection.
func (s SpaceInfos) Names() []string {
names := make([]string, len(s))
for i, v := range s {
names[i] = string(v.Name)
}
return names
}
// IDs returns a string slice with each of the space ids in the collection.
func (s SpaceInfos) IDs() []string {
ids := make([]string, len(s))
for i, v := range s {
ids[i] = v.ID
}
return ids
}
// GetByID returns a reference to the space with the input ID
// if it exists in the collection. Otherwise nil is returned.
func (s SpaceInfos) GetByID(id string) *SpaceInfo {
for _, space := range s {
if space.ID == id {
return &space
}
}
return nil
}
// GetByName returns a reference to the space with the input name
// if it exists in the collection. Otherwise nil is returned.
func (s SpaceInfos) GetByName(name string) *SpaceInfo {
for _, space := range s {
if string(space.Name) == name {
return &space
}
}
return nil
}
// ContainsID returns true if the collection contains a
// space with the given ID.
func (s SpaceInfos) ContainsID(id string) bool {
return s.GetByID(id) != nil
}
// ContainsName returns true if the collection contains a
// space with the given name.
func (s SpaceInfos) ContainsName(name string) bool {
return s.GetByName(name) != nil
}
// Minus returns a new SpaceInfos representing all the
// values in the target that are not in the parameter.
// Value matching is done by ID.
func (s SpaceInfos) Minus(other SpaceInfos) SpaceInfos {
result := make(SpaceInfos, 0)
for _, value := range s {
if !other.ContainsID(value.ID) {
result = append(result, value)
}
}
return result
}
func (s SpaceInfos) InferSpaceFromAddress(addr string) (*SpaceInfo, error) {
var (
ip = net.ParseIP(addr)
match *SpaceInfo
)
nextSpace:
for spIndex, space := range s {
for _, subnet := range space.Subnets {
ipNet, err := subnet.ParsedCIDRNetwork()
if err != nil {
// Subnets should always have a valid CIDR
return nil, errors.Trace(err)
}
if ipNet.Contains(ip) {
if match == nil {
match = &s[spIndex]
// We still need to check other spaces
// in case we have multiple networks
// with the same subnet CIDRs
continue nextSpace
}
return nil, errors.Errorf(
"unable to infer space for address %q: address matches the same CIDR in multiple spaces", addr)
}
}
}
if match == nil {
return nil, errors.NewNotFound(nil, fmt.Sprintf("unable to infer space for address %q", addr))
}
return match, nil
}
func (s SpaceInfos) InferSpaceFromCIDRAndSubnetID(cidr, providerSubnetID string) (*SpaceInfo, error) {
for _, space := range s {
for _, subnet := range space.Subnets {
if subnet.CIDR == cidr && string(subnet.ProviderId) == providerSubnetID {
return &space, nil
}
}
}
return nil, errors.NewNotFound(
nil, fmt.Sprintf("unable to infer space for CIDR %q and provider subnet ID %q", cidr, providerSubnetID))
}
// SubnetCIDRsBySpaceID returns the set of known subnet CIDRs grouped by the
// space ID they belong to.
func (s SpaceInfos) SubnetCIDRsBySpaceID() map[string][]string {
res := make(map[string][]string)
for _, space := range s {
for _, sub := range space.Subnets {
res[space.ID] = append(res[space.ID], sub.CIDR)
}
}
return res
}
var (
invalidSpaceNameChars = regexp.MustCompile("[^0-9a-z-]")
dashPrefix = regexp.MustCompile("^-*")
dashSuffix = regexp.MustCompile("-*$")
multipleDashes = regexp.MustCompile("--+")
)
// ConvertSpaceName is used to massage provider-sourced (i.e. MAAS)
// space names so that they conform to Juju's space name rules.
func ConvertSpaceName(name string, existing set.Strings) string {
// Lower case and replace spaces with dashes.
name = strings.Replace(name, " ", "-", -1)
name = strings.ToLower(name)
// Remove any character not in the set "-", "a-z", "0-9".
name = invalidSpaceNameChars.ReplaceAllString(name, "")
// Remove any dashes at the beginning and end.
name = dashPrefix.ReplaceAllString(name, "")
name = dashSuffix.ReplaceAllString(name, "")
// Replace multiple dashes with a single dash.
name = multipleDashes.ReplaceAllString(name, "-")
// If the name had only invalid characters, give it a new name.
if name == "" {
name = "empty"
}
// If this name is in use add a numerical suffix.
if existing.Contains(name) {
counter := 2
for existing.Contains(fmt.Sprintf("%s-%d", name, counter)) {
counter++
}
name = fmt.Sprintf("%s-%d", name, counter)
}
return name
}
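// Illustrative sketch of ConvertSpaceName (the provider names are
// hypothetical): invalid characters are stripped and a numeric suffix is
// appended when the name is already taken.
//
//	network.ConvertSpaceName("My Space (PROD)", set.NewStrings())
//	// "my-space-prod"
//	network.ConvertSpaceName("My Space (PROD)", set.NewStrings("my-space-prod"))
//	// "my-space-prod-2"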
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import (
"context"
"math/big"
"net"
"sort"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/juju/core/life"
)
// SubnetInfo is a source-agnostic representation of a subnet.
// It may originate from state, or from a provider.
type SubnetInfo struct {
// ID is the unique ID of the subnet.
ID Id
// CIDR of the network, in 123.45.67.89/24 format.
CIDR string
// Memoized value for the parsed network for the above CIDR.
parsedCIDRNetwork *net.IPNet
// ProviderId is a provider-specific subnet ID.
ProviderId Id
// ProviderSpaceId holds the provider ID of the space associated
// with this subnet. Can be empty if not supported.
ProviderSpaceId Id
// ProviderNetworkId holds the provider ID of the network
// containing this subnet, for example VPC id for EC2.
ProviderNetworkId Id
// VLANTag needs to be between 1 and 4094 for VLANs and 0 for
// normal networks. It's defined by IEEE 802.1Q standard, and used
// to define a VLAN network. For more information, see:
// http://en.wikipedia.org/wiki/IEEE_802.1Q.
VLANTag int
// AvailabilityZones describes which availability zones this
// subnet is in. It can be empty if the provider does not support
// availability zones.
AvailabilityZones []string
// SpaceID is the ID of the space the subnet is associated with.
// The default value should be AlphaSpaceId. It can be empty if
// the subnet is returned from a networkingEnviron. SpaceID is
// preferred over SpaceName in state and non-networkingEnviron use.
SpaceID string
// SpaceName is the name of the space the subnet is associated with.
// An empty string indicates either that the subnet is part of
// AlphaSpaceName, or that SpaceID is set and should be used instead.
// It should primarily be used in a networkingEnviron.
SpaceName string
// Life represents the current life-cycle status of the subnet.
Life life.Value
}
// Validate validates the subnet, checking the CIDR, and VLANTag, if present.
func (s *SubnetInfo) Validate() error {
if s.CIDR == "" {
return errors.Errorf("missing CIDR")
} else if _, err := s.ParsedCIDRNetwork(); err != nil {
return errors.Trace(err)
}
if s.VLANTag < 0 || s.VLANTag > 4094 {
return errors.Errorf("invalid VLAN tag %d: must be between 0 and 4094", s.VLANTag)
}
return nil
}
// ParsedCIDRNetwork returns the network represented by the CIDR field.
func (s *SubnetInfo) ParsedCIDRNetwork() (*net.IPNet, error) {
// Memoize the CIDR the first time this method is called or if the
// CIDR field has changed.
if s.parsedCIDRNetwork == nil || s.parsedCIDRNetwork.String() != s.CIDR {
_, ipNet, err := net.ParseCIDR(s.CIDR)
if err != nil {
return nil, err
}
s.parsedCIDRNetwork = ipNet
}
return s.parsedCIDRNetwork, nil
}
// SubnetInfos is a collection of subnets.
type SubnetInfos []SubnetInfo
// SpaceIDs returns the set of space IDs that these subnets are in.
func (s SubnetInfos) SpaceIDs() set.Strings {
spaceIDs := set.NewStrings()
for _, sub := range s {
spaceIDs.Add(sub.SpaceID)
}
return spaceIDs
}
// ContainsID returns true if the collection contains a
// subnet with the given ID.
func (s SubnetInfos) ContainsID(id Id) bool {
return s.GetByID(id) != nil
}
// GetByID returns a reference to the subnet with the input ID if one is found.
func (s SubnetInfos) GetByID(id Id) *SubnetInfo {
for _, sub := range s {
if sub.ID == id {
return &sub
}
}
return nil
}
// GetByCIDR returns all subnets in the collection
// with a CIDR matching the input.
func (s SubnetInfos) GetByCIDR(cidr string) (SubnetInfos, error) {
if !IsValidCIDR(cidr) {
return nil, errors.NotValidf("CIDR %q", cidr)
}
var matching SubnetInfos
for _, sub := range s {
if sub.CIDR == cidr {
matching = append(matching, sub)
}
}
if len(matching) != 0 {
return matching, nil
}
// Some providers carve subnets into smaller CIDRs and assign addresses from
// the carved subnets to the machines. If we were not able to find a direct
// CIDR match, fall back to checking whether the input CIDR is a sub-CIDR of
// any of the subnets in the collection.
firstIP, lastIP, err := IPRangeForCIDR(cidr)
if err != nil {
return nil, errors.Annotatef(err, "unable to extract first and last IP addresses from CIDR %q", cidr)
}
for _, sub := range s {
subNet, err := sub.ParsedCIDRNetwork()
if err != nil { // this should not happen; but let's be paranoid.
logger.Warningf(context.TODO(), "unable to parse CIDR %q for subnet %q", sub.CIDR, sub.ID)
continue
}
if subNet.Contains(firstIP) && subNet.Contains(lastIP) {
matching = append(matching, sub)
}
}
return matching, nil
}
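// Illustrative sketch of GetByCIDR (the subnet is hypothetical): a query CIDR
// carved out of a larger subnet still matches via the sub-CIDR fallback.
//
//	subs := network.SubnetInfos{{ID: "1", CIDR: "10.0.0.0/16"}}
//	matching, err := subs.GetByCIDR("10.0.12.0/24")
//	// matching contains the 10.0.0.0/16 subnet, err: nil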
// GetByAddress returns the subnets that, based on their IP ranges,
// include the input IP address.
func (s SubnetInfos) GetByAddress(addr string) (SubnetInfos, error) {
ip := net.ParseIP(addr)
if ip == nil {
return nil, errors.NotValidf("%q as IP address", addr)
}
var subs SubnetInfos
for _, sub := range s {
ipNet, err := sub.ParsedCIDRNetwork()
if err != nil {
return nil, errors.Trace(err)
}
if ipNet.Contains(ip) {
subs = append(subs, sub)
}
}
return subs, nil
}
// AllSubnetInfos implements SubnetLookup
// by returning all of the subnets.
func (s SubnetInfos) AllSubnetInfos() (SubnetInfos, error) {
return s, nil
}
// EqualTo returns true if this slice of SubnetInfo is equal to the input.
func (s SubnetInfos) EqualTo(other SubnetInfos) bool {
if len(s) != len(other) {
return false
}
SortSubnetInfos(s)
SortSubnetInfos(other)
for i := 0; i < len(s); i++ {
if s[i].ID != other[i].ID {
return false
}
}
return true
}
func (s SubnetInfos) Len() int { return len(s) }
func (s SubnetInfos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s SubnetInfos) Less(i, j int) bool {
return s[i].ID < s[j].ID
}
// SortSubnetInfos sorts subnets by ID.
func SortSubnetInfos(s SubnetInfos) {
sort.Sort(s)
}
// IsValidCIDR returns whether cidr is a valid subnet CIDR.
func IsValidCIDR(cidr string) bool {
_, ipNet, err := net.ParseCIDR(cidr)
if err == nil && ipNet.String() == cidr {
return true
}
return false
}
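// Illustrative sketch of IsValidCIDR: only canonical network CIDRs are
// accepted.
//
//	network.IsValidCIDR("10.0.0.0/24") // true
//	network.IsValidCIDR("10.0.0.1/24") // false: host bits are set
//	network.IsValidCIDR("10.0.0.0")    // false: no prefix length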
// FindSubnetIDsForAvailabilityZone returns the IDs of the subnets that are in
// the availability zone with the given name, based on the supplied mapping of
// subnet IDs to zone names.
//
// Returns an error if no subnets match the zone name.
func FindSubnetIDsForAvailabilityZone(zoneName string, subnetsToZones map[Id][]string) ([]Id, error) {
matchingSubnetIDs := set.NewStrings()
for subnetID, zones := range subnetsToZones {
zonesSet := set.NewStrings(zones...)
if zonesSet.Size() == 0 && zoneName == "" || zonesSet.Contains(zoneName) {
matchingSubnetIDs.Add(string(subnetID))
}
}
if matchingSubnetIDs.IsEmpty() {
return nil, errors.NotFoundf("subnets in AZ %q", zoneName)
}
sorted := make([]Id, matchingSubnetIDs.Size())
for k, v := range matchingSubnetIDs.SortedValues() {
sorted[k] = Id(v)
}
return sorted, nil
}
// InFan describes a network fan type.
const InFan = "INFAN"
// FilterInFanNetwork filters out any fan networks.
func FilterInFanNetwork(networks []Id) []Id {
var result []Id
for _, network := range networks {
if !IsInFanNetwork(network) {
result = append(result, network)
}
}
return result
}
func IsInFanNetwork(network Id) bool {
return strings.Contains(network.String(), InFan)
}
// IPRangeForCIDR returns the first and last addresses that correspond to the
// provided CIDR. The first address will always be the network address. The
// returned range also includes the broadcast address. For example, a CIDR of
// 10.0.0.0/24 yields: [10.0.0.0, 10.0.0.255].
func IPRangeForCIDR(cidr string) (net.IP, net.IP, error) {
_, ipNet, err := net.ParseCIDR(cidr)
if err != nil {
return net.IP{}, net.IP{}, errors.Trace(err)
}
ones, numBits := ipNet.Mask.Size()
// Special case: CIDR specifies a single address (i.e. a /32 or /128
// for IPV4 and IPV6 CIDRs accordingly).
if ones == numBits {
firstIP := ipNet.IP
lastIP := make(net.IP, len(firstIP))
copy(lastIP, firstIP)
return firstIP, lastIP, nil
}
// Calculate number of hosts in network (2^hostBits - 1) or the
// equivalent (1 << hostBits) - 1.
hostCount := big.NewInt(1)
hostCount = hostCount.Lsh(hostCount, uint(numBits-ones))
hostCount = hostCount.Sub(hostCount, big.NewInt(1))
// Calculate last IP in range.
lastIPNum := big.NewInt(0).SetBytes([]byte(ipNet.IP))
lastIPNum = lastIPNum.Add(lastIPNum, hostCount)
// Convert last IP into bytes. Since BigInt strips off leading zeroes
// we need to prepend them again before casting back to net.IP.
lastIPBytes := lastIPNum.Bytes()
lastIPBytes = append(make([]byte, len(ipNet.IP)-len(lastIPBytes)), lastIPBytes...)
return ipNet.IP, net.IP(lastIPBytes), nil
}
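// Illustrative sketch of IPRangeForCIDR: the returned range spans the network
// address through the broadcast address.
//
//	first, last, err := network.IPRangeForCIDR("192.168.0.0/22")
//	// first: 192.168.0.0, last: 192.168.3.255, err: nil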
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package network
import "github.com/juju/errors"
// AvailabilityZone describes the common methods
// for general interaction with an AZ.
type AvailabilityZone interface {
// Name returns the name of the availability zone.
Name() string
// Available reports whether the availability zone is currently available.
Available() bool
}
// AvailabilityZones is a collection of AvailabilityZone.
type AvailabilityZones []AvailabilityZone
// Validate checks that a zone with the input name exists and is available
// according to the topology represented by the receiver.
// An error is returned if either of these conditions is not met.
func (a AvailabilityZones) Validate(zoneName string) error {
for _, az := range a {
if az.Name() == zoneName {
if az.Available() {
return nil
}
return errors.Errorf("zone %q is unavailable", zoneName)
}
}
return errors.NotValidf("availability zone %q", zoneName)
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package objectstore
import "github.com/juju/errors"
const (
// ObjectStoreLeaseHolderName is the name of the lease holder for the
// object store.
ObjectStoreLeaseHolderName = "objectstore"
)
// ParseLeaseHolderName returns nil if the supplied name is a valid lease
// holder, or a NotValid error otherwise.
// This is used to ensure that the lease manager does not attempt to acquire
// leases for invalid names.
func ParseLeaseHolderName(name string) error {
if name == ObjectStoreLeaseHolderName {
return nil
}
return errors.NotValidf("lease holder name %q", name)
}
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package objectstore
import (
"fmt"
"regexp"
"strings"
"github.com/juju/errors"
)
// BackendType is the type to identify the backend to use for the object store.
type BackendType string
const (
// FileBackend is the backend type for the file object store.
FileBackend BackendType = "file"
// S3Backend is the backend type for the s3 object store.
S3Backend BackendType = "s3"
)
func (b BackendType) String() string {
return string(b)
}
// ParseObjectStoreType parses the given string into a BackendType.
func ParseObjectStoreType(s string) (BackendType, error) {
switch s {
case string(FileBackend):
return FileBackend, nil
case string(S3Backend):
return S3Backend, nil
default:
return "", errors.NotValidf("object store type %q", s)
}
}
// ParseObjectStoreBucketName validates the supplied object store bucket name
// against the S3 bucket naming rules and returns it if valid.
// See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html
//
// This function doesn't use one big regexp, as that would be harder to update
// when and if the naming rules change.
func ParseObjectStoreBucketName(s string) (string, error) {
if s == "" {
return "", errors.NotValidf("bucket name %q", s)
}
// Bucket names must be between 3 (min) and 63 (max) characters long.
if num := len(s); num < 3 {
return "", errors.NewNotValid(nil, fmt.Sprintf("bucket name %q: too short", s))
} else if num > 63 {
return "", errors.NewNotValid(nil, fmt.Sprintf("bucket name %q: too long", s))
}
// Bucket names can consist only of lowercase letters, numbers, dots (.),
// and hyphens (-).
// Bucket names must begin and end with a letter or number.
// For best compatibility, we recommend that you avoid using dots (.) in
// bucket names, except for buckets that are used only for static website
// hosting. If you include dots in a bucket's name, you can't use
// virtual-host-style addressing over HTTPS, unless you perform your own
// certificate validation. This is because the security certificates used
// for virtual hosting of buckets don't work for buckets with dots in
// their names.
if !nameRegex.MatchString(s) {
return "", errors.NewNotValid(nil, fmt.Sprintf("bucket name %q: invalid characters", s))
}
// Note: we don't allow dots, so the following checks aren't required:
// - Bucket names must not contain two adjacent periods (..).
// - Bucket names must not be formatted as an IP address (for example, 192.168.5.4).
// Bucket names must not start with the prefix xn--.
// Bucket names must not start with the prefix sthree- or the
// prefix sthree-configurator.
// Note: the latter is already covered by the sthree- prefix check.
if strings.HasPrefix(s, "xn--") || strings.HasPrefix(s, "sthree-") {
return "", errors.NewNotValid(nil, fmt.Sprintf("bucket name %q: invalid prefix", s))
}
// Bucket names must not end with the suffix -s3alias. This suffix is
// reserved for access point alias names.
if strings.HasSuffix(s, "-s3alias") {
return "", errors.NewNotValid(nil, fmt.Sprintf("bucket name %q: invalid suffix", s))
}
return s, nil
}
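// Illustrative sketch of ParseObjectStoreBucketName (the bucket names are
// hypothetical): valid names are returned unchanged, while names violating
// the S3 rules are rejected.
//
//	objectstore.ParseObjectStoreBucketName("juju-blobs") // "juju-blobs", nil
//	objectstore.ParseObjectStoreBucketName("ab")         // error: too short
//	objectstore.ParseObjectStoreBucketName("xn--bucket") // error: invalid prefix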
var (
// This is the strict regex for bucket names, no dots allowed.
nameRegex = regexp.MustCompile(`^[a-z0-9][a-z0-9\-]{1,61}[a-z0-9]$`)
)
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package objectstore
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/juju/internal/uuid"
)
// UUID represents an object store unique identifier.
type UUID string
// NewUUID is a convenience function for generating a new object store UUID.
func NewUUID() (UUID, error) {
uuid, err := uuid.NewUUID()
if err != nil {
return UUID(""), err
}
return UUID(uuid.String()), nil
}
// ParseUUID returns a new UUID from the given string. If the string is not a
// valid uuid an error satisfying [errors.NotValid] will be returned.
func ParseUUID(value string) (UUID, error) {
if !uuid.IsValidUUIDString(value) {
return "", fmt.Errorf("id %q: %w", value, errors.NotValid)
}
return UUID(value), nil
}
// IsEmpty returns true if the UUID is empty.
func (u UUID) IsEmpty() bool {
return u == ""
}
// String implements the stringer interface for UUID.
func (u UUID) String() string {
return string(u)
}
// Validate ensures the consistency of the UUID. If the uuid is invalid an error
// satisfying [errors.NotValid] will be returned.
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("%wuuid cannot be empty", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("uuid %q %w", u, errors.NotValid)
}
return nil
}
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package os
import (
"sync"
"github.com/juju/errors"
corebase "github.com/juju/juju/core/base"
)
var (
// HostBase returns the base of the machine the current process is
// running on (overrideable var for testing)
HostBase func() (corebase.Base, error) = hostBase
baseOnce sync.Once
// These are filled in by the first call to hostBase
base corebase.Base
baseErr error
)
func hostBase() (corebase.Base, error) {
var err error
baseOnce.Do(func() {
base, err = readBase()
if err != nil {
baseErr = errors.Annotate(err, "cannot determine host base")
}
})
return base, baseErr
}
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package os
import (
corebase "github.com/juju/juju/core/base"
)
func readBase() (corebase.Base, error) {
values, err := ReadOSRelease(osReleaseFile)
if err != nil {
return corebase.Base{}, err
}
return corebase.ParseBase(values["ID"], values["VERSION_ID"])
}
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package os
import (
"strings"
)
var HostOS = hostOS // for monkey patching
// HostOSTypeName returns the name of the host OS.
func HostOSTypeName() (osTypeName string) {
defer func() {
if err := recover(); err != nil {
osTypeName = "unknown"
}
}()
return strings.ToLower(HostOS().String())
}
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package os
import (
"errors"
stdos "os"
"strings"
"sync"
"github.com/juju/juju/core/os/ostype"
)
var (
// osReleaseFile is the name of the file that is read in order to determine
// the Linux distribution and release version.
osReleaseFile = "/etc/os-release"
osOnce sync.Once
os ostype.OSType // filled in by the first call to hostOS
)
func hostOS() ostype.OSType {
osOnce.Do(func() {
var err error
os, err = updateOS(osReleaseFile)
if err != nil {
panic("unable to read " + osReleaseFile + ": " + err.Error())
}
})
return os
}
func updateOS(f string) (ostype.OSType, error) {
values, err := ReadOSRelease(f)
if err != nil {
return ostype.Unknown, err
}
switch values["ID"] {
case strings.ToLower(ostype.Ubuntu.String()):
return ostype.Ubuntu, nil
case strings.ToLower(ostype.CentOS.String()):
return ostype.CentOS, nil
default:
return ostype.GenericLinux, nil
}
}
// ReadOSRelease parses the information in the os-release file.
//
// See http://www.freedesktop.org/software/systemd/man/os-release.html.
func ReadOSRelease(f string) (map[string]string, error) {
contents, err := stdos.ReadFile(f)
if err != nil {
return nil, err
}
values := make(map[string]string)
releaseDetails := strings.Split(string(contents), "\n")
for _, val := range releaseDetails {
c := strings.SplitN(val, "=", 2)
if len(c) != 2 {
continue
}
values[c[0]] = strings.Trim(c[1], "\t '\"")
}
if _, ok := values["ID"]; !ok {
return nil, errors.New("OS release file is missing ID")
}
if _, ok := values["VERSION_ID"]; !ok {
return nil, errors.New("OS release file is missing VERSION_ID")
}
return values, nil
}
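// Illustrative sketch of ReadOSRelease (the values depend entirely on the
// host): on an Ubuntu machine the parsed map would typically look like this.
//
//	values, err := ReadOSRelease("/etc/os-release")
//	// values["ID"]: "ubuntu", values["VERSION_ID"]: e.g. "24.04", err: nil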
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package ostype
import (
"strings"
"github.com/juju/errors"
)
type OSType int
const (
Unknown OSType = iota
Ubuntu
Windows
OSX
CentOS
GenericLinux
Kubernetes
)
func (t OSType) String() string {
switch t {
case Ubuntu:
return "Ubuntu"
case Windows:
return "Windows"
case OSX:
return "OSX"
case CentOS:
return "CentOS"
case GenericLinux:
return "GenericLinux"
case Kubernetes:
return "Kubernetes"
}
return "Unknown"
}
// EquivalentTo returns true if the OS type is equivalent to another
// OS type.
func (t OSType) EquivalentTo(t2 OSType) bool {
if t == t2 {
return true
}
return t.IsLinux() && t2.IsLinux()
}
// IsLinux returns true if the OS type is a Linux variant.
func (t OSType) IsLinux() bool {
switch t {
case Ubuntu, CentOS, GenericLinux:
return true
}
return false
}
var validOSTypeNames = map[string]OSType{
"ubuntu": Ubuntu,
"windows": Windows,
"osx": OSX,
"centos": CentOS,
"genericlinux": GenericLinux,
"kubernetes": Kubernetes,
}
// IsValidOSTypeName returns true if osType is a
// valid os type name.
func IsValidOSTypeName(osType string) bool {
for n := range validOSTypeNames {
if n == osType {
return true
}
}
return false
}
// OSTypeForName return the named OS.
func OSTypeForName(name string) OSType {
os, ok := validOSTypeNames[name]
if ok {
return os
}
return Unknown
}
// ParseOSType parses a string and returns the corresponding OSType.
func ParseOSType(s string) (OSType, error) {
osType, ok := validOSTypeNames[strings.ToLower(s)]
if !ok {
return Unknown, errors.NotValidf("unknown os type %q", s)
}
return osType, nil
}
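// Illustrative sketch of ParseOSType: matching is case-insensitive and unknown
// names are rejected with a NotValid error.
//
//	ostype.ParseOSType("Ubuntu")  // ostype.Ubuntu, nil
//	ostype.ParseOSType("solaris") // ostype.Unknown, NotValid error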
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
//go:build !windows
package paths
import (
"os"
"os/user"
"strconv"
"github.com/juju/errors"
)
// LogfilePermission is the file mode to use for log files.
const LogfilePermission = os.FileMode(0640)
// SetSyslogOwner sets the owner and group of the file to the appropriate
// syslog user and group as defined by the SyslogUserGroup method.
func SetSyslogOwner(filename string) error {
user, group := SyslogUserGroup()
return SetOwnership(filename, user, group)
}
// SetOwnership sets the ownership of the file at the given path.
// It looks up the IDs of the given user and group names and uses them to
// chown the file.
func SetOwnership(filePath string, wantedUser string, wantedGroup string) error {
group, err := user.LookupGroup(wantedGroup)
if err != nil {
return errors.Trace(err)
}
gid, err := strconv.Atoi(group.Gid)
if err != nil {
return errors.Trace(err)
}
usr, err := user.Lookup(wantedUser)
if err != nil {
return errors.Trace(err)
}
uid, err := strconv.Atoi(usr.Uid)
if err != nil {
return errors.Trace(err)
}
return Chown(filePath, uid, gid)
}
// PrimeLogFile ensures that the given log file is created with the
// correct mode and ownership.
func PrimeLogFile(path string) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, LogfilePermission)
if err != nil {
return errors.Trace(err)
}
if err := f.Close(); err != nil {
return errors.Trace(err)
}
return SetSyslogOwner(path)
}
// SyslogUserGroup returns the names of the user and group that own the log files.
func SyslogUserGroup() (string, string) {
return "syslog", "adm"
}
// Copyright 2014 Canonical Ltd.
// Copyright 2014 Cloudbase Solutions SRL
// Licensed under the AGPLv3, see LICENCE file for details.
package paths
import (
"os"
"runtime"
"github.com/juju/juju/core/os/ostype"
)
type OS int // strongly typed runtime.GOOS value to help with refactoring
const (
OSWindows OS = 1
OSUnixLike OS = 2
)
type osVarType int
const (
tmpDir osVarType = iota
logDir
dataDir
storageDir
confDir
jujuExec
certDir
metricsSpoolDir
uniterStateDir
jujuDumpLogs
jujuIntrospect
instanceCloudInitDir
cloudInitCfgDir
curtinInstallConfig
transientDataDir
)
const (
// NixDataDir is the location for agent binaries on *nix operating systems.
NixDataDir = "/var/lib/juju"
// NixTransientDataDir is the location for storing transient data on *nix
// operating systems.
NixTransientDataDir = "/var/run/juju"
// NixLogDir is the location for Juju logs on *nix operating systems.
NixLogDir = "/var/log"
)
var nixVals = map[osVarType]string{
tmpDir: "/tmp",
logDir: NixLogDir,
dataDir: NixDataDir,
transientDataDir: NixTransientDataDir,
storageDir: "/var/lib/juju/storage",
confDir: "/etc/juju",
jujuExec: "/usr/bin/juju-exec",
jujuDumpLogs: "/usr/bin/juju-dumplogs",
jujuIntrospect: "/usr/bin/juju-introspect",
certDir: "/etc/juju/certs.d",
metricsSpoolDir: "/var/lib/juju/metricspool",
uniterStateDir: "/var/lib/juju/uniter/state",
instanceCloudInitDir: "/var/lib/cloud/instance",
cloudInitCfgDir: "/etc/cloud/cloud.cfg.d",
curtinInstallConfig: "/root/curtin-install-cfg.yaml",
}
var winVals = map[osVarType]string{
tmpDir: "C:/Juju/tmp",
logDir: "C:/Juju/log",
dataDir: "C:/Juju/lib/juju",
transientDataDir: "C:/Juju/lib/juju-transient",
storageDir: "C:/Juju/lib/juju/storage",
confDir: "C:/Juju/etc",
jujuExec: "C:/Juju/bin/juju-exec.exe",
jujuDumpLogs: "C:/Juju/bin/juju-dumplogs.exe",
jujuIntrospect: "C:/Juju/bin/juju-introspect.exe",
certDir: "C:/Juju/certs",
metricsSpoolDir: "C:/Juju/lib/juju/metricspool",
uniterStateDir: "C:/Juju/lib/juju/uniter/state",
}
// Chown is a variable here so it can be mocked out in tests to a no-op.
// Agents run as root, but users don't.
var Chown = os.Chown
// CurrentOS returns the OS value for the currently-running system.
func CurrentOS() OS {
switch runtime.GOOS {
case "windows":
return OSWindows
default:
return OSUnixLike
}
}
// OSType converts the given os name to an OS value.
func OSType(osName string) OS {
switch ostype.OSTypeForName(osName) {
case ostype.Windows:
return OSWindows
default:
return OSUnixLike
}
}
// osVal will look up the value of the key valname
// in the appropriate map, based on the OS value.
func osVal(os OS, valname osVarType) string {
switch os {
case OSWindows:
return winVals[valname]
default:
return nixVals[valname]
}
}
// LogDir returns the filesystem path to the directory where juju may
// save log files.
func LogDir(os OS) string {
return osVal(os, logDir)
}
// DataDir returns a filesystem path to the folder used by juju to
// store tools, charms, locks, etc
func DataDir(os OS) string {
return osVal(os, dataDir)
}
// TransientDataDir returns a filesystem path to the folder used by juju to
// store transient data that will not survive a reboot.
func TransientDataDir(os OS) string {
return osVal(os, transientDataDir)
}
// MetricsSpoolDir returns a filesystem path to the folder used by juju
// to store metrics.
func MetricsSpoolDir(os OS) string {
return osVal(os, metricsSpoolDir)
}
// CertDir returns a filesystem path to the folder used by juju to
// store certificates that are added by default to the Juju client
// api certificate pool.
func CertDir(os OS) string {
return osVal(os, certDir)
}
// StorageDir returns a filesystem path to the folder used by juju to
// mount machine-level storage.
func StorageDir(os OS) string {
return osVal(os, storageDir)
}
// ConfDir returns the path to the directory where Juju may store
// configuration files.
func ConfDir(os OS) string {
return osVal(os, confDir)
}
// JujuExec returns the absolute path to the juju-exec binary for
// a particular series.
func JujuExec(os OS) string {
return osVal(os, jujuExec)
}
// JujuDumpLogs returns the absolute path to the juju-dumplogs binary
// for a particular series.
func JujuDumpLogs(os OS) string {
return osVal(os, jujuDumpLogs)
}
// JujuIntrospect returns the absolute path to the juju-introspect
// binary for a particular series.
func JujuIntrospect(os OS) string {
return osVal(os, jujuIntrospect)
}
// MachineCloudInitDir returns the absolute path to the instance
// cloudinit directory for a particular series.
func MachineCloudInitDir(os OS) string {
return osVal(os, instanceCloudInitDir)
}
// CurtinInstallConfig returns the absolute path to the configuration file
// written by Curtin during machine provisioning.
func CurtinInstallConfig(os OS) string {
return osVal(os, curtinInstallConfig)
}
// CloudInitCfgDir returns the absolute path to the instance
// cloud config directory for a particular series.
func CloudInitCfgDir(os OS) string {
return osVal(os, cloudInitCfgDir)
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package permission
import (
"github.com/juju/errors"
"github.com/juju/names/v6"
)
// AccessChange represents a change in access level.
type AccessChange string
const (
// Grant represents a change in access level to grant.
Grant AccessChange = "grant"
// Revoke represents a change in access level to revoke.
Revoke AccessChange = "revoke"
)
// Access represents a level of access.
type Access string
const (
// NoAccess allows a user no permissions at all.
NoAccess Access = ""
// ReadAccess allows a user to read information about a permission subject,
// without being able to make any changes.
ReadAccess Access = "read"
// WriteAccess allows a user to make changes to a permission subject.
WriteAccess Access = "write"
// ConsumeAccess allows a user to consume a permission subject.
ConsumeAccess Access = "consume"
// AdminAccess allows a user full control over the subject.
AdminAccess Access = "admin"
// LoginAccess allows a user to log in to the subject.
LoginAccess Access = "login"
// AddModelAccess allows a user to add new models in subjects supporting it.
AddModelAccess Access = "add-model"
// SuperuserAccess allows a user unrestricted permissions in the subject.
SuperuserAccess Access = "superuser"
)
// AllAccessLevels is a list of all access levels.
var AllAccessLevels = []Access{
NoAccess,
ReadAccess,
WriteAccess,
ConsumeAccess,
AdminAccess,
LoginAccess,
AddModelAccess,
SuperuserAccess,
}
// Validate returns an error if the current value is not a valid access level.
func (a Access) Validate() error {
switch a {
case NoAccess, AdminAccess, ReadAccess, WriteAccess,
LoginAccess, AddModelAccess, SuperuserAccess, ConsumeAccess:
return nil
}
return errors.NotValidf("access level %s", a)
}
// String returns the access level as a string.
func (a Access) String() string {
return string(a)
}
// ObjectType is the type of the permission object.
type ObjectType string
// These values must match the values in the permission_object_type table.
const (
Cloud ObjectType = "cloud"
Controller ObjectType = "controller"
Model ObjectType = "model"
Offer ObjectType = "offer"
)
// Validate returns an error if the object type is not in the
// list of valid object types above.
func (o ObjectType) Validate() error {
switch o {
case Cloud, Controller, Model, Offer:
default:
return errors.NotValidf("object type %q", o)
}
return nil
}
// String returns the object type as a string.
func (o ObjectType) String() string {
return string(o)
}
// ID identifies the object of a permission, its key and type. Keys
// are names or UUIDs depending on the type.
type ID struct {
ObjectType ObjectType
Key string
}
// Validate returns an error if the key is empty and/or the ObjectType
// is not in the list.
func (i ID) Validate() error {
if i.Key == "" {
return errors.NotValidf("empty key")
}
return i.ObjectType.Validate()
}
// ValidateAccess validates the access value is valid for this ID.
func (i ID) ValidateAccess(access Access) error {
var err error
switch i.ObjectType {
case Cloud:
err = ValidateCloudAccess(access)
case Controller:
err = ValidateControllerAccess(access)
case Model:
err = ValidateModelAccess(access)
case Offer:
err = ValidateOfferAccess(access)
default:
err = errors.NotValidf("access type %q", i.ObjectType)
}
return err
}
// ParseTagForID returns the ID of a permission object derived from the
// given tag; the tag kind must be one of the known object types.
func ParseTagForID(tag names.Tag) (ID, error) {
if tag == nil {
return ID{}, errors.NotValidf("nil tag")
}
id := ID{Key: tag.Id()}
switch tag.Kind() {
case names.CloudTagKind:
id.ObjectType = Cloud
case names.ControllerTagKind:
id.ObjectType = Controller
case names.ModelTagKind:
id.ObjectType = Model
case names.ApplicationOfferTagKind:
id.ObjectType = Offer
default:
return id, errors.NotSupportedf("target tag type %s", tag.Kind())
}
return id, nil
}
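// Illustrative sketch (hypothetical helper, not part of this package): deriving
// a permission ID from a model tag; the model UUID below is made up.
func exampleModelID() (ID, error) {
	return ParseTagForID(names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d"))
}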
// ValidateModelAccess returns an error if the passed access is not a valid
// model access level.
func ValidateModelAccess(access Access) error {
switch access {
case ReadAccess, WriteAccess, AdminAccess:
return nil
}
return errors.NotValidf("%q model access", access)
}
// ValidateOfferAccess returns an error if the passed access is not a valid
// offer access level.
func ValidateOfferAccess(access Access) error {
switch access {
case ReadAccess, ConsumeAccess, AdminAccess:
return nil
}
return errors.NotValidf("%q offer access", access)
}
// ValidateCloudAccess returns an error if the passed access is not a valid
// cloud access level.
func ValidateCloudAccess(access Access) error {
switch access {
case AddModelAccess, AdminAccess:
return nil
}
return errors.NotValidf("%q cloud access", access)
}
// ValidateControllerAccess returns an error if the passed access is not a valid
// controller access level.
func ValidateControllerAccess(access Access) error {
switch access {
case LoginAccess, SuperuserAccess:
return nil
}
return errors.NotValidf("%q controller access", access)
}
func (a Access) controllerValue() int {
switch a {
case NoAccess:
return 0
case LoginAccess:
return 1
case SuperuserAccess:
return 2
default:
return -1
}
}
func (a Access) cloudValue() int {
switch a {
case NoAccess:
return 0
case AddModelAccess:
return 1
case AdminAccess:
return 2
default:
return -1
}
}
func (a Access) modelValue() int {
switch a {
case NoAccess:
return 0
case ReadAccess:
return 1
case WriteAccess:
return 2
case AdminAccess:
return 3
default:
return -1
}
}
// EqualOrGreaterModelAccessThan returns true if the current access is equal
// or greater than the passed in access level.
func (a Access) EqualOrGreaterModelAccessThan(access Access) bool {
v1, v2 := a.modelValue(), access.modelValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 >= v2
}
// GreaterModelAccessThan returns true if the current access is greater than
// the passed in access level.
func (a Access) GreaterModelAccessThan(access Access) bool {
v1, v2 := a.modelValue(), access.modelValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 > v2
}
// EqualOrGreaterControllerAccessThan returns true if the current access is
// equal or greater than the passed in access level.
func (a Access) EqualOrGreaterControllerAccessThan(access Access) bool {
v1, v2 := a.controllerValue(), access.controllerValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 >= v2
}
// GreaterControllerAccessThan returns true if the current access is
// greater than the passed in access level.
func (a Access) GreaterControllerAccessThan(access Access) bool {
v1, v2 := a.controllerValue(), access.controllerValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 > v2
}
// EqualOrGreaterCloudAccessThan returns true if the current access is
// equal or greater than the passed in access level.
func (a Access) EqualOrGreaterCloudAccessThan(access Access) bool {
v1, v2 := a.cloudValue(), access.cloudValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 >= v2
}
func (a Access) offerValue() int {
switch a {
case NoAccess:
return 0
case ReadAccess:
return 1
case ConsumeAccess:
return 2
case AdminAccess:
return 3
default:
return -1
}
}
// EqualOrGreaterOfferAccessThan returns true if the current access is
// equal or greater than the passed in access level.
func (a Access) EqualOrGreaterOfferAccessThan(access Access) bool {
v1, v2 := a.offerValue(), access.offerValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 >= v2
}
// GreaterOfferAccessThan returns true if the current access is
// greater than the passed in access level.
func (a Access) GreaterOfferAccessThan(access Access) bool {
v1, v2 := a.offerValue(), access.offerValue()
if v1 < 0 || v2 < 0 {
return false
}
return v1 > v2
}
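// Illustrative sketch (hypothetical helper, not part of this package): access
// comparisons are per target type, so a "can write" check on a model uses the
// model-specific ordering above.
func canWriteModel(a Access) bool {
	return a.EqualOrGreaterModelAccessThan(WriteAccess)
}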
// modelRevoke provides the logic of revoking
// model access. Revoking:
// * Admin gets you Write
// * Write gets you Read
// * Read gets you NoAccess
func modelRevoke(a Access) Access {
switch a {
case AdminAccess:
return WriteAccess
case WriteAccess:
return ReadAccess
default:
return NoAccess
}
}
// offerRevoke provides the logic of revoking
// offer access. Revoking:
// * Admin gets you Consume
// * Consume gets you Read
// * Read gets you NoAccess
func offerRevoke(a Access) Access {
switch a {
case AdminAccess:
return ConsumeAccess
case ConsumeAccess:
return ReadAccess
default:
return NoAccess
}
}
// controllerRevoke provides the logic of revoking
// controller access. Revoking:
// * Superuser gets you Login
// * Login gets you NoAccess
func controllerRevoke(a Access) Access {
switch a {
case SuperuserAccess:
return LoginAccess
default:
return NoAccess
}
}
// cloudRevoke provides the logic of revoking
// cloud access. Revoking:
// * Admin gets you AddModel
// * AddModel gets you NoAccess
func cloudRevoke(a Access) Access {
switch a {
case AdminAccess:
return AddModelAccess
default:
return NoAccess
}
}
// EqualOrGreaterThan returns true if the current access is
// equal or greater than the passed in access level.
func (a AccessSpec) EqualOrGreaterThan(access Access) bool {
switch a.Target.ObjectType {
case Cloud:
return a.Access.EqualOrGreaterCloudAccessThan(access)
case Controller:
return a.Access.EqualOrGreaterControllerAccessThan(access)
case Model:
return a.Access.EqualOrGreaterModelAccessThan(access)
case Offer:
return a.Access.EqualOrGreaterOfferAccessThan(access)
default:
return false
}
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package permission
import (
"github.com/juju/errors"
"github.com/juju/juju/core/user"
)
// EveryoneUserName represents a special user that has the base permission
// level of all external users.
var EveryoneUserName, _ = user.NewName("everyone@external")
// AccessSpec defines the attributes that can be set when adding a new
// access.
type AccessSpec struct {
Target ID
Access Access
}
// Validate validates that the access and target specified in the
// spec are values allowed together. If not, a NotValid error is
// returned.
func (u AccessSpec) Validate() error {
if err := u.Target.Validate(); err != nil {
return err
}
if err := u.Target.ValidateAccess(u.Access); err != nil {
return err
}
return nil
}
// RevokeAccess returns the new access level based on the revoking the current
// value setting. E.g. revoking SuperuserAccess sets LoginAccess for
// controllers.
func (a AccessSpec) RevokeAccess() Access {
switch a.Target.ObjectType {
case Cloud:
return cloudRevoke(a.Access)
case Controller:
return controllerRevoke(a.Access)
case Model:
return modelRevoke(a.Access)
case Offer:
return offerRevoke(a.Access)
default:
return NoAccess
}
}
// UserAccessSpec defines the attributes that can be set when adding a new
// user access.
type UserAccessSpec struct {
AccessSpec
User user.Name
}
// Validate validates that the access and target specified in the
// spec are values allowed together and that the User is not an
// empty string. If any of these are untrue, a NotValid error is
// returned.
func (u UserAccessSpec) Validate() error {
if u.User.IsZero() {
return errors.NotValidf("empty user")
}
if err := u.AccessSpec.Validate(); err != nil {
return err
}
return nil
}
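// Illustrative sketch (hypothetical helper, not part of this package): revoking
// Superuser access on a controller target steps the access down to Login.
func exampleRevoke() Access {
	spec := AccessSpec{
		Target: ID{ObjectType: Controller, Key: "controller-uuid"},
		Access: SuperuserAccess,
	}
	return spec.RevokeAccess() // LoginAccess
}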
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package permission
import (
"time"
"github.com/juju/juju/core/user"
)
// TODO hml 2024-02-05
// Replace CreatedBy, UserTag and Object with non-tag types once
// the cut over to the permission domain is complete. Is UserTag still
// necessary if Name exists?
// UserAccess represents a user's access to a target. While the user may be a
// remote user or a user across multiple models, a UserAccess always
// represents a single user for a single target.
// There should be no more than one UserAccess per target/user pair.
// Many of these fields are storage artifacts, but generating them from
// other fields would imply out-of-band knowledge of other packages.
type UserAccess struct {
// UserID is the stored ID of the user.
UserID string
// PermissionID is the stored ID of the permission.
PermissionID string
// Object is the ID of the object of this access grant.
Object ID
// Access represents the level of access subject has over object.
Access Access
// CreatedBy is the tag of the user that granted the access.
CreatedBy user.Name
// DateCreated is the date the user was created in UTC.
DateCreated time.Time
// DisplayName is the name we are showing for this user.
DisplayName string
// UserName is the actual username for this access.
UserName user.Name
}
// IsEmptyUserAccess returns true if the passed UserAccess instance
// is empty.
func IsEmptyUserAccess(a UserAccess) bool {
return a == UserAccess{}
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
coreunit "github.com/juju/juju/core/unit"
"github.com/juju/juju/internal/charm/resource"
)
// ApplicationResources contains the list of resources for the application and all its
// units.
type ApplicationResources struct {
// Resources are the current version of the resource for the application that
// resource-get will retrieve.
Resources []Resource
// RepositoryResources provides the resource info from the charm
// store for each of the application's resources. The information from
// the charm store is current as of the last time the charm store
// was polled. Each entry here corresponds to the same indexed entry
// in the Resources field.
RepositoryResources []resource.Resource
// UnitResources reports the currently-in-use version of resources for each
// unit.
UnitResources []UnitResources
}
// Updates returns the list of charm store resources corresponding to
// the application's resources that are out of date. If there is a charm
// store resource with a different revision than the one used by the
// application, it will be returned.
// Any charm store resources with the same revision number from the
// corresponding application resources will be filtered out.
func (sr ApplicationResources) Updates() ([]resource.Resource, error) {
storeResources := map[string]resource.Resource{}
for _, res := range sr.RepositoryResources {
storeResources[res.Name] = res
}
var updates []resource.Resource
for _, res := range sr.Resources {
if res.Origin != resource.OriginStore {
continue
}
csRes, ok := storeResources[res.Name]
// If the revision is the same then all the other info must be.
if !ok || res.Revision == csRes.Revision {
continue
}
updates = append(updates, csRes)
}
return updates, nil
}
// UnitResources contains the list of resources used by a unit.
type UnitResources struct {
// Name is the name of the unit.
Name coreunit.Name
// Resources are the resource versions currently in use by this unit.
Resources []Resource
}
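// Illustrative sketch (hypothetical helper, not part of this package): a
// repository resource at revision 2 is reported as an update when the
// application still uses revision 1 of the same store resource.
func exampleUpdates() ([]resource.Resource, error) {
	current := Resource{Resource: resource.Resource{
		Meta:     resource.Meta{Name: "db"},
		Origin:   resource.OriginStore,
		Revision: 1,
	}}
	latest := resource.Resource{
		Meta:     resource.Meta{Name: "db"},
		Origin:   resource.OriginStore,
		Revision: 2,
	}
	apps := ApplicationResources{
		Resources:           []Resource{current},
		RepositoryResources: []resource.Resource{latest},
	}
	return apps.Updates()
}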
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
// TODO(ericsnow) Move this file to the charm repo?
import (
"io"
"os"
"github.com/juju/errors"
"github.com/juju/utils/v4"
charmresource "github.com/juju/juju/internal/charm/resource"
)
// Content holds a reader for the content of a resource along
// with details about that content.
type Content struct {
// Data holds the resource content, ready to be read (once).
Data io.Reader
// Size is the byte count of the data.
Size int64
// Fingerprint holds the checksum of the data.
Fingerprint charmresource.Fingerprint
}
// GenerateContent returns a new Content for the given data stream.
func GenerateContent(reader io.ReadSeeker) (Content, error) {
var sizer utils.SizeTracker
sizingReader := io.TeeReader(reader, &sizer)
fp, err := charmresource.GenerateFingerprint(sizingReader)
if err != nil {
return Content{}, errors.Trace(err)
}
if _, err := reader.Seek(0, os.SEEK_SET); err != nil {
return Content{}, errors.Trace(err)
}
size := sizer.Size()
content := Content{
Data: reader,
Size: size,
Fingerprint: fp,
}
return content, nil
}
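// Illustrative sketch (hypothetical helper, not part of this package):
// GenerateContent reads the payload once to compute size and fingerprint,
// then rewinds it so the same reader can be used for the upload itself.
func exampleContentSize(payload io.ReadSeeker) (int64, error) {
	content, err := GenerateContent(payload)
	if err != nil {
		return 0, errors.Trace(err)
	}
	return content.Size, nil
}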
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
// Import shas that are used for docker image validation.
_ "crypto/sha256"
_ "crypto/sha512"
"time"
)
// Token defines a token value with expiration time.
type Token struct {
// Value is the value of the token.
Value string
// ExpiresAt is the unix time in seconds and milliseconds when the authorization token expires.
ExpiresAt *time.Time
}
// NewToken creates a Token.
func NewToken(value string) *Token {
if value == "" {
return nil
}
return &Token{Value: value}
}
// Empty checks if the auth information is empty.
func (t *Token) Empty() bool {
return t == nil || t.Value == ""
}
// Content returns the raw content of the token.
func (t *Token) Content() string {
if t.Empty() {
return ""
}
return t.Value
}
// BasicAuthConfig contains authorization information for basic auth.
type BasicAuthConfig struct {
// Auth is the base64 encoded "username:password" string.
Auth *Token
// Username holds the username used to gain access to a non-public image.
Username string
// Password holds the password used to gain access to a non-public image.
Password string
}
// Empty checks if the auth information is empty.
func (ba BasicAuthConfig) Empty() bool {
return ba.Auth.Empty() && ba.Username == "" && ba.Password == ""
}
// TokenAuthConfig contains authorization information for token auth.
// Juju does not support the docker credential helper because k8s does not support it either.
// https://kubernetes.io/docs/concepts/containers/images/#configuring-nodes-to-authenticate-to-a-private-registry
type TokenAuthConfig struct {
Email string
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken *Token
// RegistryToken is a bearer token to be sent to a registry
RegistryToken *Token
}
// Empty checks if the auth information is empty.
func (ac TokenAuthConfig) Empty() bool {
return ac.RegistryToken.Empty() && ac.IdentityToken.Empty()
}
// ImageRepoDetails contains authorization information for connecting to a Registry.
type ImageRepoDetails struct {
BasicAuthConfig
TokenAuthConfig
// Repository is the namespace of the image repo.
Repository string
// ServerAddress is the auth server address.
ServerAddress string
// Region is the cloud region.
Region string
}
// IsPrivate checks if the repository detail is private.
func (rid ImageRepoDetails) IsPrivate() bool {
return !rid.BasicAuthConfig.Empty() || !rid.TokenAuthConfig.Empty()
}
// DockerImageDetails holds the details for a Docker resource type.
type DockerImageDetails struct {
// RegistryPath holds the path of the Docker image (including host and sha256) in a docker registry.
RegistryPath string
ImageRepoDetails
}
// IsPrivate shows if the image repo is private or not.
func (did DockerImageDetails) IsPrivate() bool {
return did.ImageRepoDetails.IsPrivate()
}
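// Illustrative sketch (hypothetical helper, not part of this package): a
// repository carrying basic-auth credentials is reported as private.
func examplePrivateRepo() bool {
	details := ImageRepoDetails{
		Repository: "ghcr.io/example",
		BasicAuthConfig: BasicAuthConfig{
			Username: "user",
			Password: "secret",
		},
	}
	return details.IsPrivate() // true
}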
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"context"
"io"
"github.com/juju/errors"
)
// Opened provides both the resource info and content.
type Opened struct {
Resource
io.ReadCloser
}
// Content returns the "content" for the opened resource.
func (o Opened) Content() Content {
return Content{
Data: o.ReadCloser,
Size: o.Size,
Fingerprint: o.Fingerprint,
}
}
func (o Opened) Close() error {
return errors.Trace(o.ReadCloser.Close())
}
// Opener exposes the functionality for opening a resource.
type Opener interface {
// OpenResource returns an opened resource with a reader that will
// stream the resource content.
OpenResource(ctx context.Context, name string) (Opened, error)
// SetResource records that the resource is currently in use.
SetResourceUsed(ctx context.Context, resName string) error
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"fmt"
"time"
"github.com/juju/errors"
"github.com/juju/juju/internal/charm/resource"
)
// Resource defines a single resource within a Juju model.
//
// Each application will have exactly the same resources associated
// with it as are defined in the charm's metadata, no more, no less.
// When associated with the application the resource may have additional
// information associated with it.
//
// A resource may be a "placeholder", meaning it is only partially
// populated before an upload (whether local or from the charm store).
// In that case the following fields are not set:
//
// UUID, Timestamp, RetrievedBy
//
// For "upload" placeholders, the following additional fields are
// not set:
//
// Fingerprint, Size
type Resource struct {
resource.Resource
UUID UUID
// ApplicationName identifies the application for the resource.
ApplicationName string
// RetrievedBy is the name of who added the resource to the controller.
// The name is a username if the resource is uploaded from the cli
// by a specific user. If the resource is downloaded from a repository,
// the ID of the unit which triggered the download is used.
RetrievedBy string
// Timestamp indicates when this resource was added to the model in
// the case of applications or when this resource was loaded by a unit.
Timestamp time.Time
}
// RetrievedByType indicates what the RetrievedBy name represents.
type RetrievedByType string
const (
Unknown RetrievedByType = "unknown"
Application RetrievedByType = "application"
Unit RetrievedByType = "unit"
User RetrievedByType = "user"
)
func (r RetrievedByType) String() string {
return string(r)
}
// Validate ensures that the spec is valid.
func (res Resource) Validate() error {
// TODO(ericsnow) Ensure that the "placeholder" fields are not set
// if IsLocalPlaceholder() returns true (and that they *are* set
// otherwise)? Also ensure an "upload" origin in the "placeholder"
// case?
if err := res.Resource.Validate(); err != nil {
return errors.Annotate(err, "bad info")
}
if res.ApplicationName == "" {
return errors.Annotate(errors.NotValid, "missing application name")
}
// TODO(ericsnow) Require that RetrievedBy be set if timestamp is?
if res.Timestamp.IsZero() && res.RetrievedBy != "" {
return errors.NewNotValid(nil, "missing timestamp")
}
return nil
}
// IsPlaceholder indicates if the resource is a
// "placeholder" (partially populated pending an upload).
func (res Resource) IsPlaceholder() bool {
return res.Timestamp.IsZero()
}
// TimestampGranular returns the timestamp at a resolution of 1 second.
func (res Resource) TimestampGranular() time.Time {
return time.Unix(res.Timestamp.Unix(), 0)
}
// RevisionString returns the human-readable revision for the resource.
func (res Resource) RevisionString() string {
switch res.Origin {
case resource.OriginUpload:
if res.IsPlaceholder() {
return "-"
}
return res.TimestampGranular().UTC().String()
case resource.OriginStore:
return fmt.Sprintf("%d", res.Revision)
default:
// note: this should probably never happen.
return "-"
}
}
// AsMap returns the mapping of resource name to info for each of the
// given resources.
func AsMap(resources []Resource) map[string]Resource {
results := make(map[string]Resource, len(resources))
for _, res := range resources {
results[res.Name] = res
}
return results
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"github.com/juju/errors"
"github.com/juju/juju/internal/charm/resource"
)
// DeserializeFingerprint converts the serialized fingerprint back into
// a Fingerprint. "zero" values are treated appropriately.
func DeserializeFingerprint(fpSum []byte) (resource.Fingerprint, error) {
var fp resource.Fingerprint
if len(fpSum) != 0 {
var err error
fp, err = resource.NewFingerprint(fpSum)
if err != nil {
return fp, errors.Trace(err)
}
}
return fp, nil
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"sort"
)
// Sort sorts the provided resources.
func Sort(resources []Resource) {
sort.Sort(byName(resources))
}
type byName []Resource
func (sorted byName) Len() int { return len(sorted) }
func (sorted byName) Swap(i, j int) { sorted[i], sorted[j] = sorted[j], sorted[i] }
func (sorted byName) Less(i, j int) bool { return sorted[i].Name < sorted[j].Name }
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"github.com/juju/juju/internal/errors"
)
// These are the valid resource states.
const (
// StateAvailable represents a resource which will be used by any units at
// this point in time.
StateAvailable State = "available"
// StatePotential indicates there is a different revision of the resource
// available in a repository. Used to let users know a resource can be
// upgraded.
StatePotential State = "potential"
)
// State identifies the resource state in an application.
type State string
// ParseState converts the provided string into a State.
// If it is not a known state then an error is returned.
func ParseState(value string) (State, error) {
state := State(value)
return state, state.Validate()
}
// String returns the printable representation of the state.
func (o State) String() string {
return string(o)
}
// Validate ensures that the state is correct.
func (o State) Validate() error {
if _, ok := map[State]bool{
StateAvailable: true,
StatePotential: true,
}[o]; !ok {
return errors.Errorf("state %q invalid", o)
}
return nil
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/juju/internal/uuid"
)
// UUID represents a resource unique identifier.
type UUID string
// NewUUID is a convenience function for generating a new resource UUID.
func NewUUID() (UUID, error) {
id, err := uuid.NewUUID()
if err != nil {
return UUID(""), err
}
return UUID(id.String()), nil
}
// ParseUUID returns a new UUID from the given string. If the string is not a
// valid uuid an error satisfying [errors.NotValid] will be returned.
func ParseUUID(value string) (UUID, error) {
if !uuid.IsValidUUIDString(value) {
return "", fmt.Errorf("id %q %w", value, errors.NotValid)
}
return UUID(value), nil
}
// String implements the stringer interface for UUID.
func (u UUID) String() string {
return string(u)
}
// Validate ensures the consistency of the UUID. If the uuid is invalid an error
// satisfying [errors.NotValid] will be returned.
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("%wuuid cannot be empty", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("uuid %q %w", u, errors.NotValid)
}
return nil
}
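// Illustrative sketch (hypothetical helper, not part of this package):
// generating a new resource UUID and round-tripping it through ParseUUID.
func exampleUUID() (UUID, error) {
	id, err := NewUUID()
	if err != nil {
		return "", err
	}
	return ParseUUID(id.String())
}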
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
import (
"encoding/base64"
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
"github.com/juju/errors"
"github.com/juju/utils/v4"
"gopkg.in/yaml.v2"
)
var keyRegExp = regexp.MustCompile("^([a-z](?:-?[a-z0-9]){2,})$")
// SecretData holds secret key values.
type SecretData map[string]string
const (
fileSuffix = "#file"
maxValueSizeBytes = 8 * 1024
maxContentSizeBytes = 64 * 1024
)
// CreateSecretData creates a secret data bag from a list of arguments.
// If a key has the #base64 suffix, then the value is already base64 encoded,
// otherwise the value is base64 encoded as it is added to the data bag.
func CreateSecretData(args []string) (SecretData, error) {
data := make(SecretData)
for _, val := range args {
// Remove any base64 padding ("=") before splitting the key=value.
stripped := strings.TrimRight(val, string(base64.StdPadding))
idx := strings.Index(stripped, "=")
if idx < 1 {
return nil, errors.NotValidf("key value %q", val)
}
keyVal := []string{
val[0:idx],
val[idx+1:],
}
key := keyVal[0]
value := keyVal[1]
if !strings.HasSuffix(key, fileSuffix) {
data[key] = value
continue
}
key = strings.TrimSuffix(key, fileSuffix)
path, err := utils.NormalizePath(value)
if err != nil {
return nil, errors.Trace(err)
}
fs, err := os.Stat(path)
if err == nil && fs.Size() > maxValueSizeBytes {
return nil, errors.Errorf("secret content in file %q too large: %d bytes", path, fs.Size())
}
content, err := os.ReadFile(path)
if err != nil {
return nil, errors.Annotatef(err, "reading content for secret key %q", key)
}
data[key] = string(content)
}
return encodeBase64(data)
}
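// Illustrative sketch (hypothetical helper, not part of this package): plain
// values are base64 encoded on the way in, while values whose key carries the
// #base64 suffix are stored as supplied.
func exampleSecretData() (SecretData, error) {
	return CreateSecretData([]string{"token=s3cret", "cert#base64=Zm9vCg=="})
}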
// ReadSecretData reads secret data from a YAML or JSON file as key value pairs.
func ReadSecretData(f string) (SecretData, error) {
attrs := make(SecretData)
path, err := utils.NormalizePath(f)
if err != nil {
return nil, errors.Trace(err)
}
fs, err := os.Stat(path)
if err == nil && fs.Size() > maxContentSizeBytes {
return nil, errors.Errorf("secret content in file %q too large: %d bytes", path, fs.Size())
}
data, err := os.ReadFile(path)
if err != nil {
return nil, errors.Trace(err)
}
if err := json.Unmarshal(data, &attrs); err != nil {
err = yaml.Unmarshal(data, &attrs)
if err != nil {
return nil, errors.Trace(err)
}
}
return encodeBase64(attrs)
}
const base64Suffix = "#base64"
func encodeBase64(in SecretData) (SecretData, error) {
out := make(SecretData, len(in))
var contentSize int
for k, v := range in {
if len(v) > maxValueSizeBytes {
return nil, errors.Errorf("secret content for key %q too large: %d bytes", k, len(v))
}
contentSize += len(v)
if strings.HasSuffix(k, base64Suffix) {
k = strings.TrimSuffix(k, base64Suffix)
if !keyRegExp.MatchString(k) {
return nil, errors.NotValidf("key %q", k)
}
out[k] = v
continue
}
if !keyRegExp.MatchString(k) {
return nil, errors.NotValidf("key %q", k)
}
out[k] = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%v", v)))
}
if contentSize > maxContentSizeBytes {
return nil, errors.Errorf("secret content too large: %d bytes", contentSize)
}
return out, nil
}
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
// SecretRole is an access role on a secret.
type SecretRole string
const (
RoleNone = SecretRole("")
RoleView = SecretRole("view")
RoleRotate = SecretRole("rotate")
RoleManage = SecretRole("manage")
)
// IsValid returns true if r is a valid secret role.
func (r SecretRole) IsValid() bool {
switch r {
case RoleNone, RoleView, RoleRotate, RoleManage:
return true
}
return false
}
func (r SecretRole) value() int {
switch r {
case RoleView:
return 1
case RoleRotate:
return 2
case RoleManage:
return 3
default:
return -1
}
}
func (r SecretRole) Allowed(wanted SecretRole) bool {
v1, v2 := r.value(), wanted.value()
if v1 < 0 || v2 < 0 {
return false
}
return v1 >= v2
}
// Copyright 2022 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
import "time"
// RotatePolicy defines a policy for how often
// to rotate a secret.
type RotatePolicy string
const (
RotateNever = RotatePolicy("never")
RotateHourly = RotatePolicy("hourly")
RotateDaily = RotatePolicy("daily")
RotateWeekly = RotatePolicy("weekly")
RotateMonthly = RotatePolicy("monthly")
RotateQuarterly = RotatePolicy("quarterly")
RotateYearly = RotatePolicy("yearly")
)
const (
// RotateRetryDelay is how long to wait to re-run the rotate hook
// if the secret was not updated.
RotateRetryDelay = 5 * time.Minute
// ExpireRetryDelay is how long to wait to re-run the expire hook
// if the expired secret revision was not removed.
ExpireRetryDelay = 5 * time.Minute
)
func (p RotatePolicy) String() string {
if p == "" {
return string(RotateNever)
}
return string(p)
}
// WillRotate returns true if the policy is not RotateNever.
func (p *RotatePolicy) WillRotate() bool {
return p != nil && *p != "" && *p != RotateNever
}
// IsValid returns true if p is a valid rotate policy.
func (p RotatePolicy) IsValid() bool {
switch p {
case RotateNever, RotateHourly, RotateDaily, RotateWeekly,
RotateMonthly, RotateQuarterly, RotateYearly:
return true
}
return false
}
// NextRotateTime returns when the policy dictates a secret should be next
// rotated given the last rotation time.
func (p RotatePolicy) NextRotateTime(lastRotated time.Time) *time.Time {
var result time.Time
switch p {
case RotateNever:
return nil
case RotateHourly:
result = lastRotated.Add(time.Hour)
case RotateDaily:
result = lastRotated.AddDate(0, 0, 1)
case RotateWeekly:
result = lastRotated.AddDate(0, 0, 7)
case RotateMonthly:
result = lastRotated.AddDate(0, 1, 0)
case RotateQuarterly:
result = lastRotated.AddDate(0, 3, 0)
case RotateYearly:
result = lastRotated.AddDate(1, 0, 0)
}
return &result
}
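// Illustrative sketch (hypothetical helper, not part of this package): a daily
// policy schedules the next rotation one day after the last one.
func exampleNextRotate(lastRotated time.Time) *time.Time {
	return RotateDaily.NextRotateTime(lastRotated)
}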
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
import (
"fmt"
"net/url"
"regexp"
"strings"
"time"
"github.com/juju/errors"
"github.com/rs/xid"
)
// SecretConfig is used when creating a secret.
type SecretConfig struct {
RotatePolicy *RotatePolicy
NextRotateTime *time.Time
ExpireTime *time.Time
Description *string
Label *string
Params map[string]interface{}
}
// Validate returns an error if params are invalid.
func (c *SecretConfig) Validate() error {
if c.RotatePolicy != nil && !c.RotatePolicy.IsValid() {
return errors.NotValidf("secret rotate policy %q", c.RotatePolicy)
}
if c.RotatePolicy.WillRotate() && c.NextRotateTime == nil {
return errors.New("cannot specify a secret rotate policy without a next rotate time")
}
if !c.RotatePolicy.WillRotate() && c.NextRotateTime != nil {
return errors.New("cannot specify a secret rotate time without a rotate policy")
}
return nil
}
// URI represents a reference to a secret.
type URI struct {
SourceUUID string
ID string
}
const (
idSnippet = `[0-9a-z]{20}`
uuidSnippet = `[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`
// SecretScheme is the URL prefix for a secret.
SecretScheme = "secret"
)
var validUUID = regexp.MustCompile(uuidSnippet)
var secretURIParse = regexp.MustCompile(`^` +
fmt.Sprintf(`((?P<source>%s)/)?(?P<id>%s)`, uuidSnippet, idSnippet) +
`$`)
// ParseURI parses the specified string into a URI.
func ParseURI(str string) (*URI, error) {
u, err := url.Parse(str)
if err != nil {
return nil, errors.Trace(err)
}
if u.Scheme == "" {
u.Scheme = SecretScheme
} else if u.Scheme != SecretScheme {
return nil, errors.NotValidf("secret URI scheme %q", u.Scheme)
}
if u.Host != "" && !validUUID.MatchString(u.Host) {
return nil, errors.NotValidf("host controller UUID %q", u.Host)
}
idStr := strings.TrimLeft(u.Path, "/")
if idStr == "" {
idStr = u.Opaque
}
valid := secretURIParse.MatchString(idStr)
if !valid {
return nil, errors.NotValidf("secret URI %q", str)
}
sourceUUID := secretURIParse.ReplaceAllString(idStr, "$source")
if sourceUUID == "" {
sourceUUID = u.Host
}
idPart := secretURIParse.ReplaceAllString(idStr, "$id")
id, err := xid.FromString(idPart)
if err != nil {
return nil, errors.NotValidf("secret URI %q", str)
}
result := &URI{
SourceUUID: sourceUUID,
ID: id.String(),
}
return result, nil
}
// NewURI returns a new secret URI.
func NewURI() *URI {
return &URI{
ID: xid.New().String(),
}
}
// WithSource returns a secret URI with the source.
func (u *URI) WithSource(uuid string) *URI {
u.SourceUUID = uuid
return u
}
// IsLocal returns true if this URI is local
// to the specified uuid.
func (u *URI) IsLocal(sourceUUID string) bool {
return u.SourceUUID == "" || u.SourceUUID == sourceUUID
}
// Name generates the secret name.
func (u URI) Name(revision int) string {
return fmt.Sprintf("%s-%d", u.ID, revision)
}
// String prints the URI as a string.
func (u *URI) String() string {
if u == nil {
return ""
}
var fullPath []string
fullPath = append(fullPath, u.ID)
str := strings.Join(fullPath, "/")
if u.SourceUUID == "" {
urlValue := url.URL{
Scheme: SecretScheme,
Opaque: str,
}
return urlValue.String()
}
urlValue := url.URL{
Scheme: SecretScheme,
Host: u.SourceUUID,
Path: str,
}
return urlValue.String()
}
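// Illustrative sketch (hypothetical helper, not part of this package): a secret
// URI round-trips through String and ParseURI, with or without a source
// controller UUID (the UUID below is made up).
func exampleURIRoundTrip() (*URI, error) {
	uri := NewURI().WithSource("deadbeef-1bad-4b1d-8000-4b1d0d06f00d")
	return ParseURI(uri.String())
}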
// OwnerKind represents the kind of a secret owner entity.
type OwnerKind string
// These represent the kinds of secret owner.
const (
ApplicationOwner OwnerKind = "application"
UnitOwner OwnerKind = "unit"
ModelOwner OwnerKind = "model"
)
// Owner is the owner of a secret.
type Owner struct {
Kind OwnerKind
ID string
}
func (o Owner) String() string {
return fmt.Sprintf("%s-%s", o.Kind, strings.ReplaceAll(o.ID, "/", "-"))
}
// SecretMetadata holds metadata about a secret.
type SecretMetadata struct {
// Read only after creation.
URI *URI
// Version starts at 1 and is incremented
// whenever an incompatible change is made.
Version int
// These can be updated after creation.
Description string
Label string
RotatePolicy RotatePolicy
// Set by service on creation/update.
// Owner is the entity which created the secret.
Owner Owner
CreateTime time.Time
UpdateTime time.Time
// These are denormalised here for ease of access.
// LatestRevision is the most recent secret revision.
LatestRevision int
// LatestRevisionChecksum is the checksum of the most
// recent revision content.
LatestRevisionChecksum string
// LatestExpireTime is the expire time of the most recent revision.
LatestExpireTime *time.Time
// NextRotateTime is when the secret should be rotated.
NextRotateTime *time.Time
// AutoPrune is true if the secret revisions should be pruned when they are no longer used.
AutoPrune bool
// Access is a list of access information for this secret.
Access []AccessInfo
}
// AccessInfo holds access information for a secret.
type AccessInfo struct {
Target string
Scope string
Role SecretRole
}
// AccessorKind represents the kind of a secret accessor entity.
type AccessorKind string
// These represent the kinds of secret accessor.
const (
UnitAccessor AccessorKind = "unit"
ModelAccessor AccessorKind = "model"
)
// Accessor is the accessor of a secret.
type Accessor struct {
Kind AccessorKind
ID string
}
func (a Accessor) String() string {
return fmt.Sprintf("%s-%s", a.Kind, strings.ReplaceAll(a.ID, "/", "-"))
}
// SecretRevisionRef is a reference to a secret revision
// stored in a secret backend.
type SecretRevisionRef struct {
URI *URI
RevisionID string
}
// SecretRevisionMetadata holds metadata about a secret revision.
type SecretRevisionMetadata struct {
Revision int
ValueRef *ValueRef
BackendName *string
CreateTime time.Time
UpdateTime time.Time
ExpireTime *time.Time
}
// SecretOwnerMetadata holds a secret's metadata and its revisions.
type SecretOwnerMetadata struct {
Metadata SecretMetadata
Revisions []int
}
// SecretExternalRevision holds metadata about an external secret revision.
type SecretExternalRevision struct {
Revision int
ValueRef *ValueRef
}
// SecretMetadataForDrain holds a secret's URI and any backend references of revisions to drain.
type SecretMetadataForDrain struct {
URI *URI
Revisions []SecretExternalRevision
}
// SecretConsumerMetadata holds metadata about a secret
// for a consumer of the secret.
type SecretConsumerMetadata struct {
// Label is used when notifying the consumer
// about changes to the secret.
Label string
// CurrentRevision is the current revision the
// consumer wants to read.
CurrentRevision int
}
// SecretRevisionInfo holds info used to read a secret value.
type SecretRevisionInfo struct {
LatestRevision int
Label string
}
// Filter is used when querying secrets.
type Filter struct {
URI *URI
Label *string
Revision *int
Owner *Owner
}
// Copyright 2022 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
import (
"fmt"
"time"
"github.com/juju/errors"
"github.com/juju/utils/v4"
)
// IsInternalSecretBackendID returns true if the supplied backend ID is the internal backend ID.
func IsInternalSecretBackendID(backendID string) bool {
// TODO: Fix me!!! This is not correct anymore because secret backend IDs now are all UUIDs.
return utils.IsValidUUIDString(backendID)
}
// SecretBackend defines a secrets backend.
type SecretBackend struct {
ID string
Name string
BackendType string
TokenRotateInterval *time.Duration
Config map[string]interface{}
}
// ValueRef represents a reference to a secret
// content value stored in a backend.
type ValueRef struct {
BackendID string
RevisionID string
}
func (r *ValueRef) String() string {
return fmt.Sprintf("%s:%s", r.BackendID, r.RevisionID)
}
// NextBackendRotateTime returns the next time a token rotate is due,
// given the supplied rotate interval.
func NextBackendRotateTime(now time.Time, rotateInterval time.Duration) (*time.Time, error) {
if rotateInterval > 0 && rotateInterval < time.Hour {
return nil, errors.NotValidf("token rotate interval %q less than 1h", rotateInterval)
}
// Rotate a reasonable time before the token is due to expire.
const maxInterval = 24 * time.Hour
nextInterval := time.Duration(0.75*rotateInterval.Seconds()) * time.Second
if nextInterval > maxInterval {
nextInterval = maxInterval
}
when := now.Add(nextInterval)
return &when, nil
}
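// Illustrative sketch (hypothetical helper, not part of this package): with a
// 4 hour token rotate interval, the next rotation is scheduled 3 hours (75%
// of the interval) from now.
func exampleBackendRotate(now time.Time) (*time.Time, error) {
	return NextBackendRotateTime(now, 4*time.Hour)
}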
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package secrets
import (
"bytes"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"encoding/json"
"io"
"strings"
"github.com/juju/errors"
)
// SecretValue holds the value of a secret.
// Instances of SecretValue are returned by a secret store
// when a secret look up is performed. The underlying value
// is a map of base64 encoded values represented as []byte.
type SecretValue interface {
// EncodedValues returns the key values of a secret as
// the raw base64 encoded strings.
// For the special case where the secret only has a
// single key value "data", use BinaryValue()
// to get the result.
EncodedValues() map[string]string
// Values returns the key values of a secret as strings.
// For the special case where the secret only has a
// single key value "data", use StringValue()
// to get the result.
Values() (map[string]string, error)
// KeyValue returns the specified secret value for the key.
// If the key has a #base64 suffix, the returned value is base64 encoded.
KeyValue(string) (string, error)
// IsEmpty checks if the value is empty.
IsEmpty() bool
// Checksum is the checksum of the secret content.
Checksum() (string, error)
}
type secretValue struct {
// Data holds the key values of a secret.
// We use a map to hold multiple values, e.g. cert and key.
// The serialised form of any string values is a
// base64 encoded string, representing arbitrary values.
data map[string][]byte
}
// NewSecretValue returns a secret using the specified map of values.
// The map values are assumed to be already base64 encoded.
func NewSecretValue(data map[string]string) SecretValue {
dataCopy := make(map[string][]byte, len(data))
for k, v := range data {
dataCopy[k] = append([]byte(nil), v...)
}
return &secretValue{data: dataCopy}
}
// NewSecretBytes returns a secret using the specified map of values.
// The map values are assumed to be already base64 encoded.
func NewSecretBytes(data map[string][]byte) SecretValue {
dataCopy := make(map[string][]byte, len(data))
for k, v := range data {
dataCopy[k] = append([]byte(nil), v...)
}
return &secretValue{data: dataCopy}
}
// IsEmpty checks if the value is empty.
func (v secretValue) IsEmpty() bool {
return len(v.data) == 0
}
// EncodedValues implements SecretValue.
func (v secretValue) EncodedValues() map[string]string {
dataCopy := make(map[string]string, len(v.data))
for k, val := range v.data {
dataCopy[k] = string(val)
}
return dataCopy
}
// Values implements SecretValue.
func (v secretValue) Values() (map[string]string, error) {
dataCopy := v.EncodedValues()
for k, v := range dataCopy {
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, errors.Trace(err)
}
dataCopy[k] = string(data)
}
return dataCopy, nil
}
// KeyValue implements SecretValue.
func (v secretValue) KeyValue(key string) (string, error) {
useBase64 := false
if strings.HasSuffix(key, base64Suffix) {
key = strings.TrimSuffix(key, base64Suffix)
useBase64 = true
}
val, ok := v.data[key]
if !ok {
return "", errors.NotFoundf("secret key value %q", key)
}
// The stored value is always base64 encoded.
if useBase64 {
return string(val), nil
}
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewReader(val))
result, err := io.ReadAll(b64)
if err != nil {
return "", errors.Trace(err)
}
return string(result), nil
}
// Checksum implements SecretValue.
func (v secretValue) Checksum() (string, error) {
data, err := json.Marshal(v.EncodedValues())
if err != nil {
return "", errors.Trace(err)
}
hash := sha256.New()
_, err = hash.Write(data)
if err != nil {
return "", errors.Trace(err)
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
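// Illustrative sketch (hypothetical helper, not part of this package): values
// given to NewSecretValue are base64 encoded, and Values decodes them back to
// plain strings.
func exampleSecretValues() (map[string]string, error) {
	v := NewSecretValue(map[string]string{"password": "aG9sYQ=="})
	return v.Values() // {"password": "hola"}
}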
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package status
// UnitDisplayStatus is used for CAAS units where the status of the unit
// could be overridden by the status of the container.
func UnitDisplayStatus(unitStatus, containerStatus StatusInfo) StatusInfo {
if unitStatus.Status == Terminated {
return unitStatus
}
if containerStatus.Status == Terminated {
return containerStatus
}
if containerStatus.Status == "" {
// No container update received from k8s yet.
// Unit may have set status, in which case use it.
if isStatusModified(unitStatus) {
return unitStatus
}
// If no unit status set, assume still allocating.
return StatusInfo{
Status: Waiting,
Message: unitStatus.Message,
Since: containerStatus.Since,
}
}
if unitStatus.Status != Active && unitStatus.Status != Waiting && unitStatus.Status != Blocked {
// Charm has said that there's a problem (error) or
// it's doing something (maintenance) so we'll stick with that.
return unitStatus
}
// Charm may think it's active, but as yet there's no way for it to
// query the workload state, so we'll ensure that we only say that
// it's active if the pod is reported as running. If not, we'll report
// any pod error.
switch containerStatus.Status {
case Error, Blocked, Allocating:
return containerStatus
case Waiting:
if unitStatus.Status == Active {
return containerStatus
}
case Running:
// Unit hasn't moved from initial state.
// thumper: I find this questionable, at best it is Unknown.
if !isStatusModified(unitStatus) {
return containerStatus
}
}
return unitStatus
}
func isStatusModified(unitStatus StatusInfo) bool {
return (unitStatus.Status != "" && unitStatus.Status != Waiting) ||
(unitStatus.Message != MessageWaitForContainer &&
unitStatus.Message != MessageInitializingAgent &&
unitStatus.Message != MessageInstallingAgent)
}
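// Illustrative sketch (hypothetical helper, not part of this package): when the
// charm claims to be active but k8s still reports the container as waiting,
// the container status is the one displayed.
func exampleDisplayStatus() StatusInfo {
	unit := StatusInfo{Status: Active}
	container := StatusInfo{Status: Waiting, Message: MessageWaitForContainer}
	return UnitDisplayStatus(unit, container) // the waiting container status
}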
// Copyright 2025 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package status
// IsMachinePresent returns true if the machine is started.
func IsMachinePresent(status StatusInfo) bool {
// This traps the known machine status codes, but if the status isn't
// recognised, we assume the machine is not present.
switch status.Status {
case Started:
return true
case Pending, Down, Stopped, Error, Unknown:
return false
default:
return false
}
}
// IsInstancePresent returns true if the instance is running.
func IsInstancePresent(status StatusInfo) bool {
// This traps the known instance status codes, but if the status isn't
// recognised, we assume the instance is not present.
switch status.Status {
case Running:
return true
case Empty, Allocating, Error, ProvisioningError, Unknown:
return false
default:
return false
}
}
// IsAgentPresent returns true if the agent is idle or executing.
func IsAgentPresent(status StatusInfo) bool {
// This traps the known agent status codes, but if the status isn't
// recognised, we assume the agent is not present.
switch status.Status {
case Idle, Executing:
return true
case Allocating, Error, Failed, Rebooting:
return false
default:
return false
}
}
// IsUnitWorkloadPresent returns true if the unit workload is active, or is
// in a state where it is expected to become active.
func IsUnitWorkloadPresent(status StatusInfo) bool {
// This traps the known workload status codes, but if the status isn't
// recognised, we assume the workload is not present.
switch status.Status {
case Active:
return true
case Maintenance:
switch status.Message {
case MessageInstallingCharm:
return false
}
return true
case Waiting:
switch status.Message {
case MessageWaitForMachine,
MessageInstallingAgent,
MessageInitializingAgent:
return false
}
return true
case Blocked, Error, Terminated, Unknown:
return false
default:
return false
}
}
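// Illustrative sketch (hypothetical helper, not part of this package): a unit
// in maintenance counts as present unless it is still installing its charm.
func exampleWorkloadPresence() bool {
	return IsUnitWorkloadPresent(StatusInfo{
		Status:  Maintenance,
		Message: MessageInstallingCharm,
	}) // false while the charm is installing
}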
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package status
import (
"time"
)
// Status represents the status of an entity. It has recently come to apply
// to "workloads" as well, which we don't currently model, for no very clear
// reason.
//
// Status values currently apply to machine (agents), unit (agents), unit
// (workloads), application (workloads), volumes, filesystems, and models.
type Status string
// String returns a string representation of the Status.
func (s Status) String() string {
return string(s)
}
// StatusInfo holds a Status and associated information.
type StatusInfo struct {
Status Status
Message string
Data map[string]interface{}
Since *time.Time
}
// StatusSetter represents a type whose status can be set.
type StatusSetter interface {
SetStatus(StatusInfo) error
}
// StatusGetter represents a type whose status can be read.
type StatusGetter interface {
Status() (StatusInfo, error)
}
// ModificationStatusGetter represents a type whose modification status can be
// read.
type ModificationStatusGetter interface {
ModificationStatus() (StatusInfo, error)
}
const (
// Status values common to machine and unit agents.
// Error means the entity requires human intervention
// in order to operate correctly.
Error Status = "error"
// Started is set when:
// The entity is actively participating in the model.
// For unit agents, this is a state we preserve for backwards
// compatibility with scripts during the life of Juju 1.x.
// In Juju 2.x, the agent-state will remain “active” and scripts
// will watch the unit-state instead for signals of application readiness.
Started Status = "started"
)
const (
// Status values specific to machine agents.
// Pending is set when:
// The machine is not yet participating in the model.
Pending Status = "pending"
// Stopped is set when:
// The machine's agent will perform no further action, other than
// to set the unit to Dead at a suitable moment.
Stopped Status = "stopped"
// Down is set when:
// The machine ought to be signalling activity, but it cannot be
// detected.
Down Status = "down"
)
const (
// Status values specific to unit agents.
// Allocating is set when:
// The machine on which a unit is to be hosted is still being
// spun up in the cloud.
Allocating Status = "allocating"
// Rebooting is set when:
// The machine on which this agent is running is being rebooted.
// The juju-agent should move from rebooting to idle when the reboot is complete.
Rebooting Status = "rebooting"
// Executing is set when:
// The agent is running a hook or action. The human-readable message should reflect
// which hook or action is being run.
Executing Status = "executing"
// Idle is set when:
// Once the agent is installed and running it will notify the Juju server and its state
// becomes "idle". It will stay "idle" until some action (e.g. it needs to run a hook) or
// error (e.g it loses contact with the Juju server) moves it to a different state.
Idle Status = "idle"
// Failed is set when:
// The unit agent has failed in some way, e.g. the agent ought to be signalling
// activity, but it cannot be detected. It might also be that the unit agent
// detected an unrecoverable condition and managed to tell the Juju server about it.
Failed Status = "failed"
// Lost is set when:
// The juju agent has not communicated with the juju server for an unexpectedly long time;
// the unit agent ought to be signalling activity, but none has been detected.
Lost Status = "lost"
)
const (
// Status values specific to applications and units, reflecting the
// state of the software itself.
// Unset is only for applications, and is a placeholder status.
// The core/cache package deals with aggregating the unit status
// to the application level.
Unset Status = "unset"
// Maintenance is set when:
// The unit is not yet providing services, but is actively doing stuff
// in preparation for providing those services.
// This is a "spinning" state, not an error state.
// It reflects activity on the unit itself, not on peers or related units.
Maintenance Status = "maintenance"
// Terminated is set when:
// This unit used to exist, we have a record of it (perhaps because of storage
// allocated for it that was flagged to survive it). Nonetheless, it is now gone.
Terminated Status = "terminated"
// Unknown is set when:
// A unit-agent has finished calling install, config-changed, and start,
// but the charm has not called status-set yet.
Unknown Status = "unknown"
// Waiting is set when:
// The unit is unable to progress to an active state because an application to
// which it is related is not running.
Waiting Status = "waiting"
// Blocked is set when:
// The unit needs manual intervention to get back to the Running state.
Blocked Status = "blocked"
// Active is set when:
// The unit believes it is correctly offering all the services it has
// been asked to offer.
Active Status = "active"
)
const (
// Status values specific to storage.
// Attaching indicates that the storage is being attached
// to a machine.
Attaching Status = "attaching"
// Attached indicates that the storage is attached to a
// machine.
Attached Status = "attached"
// Detaching indicates that the storage is being detached
// from a machine.
Detaching Status = "detaching"
// Detached indicates that the storage is not attached to
// any machine.
Detached Status = "detached"
)
const (
// Status values specific to models.
// Available indicates that the model is available for use.
Available Status = "available"
// Busy indicates that the model is not available for use because it is
// running a process that must take the model offline, such as a migration,
// upgrade, or backup. This is a spinning state, it is not an error state,
// and it should be expected that the model will eventually go back to
// available.
Busy Status = "busy"
)
const (
// Status values specific to relations.
// Joining is used to signify that a relation should become joined soon.
Joining Status = "joining"
// Joined is the normal status for a healthy, alive relation.
Joined Status = "joined"
// Broken is the status for when a relation life goes to Dead.
Broken Status = "broken"
// Suspending is used to signify that a relation will be temporarily broken
// pending action to resume it.
Suspending Status = "suspending"
// Suspended is used to signify that a relation is temporarily broken pending
// action to resume it.
Suspended Status = "suspended"
)
const (
// Status values that are common to several entities.
// Destroying indicates that the entity is being destroyed.
//
// This is valid for volumes, filesystems, and models.
Destroying Status = "destroying"
)
// InstanceStatus
const (
Empty Status = ""
Provisioning Status = "allocating"
Running Status = "running"
ProvisioningError Status = "provisioning error"
)
// ModificationStatus
const (
Applied Status = "applied"
)
const (
MessageWaitForMachine = "waiting for machine"
MessageWaitForContainer = "waiting for container"
MessageInstallingAgent = "installing agent"
MessageInitializingAgent = "agent initialising"
MessageInstallingCharm = "installing charm software"
)
// KnownModificationStatus returns true if the status has a known value for
// a modification of an instance.
func (s Status) KnownModificationStatus() bool {
switch s {
case
Idle,
Applied,
Error,
Unknown:
return true
}
return false
}
// KnownInstanceStatus returns true if status has a known value for a machine
// cloud instance.
func (s Status) KnownInstanceStatus() bool {
switch s {
case
Pending,
ProvisioningError,
Allocating,
Running,
Error,
Unknown:
return true
}
return false
}
// KnownMachineStatus returns true if status has a known value for a machine.
func (s Status) KnownMachineStatus() bool {
switch s {
case
Error,
Started,
Pending,
Stopped,
Down:
return true
}
return false
}
// KnownAgentStatus returns true if status has a known value for an agent.
// It includes every status that has ever been valid for a unit or machine agent.
// This is used by the apiserver client facade to filter out unknown values.
func (s Status) KnownAgentStatus() bool {
switch s {
case
Allocating,
Error,
Failed,
Rebooting,
Executing,
Idle:
return true
}
return false
}
// KnownWorkloadStatus returns true if status has a known value for a workload.
// It includes every status that has ever been valid for a unit agent.
// This is used by the apiserver client facade to filter out unknown values.
func (s Status) KnownWorkloadStatus() bool {
if ValidWorkloadStatus(s) {
return true
}
switch s {
case Error: // include error so that we can filter on what the spec says is valid
return true
default:
return false
}
}
// ValidWorkloadStatus returns true if status has a valid value (that is to say,
// a value that it's OK to set) for units or applications.
func ValidWorkloadStatus(status Status) bool {
switch status {
case
Blocked,
Maintenance,
Waiting,
Active,
Unknown,
Terminated:
return true
default:
return false
}
}
// WorkloadMatches returns true if the candidate matches status,
// taking into account that the candidate may be a legacy
// status value which has been deprecated.
func (s Status) WorkloadMatches(candidate Status) bool {
return s == candidate
}
// ValidModelStatus returns true if status has a valid value (that is to say,
// a value that it's OK to set) for models.
func ValidModelStatus(status Status) bool {
switch status {
case
Available,
Busy,
Destroying,
Suspended, // For model, this means that its cloud credential is invalid and model will not be doing any cloud calls.
Error:
return true
default:
return false
}
}
// Matches returns true if the candidate matches status,
// taking into account that the candidate may be a legacy
// status value which has been deprecated.
func (s Status) Matches(candidate Status) bool {
return s == candidate
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package status
import (
"time"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
// StatusHistoryFilter holds arguments that can be used to filter a status history backlog.
type StatusHistoryFilter struct {
// Size indicates how many results are expected at most.
Size int
// FromDate indicates the earliest date from which logs are expected.
FromDate *time.Time
// Delta indicates the age of the oldest log expected.
Delta *time.Duration
// Exclude indicates the status messages that should be excluded
// from the returned result.
Exclude set.Strings
}
// Validate checks that the minimum requirements of a StatusHistoryFilter are met.
func (f *StatusHistoryFilter) Validate() error {
s := f.Size > 0
t := f.FromDate != nil
d := f.Delta != nil
switch {
case !(s || t || d):
return errors.NotValidf("missing filter parameters")
case s && t:
return errors.NotValidf("Size and Date together")
case s && d:
return errors.NotValidf("Size and Delta together")
case t && d:
return errors.NotValidf("Date and Delta together")
}
return nil
}
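// Hypothetical usage sketch (not part of the original source): Validate
// accepts exactly one of Size, FromDate, or Delta. This helper builds a
// filter constrained only by age, which passes validation.
func exampleDeltaOnlyFilter(age time.Duration) (StatusHistoryFilter, error) {
	f := StatusHistoryFilter{Delta: &age}
	return f, f.Validate()
}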
// InstanceStatusHistoryGetter instances can fetch their instance status history.
type InstanceStatusHistoryGetter interface {
InstanceStatusHistory(filter StatusHistoryFilter) ([]StatusInfo, error)
}
// DetailedStatus holds status info about a machine or unit agent.
type DetailedStatus struct {
Status Status
Info string
Data map[string]interface{}
Since *time.Time
Kind HistoryKind
}
// History holds a number of DetailedStatus entries.
type History []DetailedStatus
// HistoryKind represents the possible types of
// status history entries.
type HistoryKind string
// IMPORTANT DEV NOTE: when changing this HistoryKind list in any way, these may need to be revised:
//
// * HistoryKind.Valid()
// * AllHistoryKind()
// * command help for 'show-status-log' describing these kinds.
const (
// KindModel represents the model itself.
KindModel HistoryKind = "model"
// KindApplication represents an entry for an application.
KindApplication HistoryKind = "application"
// KindSAAS represents an entry for a saas application.
KindSAAS HistoryKind = "saas"
// KindUnit represents agent and workload combined.
KindUnit HistoryKind = "unit"
// KindUnitAgent represents a unit agent status history entry.
KindUnitAgent HistoryKind = "juju-unit"
// KindWorkload represents a charm workload status history entry.
KindWorkload HistoryKind = "workload"
// KindMachineInstance represents an entry for a machine instance.
KindMachineInstance HistoryKind = "machine"
// KindMachine represents an entry for a machine agent.
KindMachine HistoryKind = "juju-machine"
// KindContainerInstance represents an entry for a container instance.
KindContainerInstance HistoryKind = "container"
// KindContainer represents an entry for a container agent.
KindContainer HistoryKind = "juju-container"
)
// String returns a string representation of the HistoryKind.
func (k HistoryKind) String() string {
return string(k)
}
// Valid will return true if the current kind is a valid one.
func (k HistoryKind) Valid() bool {
switch k {
case KindModel, KindUnit, KindUnitAgent, KindWorkload,
KindApplication, KindSAAS,
KindMachineInstance, KindMachine,
KindContainerInstance, KindContainer:
return true
}
return false
}
// AllHistoryKind will return all valid HistoryKinds.
func AllHistoryKind() map[HistoryKind]string {
return map[HistoryKind]string{
KindModel: "statuses for the model itself",
KindApplication: "statuses for the specified application",
KindSAAS: "statuses for the specified SAAS application",
KindUnit: "statuses for specified unit and its workload",
KindUnitAgent: "statuses from the agent that is managing a unit",
KindWorkload: "statuses for unit's workload",
KindMachineInstance: "statuses that occur due to provisioning of a machine",
KindMachine: "status of the agent that is managing a machine",
KindContainerInstance: "statuses from the agent that is managing containers",
KindContainer: "statuses from the containers only and not their host machines",
}
}
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package trace
import (
"strconv"
)
// Attribute allows you to add additional information to help identify
// an operation (event, error or span end).
type Attribute interface {
Key() string
Value() string
}
// StringAttribute defines an attribute with a string value.
type StringAttribute struct {
key, value string
}
// StringAttr creates a StringAttribute.
func StringAttr(key, value string) StringAttribute {
return StringAttribute{key: key, value: value}
}
// Key defines the identifier for the attribute.
func (a StringAttribute) Key() string {
return a.key
}
// Value returns a string.
func (a StringAttribute) Value() string {
return a.value
}
// IntAttribute defines an attribute with an int value, stored as its decimal
// string representation.
type IntAttribute struct {
key, value string
}
// IntAttr creates an IntAttribute.
func IntAttr(key string, value int) IntAttribute {
return IntAttribute{key: key, value: strconv.Itoa(value)}
}
// Key defines the identifier for the attribute.
func (a IntAttribute) Key() string {
return a.key
}
// Value returns a string.
func (a IntAttribute) Value() string {
return a.value
}
// Int64Attribute defines an attribute with an int64 value, stored as its
// decimal string representation.
type Int64Attribute struct {
key, value string
}
// Int64Attr creates an Int64Attribute.
func Int64Attr(key string, value int64) Int64Attribute {
return Int64Attribute{key: key, value: strconv.FormatInt(value, 10)}
}
// Key defines the identifier for the attribute.
func (a Int64Attribute) Key() string {
return a.key
}
// Value returns a string.
func (a Int64Attribute) Value() string {
return a.value
}
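// Hypothetical usage sketch (not part of the original source): attributes are
// plain key/value pairs; numeric values are stored as their string form, so a
// span backend only ever sees strings.
func exampleRequestAttributes(model string, attempt int) []Attribute {
	return []Attribute{
		StringAttr("model", model),
		IntAttr("attempt", attempt),
	}
}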
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package trace
import (
"context"
)
type contextKey string
const (
traceContextKey contextKey = "trace"
spanContextKey contextKey = "span"
traceIDContextKey contextKey = "traceID"
spanIDContextKey contextKey = "spanID"
traceFlagsContextKey contextKey = "traceFlags"
)
// TracerFromContext returns a tracer from the context. If no tracer is found,
// an empty tracer is returned.
func TracerFromContext(ctx context.Context) (Tracer, bool) {
value := ctx.Value(traceContextKey)
if value == nil {
return NoopTracer{}, false
}
tracer, ok := value.(Tracer)
if !ok {
return NoopTracer{}, false
}
return tracer, tracer.Enabled()
}
// SpanFromContext returns a span from the context. If no span is found,
// an empty span is returned.
func SpanFromContext(ctx context.Context) Span {
value := ctx.Value(spanContextKey)
if value == nil {
return NoopSpan{}
}
span, ok := value.(Span)
if !ok {
return NoopSpan{}
}
return span
}
// WithTracer returns a new context with the given tracer.
func WithTracer(ctx context.Context, tracer Tracer) context.Context {
if tracer == nil {
tracer = NoopTracer{}
}
return context.WithValue(ctx, traceContextKey, tracer)
}
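// Hypothetical usage sketch (not part of the original source): a tracer
// stored with WithTracer is recovered by TracerFromContext; when the context
// holds no tracer, the NoopTracer fallback keeps call sites free of nil
// checks.
func exampleTracerLookup(ctx context.Context, tracer Tracer) (Tracer, bool) {
	ctx = WithTracer(ctx, tracer)
	return TracerFromContext(ctx)
}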
// InjectTracerIfRequired returns a new context with the given tracer if one
// isn't already set on the context.
func InjectTracerIfRequired(ctx context.Context, tracer Tracer) context.Context {
// If the tracer is nil, we'll just pass back the context, as that will
// either have a tracer or it won't. Using nil tracer could invalidate the
// parent one, so just send it back.
if tracer == nil {
return ctx
}
// If the parent context already carries a tracer, then use that one.
if value := ctx.Value(traceContextKey); value != nil {
return ctx
}
// If the tracer isn't already found, then inject the new one.
return context.WithValue(ctx, traceContextKey, tracer)
}
// WithSpan returns a new context with the given span.
func WithSpan(ctx context.Context, span Span) context.Context {
if span == nil {
span = NoopSpan{}
}
return context.WithValue(ctx, spanContextKey, span)
}
// WithTraceScope returns a new context with the given trace scope (traceID and
// spanID).
func WithTraceScope(ctx context.Context, traceID, spanID string, flags int) context.Context {
ctx = context.WithValue(ctx, traceFlagsContextKey, flags)
ctx = context.WithValue(ctx, spanIDContextKey, spanID)
return context.WithValue(ctx, traceIDContextKey, traceID)
}
// RemoveTraceScope returns a new context without the trace scope.
func RemoveTraceScope(ctx context.Context) context.Context {
ctx = context.WithValue(ctx, traceFlagsContextKey, nil)
ctx = context.WithValue(ctx, spanIDContextKey, nil)
return context.WithValue(ctx, traceIDContextKey, nil)
}
// ScopeFromContext returns the traceID, spanID and the flags from the context.
// Both traceID and spanID can be in the form of a hex string or a raw
// string.
func ScopeFromContext(ctx context.Context) (string, string, int, bool) {
// You must have all to have one.
trace := ctx.Value(traceIDContextKey)
if trace == nil {
return "", "", 0, false
}
traceID, ok := trace.(string)
if !ok {
return "", "", 0, false
}
span := ctx.Value(spanIDContextKey)
if span == nil {
return "", "", 0, false
}
spanID, ok := span.(string)
if !ok {
return "", "", 0, false
}
flags, ok := ctx.Value(traceFlagsContextKey).(int)
return traceID, spanID, flags, ok && traceID != "" && spanID != ""
}
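// Hypothetical usage sketch (not part of the original source): WithTraceScope
// and ScopeFromContext round-trip a trace scope through a context. The IDs
// used here are arbitrary illustrative values.
func exampleTraceScopeRoundTrip(ctx context.Context) (traceID, spanID string, flags int, ok bool) {
	ctx = WithTraceScope(ctx, "4bf92f3577b34da6a3ce929d0e0e4736", "00f067aa0ba902b7", 1)
	return ScopeFromContext(ctx)
}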
// TraceIDFromContext returns the traceID from the context.
func TraceIDFromContext(ctx context.Context) (string, bool) {
trace := ctx.Value(traceIDContextKey)
traceID, ok := trace.(string)
return traceID, ok && traceID != ""
}
// WithTraceID returns a new context with the given trace ID.
func WithTraceID(ctx context.Context, traceID string) context.Context {
return context.WithValue(ctx, traceIDContextKey, traceID)
}
// NoopTracer is a tracer that does nothing.
type NoopTracer struct{}
func (NoopTracer) Start(ctx context.Context, name string, options ...Option) (context.Context, Span) {
return ctx, NoopSpan{}
}
func (NoopTracer) Enabled() bool {
return false
}
// NoopSpan is a span that does nothing.
type NoopSpan struct{}
// Scope returns the scope of the span.
func (NoopSpan) Scope() Scope {
return NoopScope{}
}
// AddEvent will record an event for this span. This is a manual mechanism
// for recording an event, it is useful to log information about what
// happened during the lifetime of a span.
// This is not the same as a log attached to a span, unfortunately the
// OpenTelemetry API does not have a way to record logs yet.
func (NoopSpan) AddEvent(string, ...Attribute) {}
// RecordError will record err as an exception span event for this span. If
// this span is not being recorded or err is nil then this method does
// nothing.
// The attributes are evaluated lazily and only if the span is recording.
func (NoopSpan) RecordError(error, ...Attribute) {}
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
// method has been called.
func (NoopSpan) End(...Attribute) {}
// NoopScope is a scope that does nothing.
type NoopScope struct{}
// TraceID returns the trace ID of the span.
func (NoopScope) TraceID() string {
return ""
}
// SpanID returns the span ID of the span.
func (NoopScope) SpanID() string {
return ""
}
// TraceFlags returns the trace flags of the span.
func (NoopScope) TraceFlags() int {
return 0
}
// IsSampled returns if the span is sampled.
func (NoopScope) IsSampled() bool {
return false
}
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package trace
import (
"context"
"fmt"
"runtime"
"strings"
"github.com/juju/errors"
"github.com/juju/names/v6"
)
const (
// OTELTraceID is the trace ID key used in the go label.
OTELTraceID = "otel.traceid"
)
const (
// ErrTracerDying is used to indicate to *third parties* that the
// tracer worker is dying, instead of catacomb.ErrDying, which is
// unsuitable for propagating inter-worker.
// This error indicates to consuming workers that their dependency has
// become unmet and a restart by the dependency engine is imminent.
ErrTracerDying = errors.ConstError("tracer worker is dying")
)
const (
controllerNamespace = "controller"
)
// Option is a configuration function that can be passed to the Tracer.Start() method.
type Option func(*TracerOption)
// TracerOption holds the options accumulated for a Tracer.Start() call.
type TracerOption struct {
attributes []Attribute
stackTrace bool
}
// Attributes returns a slice of attributes for creating a span.
func (t *TracerOption) Attributes() []Attribute {
return t.attributes
}
// StackTrace returns whether stack traces are recorded on the span when errors occur.
func (t *TracerOption) StackTrace() bool {
return t.stackTrace
}
// WithAttributes returns an Option that sets the attributes on the span.
func WithAttributes(attributes ...Attribute) Option {
return func(o *TracerOption) {
o.attributes = attributes
}
}
// WithStackTrace returns an Option that enables stack traces on the span.
func WithStackTrace() Option {
return func(o *TracerOption) {
o.stackTrace = true
}
}
// NewTracerOptions returns a new TracerOption with stack traces enabled by default.
func NewTracerOptions() *TracerOption {
return &TracerOption{
stackTrace: true,
}
}
// Tracer is the interface that all tracers must implement.
type Tracer interface {
// Start creates a span and a context.Context containing the newly-created
// span.
//
// If the context.Context provided in `ctx` contains a Span then the
// newly-created Span will be a child of that span, otherwise it will be a
// root span.
//
// Any Span that is created MUST also be ended. This is the responsibility
// of the user. Implementations of this API may leak memory or other
// resources if Spans are not ended.
Start(context.Context, string, ...Option) (context.Context, Span)
// Enabled returns if the tracer is enabled.
Enabled() bool
}
// Span is the individual component of a trace. It represents a single named
// and timed operation of a workflow that is traced. A Tracer is used to
// create a Span and it is then up to the operation the Span represents to
// properly end the Span when the operation itself ends.
type Span interface {
// Scope returns the scope of the span. This is useful for identifying
// the trace and span ID.
Scope() Scope
// AddEvent will record an event for this span. This is a manual mechanism
// for recording an event, it is useful to log information about what
// happened during the lifetime of a span.
// This is not the same as a log attached to a span, unfortunately the
// OpenTelemetry API does not have a way to record logs yet.
AddEvent(string, ...Attribute)
// RecordError will record err as an exception span event for this span. If
// this span is not being recorded or err is nil then this method does
// nothing.
// The attributes are evaluated lazily and only if the span is recording.
RecordError(error, ...Attribute)
// End completes the Span. The Span is considered complete and ready to be
// delivered through the rest of the telemetry pipeline after this method
// is called. Therefore, updates to the Span are not allowed after this
// method has been called.
End(...Attribute)
}
// Scope is the scope of the span.
type Scope interface {
// TraceID returns the trace ID of the span.
TraceID() string
// SpanID returns the span ID of the span.
SpanID() string
// TraceFlags returns the trace flags of the span.
TraceFlags() int
// IsSampled returns if the span is sampled.
IsSampled() bool
}
// Name is the name of the span.
type Name string
func (n Name) String() string {
return string(n)
}
// NameFromFunc will return the name from the function. This is useful for
// automatically generating a name for a span.
func NameFromFunc() Name {
// Get caller frame.
var pcs [1]uintptr
n := runtime.Callers(2, pcs[:])
if n < 1 {
return "unknown"
}
fn := runtime.FuncForPC(pcs[0])
name := fn.Name()
if lastSlash := strings.LastIndexByte(name, '/'); lastSlash > 0 {
name = name[lastSlash+1:]
}
return Name(name)
}
// Start returns a new context with the given trace.
func Start(ctx context.Context, name Name, options ...Option) (context.Context, Span) {
// Tracer is always guaranteed to be returned here. If there is no tracer
// available it will return a noop tracer.
tracer, _ := TracerFromContext(ctx)
return tracer.Start(ctx, name.String(), options...)
}
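// Hypothetical usage sketch (not part of the original source): a typical call
// site starts a span named after the calling function and defers End so the
// span is always closed, even on early return.
func exampleTracedOperation(ctx context.Context) error {
	ctx, span := Start(ctx, NameFromFunc(), WithAttributes(StringAttr("component", "example")))
	defer span.End()
	// Perform the traced work with ctx here.
	_ = ctx
	return nil
}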
// TracerNamespace is a combination of the worker name and the namespace, it
// allows us to uniquely identify a tracer.
// Note: the worker name doesn't need to be 100% accurate; it is just used to
// help identify the tracer.
type TracerNamespace struct {
Worker string
Namespace string
}
// Namespace returns a new namespace.
func Namespace(worker, namespace string) TracerNamespace {
return TracerNamespace{
Worker: worker,
Namespace: namespace,
}
}
// ShortNamespace returns a short representation of the namespace.
func (ns TracerNamespace) ShortNamespace() string {
// Don't shorten the controller namespace.
if ns.Namespace == controllerNamespace {
return ns.Namespace
}
// If the namespace is less than 6 characters then return the whole
// namespace.
if len(ns.Namespace) < 6 {
return ns.Namespace
}
return ns.Namespace[:6]
}
// String returns a short representation of the namespace.
func (ns TracerNamespace) String() string {
if ns.Namespace == "" {
return ns.Worker
}
return fmt.Sprintf("%s:%s", ns.Worker, ns.Namespace)
}
// WithTagAndKind returns a new TaggedTracerNamespace.
func (ns TracerNamespace) WithTagAndKind(tag names.Tag, kind Kind) TaggedTracerNamespace {
return TaggedTracerNamespace{
TracerNamespace: ns,
Tag: tag,
Kind: kind,
}
}
// TaggedTracerNamespace is a TracerNamespace with a tag.
type TaggedTracerNamespace struct {
TracerNamespace
Tag names.Tag
Kind Kind
}
func (ns TaggedTracerNamespace) String() string {
return fmt.Sprintf("%s:%s", ns.Kind, ns.ShortNamespace())
}
// Kind represents the source of the trace. Either the trace will come
// from a controller, unit or client.
// We can expand on these later, for example we can add machine or worker kinds,
// but for now this is enough.
type Kind string
const (
KindController Kind = "controller"
KindUnit Kind = "unit"
KindClient Kind = "client"
)
func (k Kind) String() string {
return string(k)
}
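// Hypothetical usage sketch (not part of the original source): building a
// tagged tracer namespace for a unit agent. The model UUID below is an
// arbitrary illustrative value, and the short form keeps only its first six
// characters.
func exampleTaggedNamespace() string {
	ns := Namespace("uniter", "deadbeef-0bad-4000-8000-00000000beef")
	tagged := ns.WithTagAndKind(names.NewUnitTag("mysql/0"), KindUnit)
	return tagged.String() // "unit:deadbe"
}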
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package unit
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/juju/internal/uuid"
)
// UUID represents a unit unique identifier.
type UUID string
// NewUUID is a convenience function for generating a new unit uuid.
func NewUUID() (UUID, error) {
id, err := uuid.NewUUID()
if err != nil {
return UUID(""), err
}
return UUID(id.String()), nil
}
// ParseID returns a new UUID from the given string. If the string is not a valid
// uuid an error satisfying [errors.NotValid] will be returned.
func ParseID(value string) (UUID, error) {
if !uuid.IsValidUUIDString(value) {
return "", fmt.Errorf("id %q %w", value, errors.NotValid)
}
return UUID(value), nil
}
// String implements the stringer interface for UUID.
func (u UUID) String() string {
return string(u)
}
// Validate ensures the consistency of the UUID. If the uuid is invalid an error
// satisfying [errors.NotValid] will be returned.
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("%wuuid cannot be empty", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("uuid %q %w", u, errors.NotValid)
}
return nil
}
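// Hypothetical usage sketch (not part of the original source): a freshly
// generated unit UUID always survives a round trip through ParseID.
func exampleUnitUUIDRoundTrip() (UUID, error) {
	id, err := NewUUID()
	if err != nil {
		return "", err
	}
	return ParseID(id.String())
}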
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package unit
import (
"fmt"
"regexp"
"github.com/juju/juju/internal/errors"
)
const (
// applicationPattern is a non-compiled regexp that can be composed with
// other snippets to form a valid application regexp.
//
// Application names are a series of lower-case alphanumeric segments, which
// can be separated by hyphens. The first character must be a letter, and
// each segment must contain at least one letter.
applicationPattern = "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)"
// numberPattern is a non-compiled regexp that can be composed with other
// snippets for validating small number sequences.
//
// Numbers are a series of digits, with no leading zeros unless the number
// is exactly 0.
numberPattern = "(?:0|[1-9][0-9]*)"
// unitPattern is a non-compiled regexp for a valid unit name.
unitPattern = "(" + applicationPattern + ")/(" + numberPattern + ")"
)
const (
InvalidUnitName = errors.ConstError("invalid unit name")
)
var validUnit = regexp.MustCompile("^" + unitPattern + "$")
// Name represents a unit's name, used as a human-readable unique identifier.
type Name string
// NewName returns a new Name. If the name is invalid, an InvalidUnitName error
// will be returned.
func NewName(name string) (Name, error) {
n := Name(name)
return n, n.Validate()
}
// NewNameFromParts returns a new Name from the application and number parts. If
// the name is invalid, an InvalidUnitName error will be returned.
func NewNameFromParts(applicationName string, number int) (Name, error) {
return NewName(fmt.Sprintf("%s/%d", applicationName, number))
}
// String returns the Name as a string.
func (n Name) String() string {
return string(n)
}
// Validate returns an error if the Name is invalid. The returned error is an
// InvalidUnitName error.
func (n Name) Validate() error {
if !validUnit.MatchString(n.String()) {
return errors.Errorf("%w: %q", InvalidUnitName, n)
}
return nil
}
// Application returns the name of the application that the unit is
// associated with. The Name must be valid; for an invalid Name an empty
// string is returned.
func (n Name) Application() string {
s := validUnit.FindStringSubmatch(n.String())
if s == nil {
// Should never happen.
return ""
}
return s[1]
}
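// Hypothetical usage sketch (not part of the original source): composing a
// unit name from its parts and recovering the application name from it.
func exampleUnitName() (string, error) {
	n, err := NewNameFromParts("wordpress", 3) // "wordpress/3"
	if err != nil {
		return "", err
	}
	return n.Application(), nil // "wordpress"
}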
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package user
import (
"fmt"
"regexp"
"time"
"github.com/juju/errors"
"github.com/juju/juju/internal/uuid"
)
const (
LocalUserDomain = "local"
)
// User represents a user in the system.
type User struct {
// UUID is the unique identifier for the user.
UUID UUID
// Name is the username of the user.
Name Name
// DisplayName is a user-friendly name to represent the user as.
DisplayName string
// CreatorUUID is the associated user that created this user.
CreatorUUID UUID
// CreatorName is the name of the user that created this user.
CreatorName Name
// CreatedAt is the time that the user was created at.
CreatedAt time.Time
// LastLogin is the last time the user logged in.
LastLogin time.Time
// Disabled is true if the user is disabled.
Disabled bool
}
// UUID is a unique identifier for a user.
type UUID string
// NewUUID returns a new UUID.
func NewUUID() (UUID, error) {
uuid, err := uuid.NewUUID()
if err != nil {
return "", errors.Trace(err)
}
return UUID(uuid.String()), nil
}
// Validate returns an error if the UUID is invalid. The error returned
// satisfies [errors.NotValid].
func (u UUID) Validate() error {
if u == "" {
return fmt.Errorf("empty uuid%w", errors.Hide(errors.NotValid))
}
if !uuid.IsValidUUIDString(string(u)) {
return fmt.Errorf("invalid uuid: %q%w", u, errors.Hide(errors.NotValid))
}
return nil
}
// String returns the UUID as a string.
func (u UUID) String() string {
return string(u)
}
// userNameTag describes a tag that carries a user name and domain.
type userNameTag interface {
// Name returns the name of the user.
Name() string
// Domain returns the domain of the user.
Domain() string
}
var (
validUserNameSnippet = "[a-zA-Z0-9][a-zA-Z0-9.+-]*[a-zA-Z0-9]"
validName = regexp.MustCompile(fmt.Sprintf("^(?P<name>%s)(?:@(?P<domain>%s))?$", validUserNameSnippet, validUserNameSnippet))
)
// NewName validates the name and returns a new Name object. If the name is not
// valid an error satisfying [errors.NotValid] will be returned.
func NewName(name string) (Name, error) {
parts := validName.FindStringSubmatch(name)
if len(parts) != 3 {
return Name{}, errors.NotValidf("user name %q", name)
}
domain := parts[2]
if domain == LocalUserDomain {
domain = ""
}
return Name{
name: parts[1],
domain: domain,
}, nil
}
// Name represents the identity of a user.
type Name struct {
// name is the name of the user, it does not include the domain.
name string
// domain is the part of the username after the "@".
domain string
}
// Name returns the full username.
func (n Name) Name() string {
if n.domain == "" || n.domain == LocalUserDomain {
return n.name
}
return n.name + "@" + n.domain
}
// IsLocal indicates if the username is a local or external username.
func (n Name) IsLocal() bool {
return n.Domain() == LocalUserDomain || n.Domain() == ""
}
// Domain returns the user domain. Users in the local database
// are from the LocalUserDomain. Other users are considered 'remote' users.
func (n Name) Domain() string {
return n.domain
}
// String returns the full username.
func (n Name) String() string {
return n.Name()
}
// IsZero returns true if the struct is uninitialized.
func (n Name) IsZero() bool {
// The empty string is an invalid user name, so the struct is uninitialized
// if both fields are empty.
return n.name == "" && n.domain == ""
}
// NameFromTag generates a Name from a tag.
func NameFromTag(tag userNameTag) Name {
return Name{
name: tag.Name(),
domain: tag.Domain(),
}
}
// IsValidName returns whether the given name is a valid user name string.
func IsValidName(name string) bool {
return validName.MatchString(name)
}
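// Hypothetical usage sketch (not part of the original source): NewName strips
// the "local" domain, so "admin@local" and "admin" compare equal, while
// external names keep their domain.
func exampleUserNames() (bool, string, error) {
	local, err := NewName("admin@local")
	if err != nil {
		return false, "", err
	}
	external, err := NewName("alice@external")
	if err != nil {
		return false, "", err
	}
	return local.IsLocal(), external.String(), nil // true, "alice@external"
}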
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package version
import corebase "github.com/juju/juju/core/base"
// DefaultSupportedLTSBase returns the latest LTS base that Juju supports
// and is compatible with.
func DefaultSupportedLTSBase() corebase.Base {
return corebase.MakeDefaultBase(corebase.UbuntuOS, "24.04")
}
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package version
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/juju/errors"
semversion "github.com/juju/version/v2"
)
// The presence and format of this constant is very important.
// The debian/rules build recipe uses this value for the version
// number of the release package.
const version = "4.0-beta6"
// UserAgentVersion defines a user agent version used for communication for
// outside resources.
const UserAgentVersion = "Juju/" + version
const (
// TreeStateDirty when the build was made with a dirty checkout.
TreeStateDirty = "dirty"
// TreeStateClean when the build was made with a clean checkout.
TreeStateClean = "clean"
// TreeStateArchive when the build was made outside of a git checkout.
TreeStateArchive = "archive"
)
// The version that we switched over from old style numbering to new style.
var switchOverVersion = semversion.MustParse("1.19.9")
// build is a string holding the build number of this Juju binary.
//
// NOTE: This is injected by the build system. In Makefile, we override
// this value with the value of the JUJU_BUILD_NUMBER environment variable.
var build string
// OfficialBuild is a monotonic number injected by Jenkins.
var OfficialBuild = mustParseBuildInt(build)
// Current gives the current version of the system.
//
// If the file "FORCE-VERSION" is present in the same directory as the running
// binary, it will override this.
//
// We also later set the build number with build, if it is set.
var Current = semversion.MustParse(version)
// Compiler is the go compiler used to build the binary.
var Compiler = runtime.Compiler
// GitCommit represents the git commit sha used to build the binary.
//
// NOTE: This is injected by the build system. In Makefile, we override
// this value with the value of the GIT_COMMIT environment variable.
var GitCommit string
// GitTreeState is "clean" when built from a working copy that matches the
// GitCommit treeish.
//
// NOTE: This is injected by the build system. In Makefile, we override
// this value with the value of the GIT_TREE_STATE environment variable.
var GitTreeState string = TreeStateDirty
// GoBuildTags is the build tags used to build the binary.
//
// NOTE: This is injected by the build system. In Makefile, we override
// this value with the value of the FINAL_BUILD_TAGS environment variable.
var GoBuildTags string
func init() {
defer func() {
if Current.Build == 0 {
// We set the Build to OfficialBuild if no build number is provided in the FORCE-VERSION file.
Current.Build = OfficialBuild
}
}()
toolsDir := filepath.Dir(os.Args[0])
v, err := os.ReadFile(filepath.Join(toolsDir, "FORCE-VERSION"))
if err != nil {
if !os.IsNotExist(err) {
fmt.Fprintf(os.Stderr, "WARNING: cannot read forced version: %v\n", err)
}
return
}
Current = semversion.MustParse(strings.TrimSpace(string(v)))
}
func isOdd(x int) bool {
return x%2 != 0
}
// IsDev returns whether the version represents a development version. A
// version with a tag or a nonzero build component is considered to be a
// development version. Versions older than or equal to 1.19.9 (the switch-over
// version) are development versions if the minor version is odd or the build
// component is nonzero.
func IsDev(v semversion.Number) bool {
if v.Compare(switchOverVersion) <= 0 {
return isOdd(v.Minor) || v.Build > 0
}
return v.Tag != "" || v.Build > 0
}
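// Hypothetical usage sketch (not part of the original source): a tagged
// release such as "4.0-beta6" is treated as a development version, while a
// plain "4.0.0" is not.
func exampleIsDev() (bool, bool) {
	return IsDev(semversion.MustParse("4.0-beta6")), IsDev(semversion.MustParse("4.0.0")) // true, false
}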
func mustParseBuildInt(buildInt string) int {
if buildInt == "" {
return 0
}
i, err := strconv.Atoi(buildInt)
if err != nil {
panic(err)
}
return i
}
// CheckJujuMinVersion returns an error if the specified minimum version to
// check is greater than the given Juju version.
func CheckJujuMinVersion(toCheck semversion.Number, jujuVersion semversion.Number) (err error) {
// It only makes sense to allow charms to specify they depend
// on a released version of Juju. If this is a beta or rc version
// of Juju, treat it like it's the released version to allow
// charms to be tested prior to release.
jujuVersion.Tag = ""
jujuVersion.Build = 0
if toCheck != semversion.Zero && toCheck.Compare(jujuVersion) > 0 {
return minVersionError(toCheck, jujuVersion)
}
return nil
}
func minVersionError(minver, jujuver semversion.Number) error {
err := errors.NewErr("charm's min version (%s) is higher than this juju model's version (%s)",
minver, jujuver)
err.SetLocation(1)
return minJujuVersionErr{&err}
}
type minJujuVersionErr struct {
*errors.Err
}
// IsMinVersionError returns true if the given error was caused by the charm
// having a minjujuversion higher than the juju model's version.
func IsMinVersionError(err error) bool {
_, ok := errors.Cause(err).(minJujuVersionErr)
return ok
}
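// Hypothetical usage sketch (not part of the original source): a charm that
// declares a minimum Juju version of 5.0.0 cannot run on a 4.0.0 model, and
// the resulting error is recognised by IsMinVersionError.
func exampleMinVersionCheck() bool {
	err := CheckJujuMinVersion(semversion.MustParse("5.0.0"), semversion.MustParse("4.0.0"))
	return IsMinVersionError(err) // true
}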
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
import (
"github.com/juju/worker/v4"
"github.com/juju/worker/v4/catacomb"
)
// Normalise takes any watcher and normalises it down to a NotifyWatcher.
// This is useful in legacy code that expects a NotifyWatcher.
func Normalise[T any](source Watcher[T]) (NotifyWatcher, error) {
if w, ok := source.(NotifyWatcher); ok {
// If we are already a NotifyWatcher, we can return the source watcher.
return w, nil
}
ch := make(chan struct{})
w := &normaliseWatcher{
ch: ch,
}
loop := func() error {
defer close(ch)
for {
select {
case <-w.catacomb.Dying():
return w.catacomb.ErrDying()
case _, ok := <-source.Changes():
if !ok {
select {
case <-w.catacomb.Dying():
return w.catacomb.ErrDying()
default:
return nil
}
}
select {
case <-w.catacomb.Dying():
return w.catacomb.ErrDying()
case ch <- struct{}{}:
}
}
}
}
err := catacomb.Invoke(catacomb.Plan{
Site: &w.catacomb,
Work: loop,
Init: []worker.Worker{source},
})
if err != nil {
return nil, err
}
return w, nil
}
type normaliseWatcher struct {
catacomb catacomb.Catacomb
ch chan struct{}
}
func (w *normaliseWatcher) Kill() {
w.catacomb.Kill(nil)
}
func (w *normaliseWatcher) Wait() error {
return w.catacomb.Wait()
}
func (w *normaliseWatcher) Changes() <-chan struct{} {
return w.ch
}
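// Hypothetical usage sketch (not part of the original source): legacy code
// that only cares that "something changed" can collapse a StringsWatcher into
// a NotifyWatcher, discarding the string payloads.
func exampleNormaliseStrings(source StringsWatcher) (NotifyWatcher, error) {
	return Normalise[[]string](source)
}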
// Copyright 2012-2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
import (
"context"
"github.com/juju/errors"
"github.com/juju/worker/v4"
"github.com/juju/worker/v4/catacomb"
)
// NotifyChannel is a channel that receives a single value to indicate that the
// watch is active, and subsequent values whenever the value(s) under
// observation change(s).
// This is deprecated; use <-chan struct{} instead.
type NotifyChannel = <-chan struct{}
// NotifyWatcher sends a single value to indicate that the watch is active, and
// subsequent values whenever the value(s) under observation change(s).
type NotifyWatcher = Watcher[struct{}]
// NotifyHandler defines the operation of a NotifyWorker.
type NotifyHandler interface {
// SetUp is called once when creating a NotifyWorker. It must return a
// NotifyWatcher or an error. The NotifyHandler takes responsibility for
// stopping any returned watcher and handling any errors.
SetUp(context.Context) (NotifyWatcher, error)
// Handle is called whenever a value is received from the NotifyWatcher
// returned by SetUp. If it returns an error, the NotifyWorker will be
// stopped.
//
// If Handle runs any blocking operations it must pass through, or select
// on, the supplied context done channel; the context will be canceled when
// the NotifyWorker is killed. An aborted Handle should not return an error.
Handle(context.Context) error
// TearDown is called once when stopping a NotifyWorker, whether or not
// SetUp succeeded. It need not concern itself with the NotifyWatcher, but
// must clean up any other resources created in SetUp or Handle.
TearDown() error
}
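// Hypothetical sketch (not part of the original source): a minimal
// NotifyHandler that wraps a watcher supplied at construction time and
// reacts to each change in Handle; a real handler would do useful work there.
// A worker would then be started with NewNotifyWorker(NotifyConfig{Handler: h}).
type exampleNotifyHandler struct {
	watcher  NotifyWatcher
	onChange func(context.Context) error
}

func (h *exampleNotifyHandler) SetUp(_ context.Context) (NotifyWatcher, error) {
	// The returned watcher is registered with the worker's catacomb.
	return h.watcher, nil
}

func (h *exampleNotifyHandler) Handle(ctx context.Context) error {
	// Returning an error here stops the NotifyWorker.
	if h.onChange == nil {
		return nil
	}
	return h.onChange(ctx)
}

func (h *exampleNotifyHandler) TearDown() error {
	// Nothing beyond the watcher to clean up in this sketch.
	return nil
}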
// NotifyConfig holds the direct dependencies of a NotifyWorker.
type NotifyConfig struct {
Handler NotifyHandler
}
// Validate returns an error if the config cannot start a NotifyWorker.
func (config NotifyConfig) Validate() error {
if config.Handler == nil {
return errors.NotValidf("nil Handler")
}
return nil
}
// NewNotifyWorker starts a new worker that runs a NotifyHandler.
func NewNotifyWorker(config NotifyConfig) (*NotifyWorker, error) {
if err := config.Validate(); err != nil {
return nil, errors.Trace(err)
}
nw := &NotifyWorker{
config: config,
}
err := catacomb.Invoke(catacomb.Plan{
Site: &nw.catacomb,
Work: nw.loop,
})
if err != nil {
return nil, errors.Trace(err)
}
return nw, nil
}
// NotifyWorker is a worker that wraps a NotifyWatcher.
type NotifyWorker struct {
catacomb catacomb.Catacomb
config NotifyConfig
}
func (nw *NotifyWorker) loop() (err error) {
changes := nw.setUp()
defer nw.tearDown(err)
for {
select {
case <-nw.catacomb.Dying():
return nw.catacomb.ErrDying()
case _, ok := <-changes:
if !ok {
return errors.New("change channel closed")
}
if err := nw.dispatchChange(); err != nil {
return errors.Trace(err)
}
}
}
}
// setUp calls the handler's SetUp method; registers any returned watcher with
// the worker's catacomb; and returns the watcher's changes channel. Any errors
// encountered kill the worker and cause a nil channel to be returned.
func (nw *NotifyWorker) setUp() <-chan struct{} {
ctx, cancel := nw.scopedContext()
defer cancel()
watcher, err := nw.config.Handler.SetUp(ctx)
if err != nil {
nw.catacomb.Kill(err)
}
if watcher == nil {
nw.catacomb.Kill(errors.New("handler returned nil watcher"))
} else if err := nw.catacomb.Add(watcher); err != nil {
nw.catacomb.Kill(err)
} else {
return watcher.Changes()
}
return nil
}
// tearDown kills the worker with the supplied error; and then kills it with
// any error returned by the handler's TearDown method.
func (nw *NotifyWorker) tearDown(err error) {
nw.catacomb.Kill(err)
err = nw.config.Handler.TearDown()
nw.catacomb.Kill(err)
}
func (nw *NotifyWorker) dispatchChange() error {
ctx, cancel := nw.scopedContext()
defer cancel()
err := nw.config.Handler.Handle(ctx)
// Ensure we don't return the context.Canceled error when we've been
// aborted, as per the documentation.
if errors.Is(err, context.Canceled) {
return nil
}
return errors.Trace(err)
}
// Kill is part of the worker.Worker interface.
func (nw *NotifyWorker) Kill() {
nw.catacomb.Kill(nil)
}
// Wait is part of the worker.Worker interface.
func (nw *NotifyWorker) Wait() error {
return nw.catacomb.Wait()
}
// Report implements dependency.Reporter.
func (nw *NotifyWorker) Report() map[string]interface{} {
if r, ok := nw.config.Handler.(worker.Reporter); ok {
return r.Report()
}
return nil
}
func (nw *NotifyWorker) scopedContext() (context.Context, context.CancelFunc) {
return context.WithCancel(nw.catacomb.Context(context.Background()))
}
// Copyright 2023 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
import (
"fmt"
"time"
)
// SecretBackendRotateChange describes changes to a secret backend
// rotation trigger.
type SecretBackendRotateChange struct {
ID string
Name string
NextTriggerTime time.Time
}
func (s SecretBackendRotateChange) GoString() string {
whenMsg := "never"
if !s.NextTriggerTime.IsZero() {
interval := s.NextTriggerTime.Sub(time.Now())
if interval < 0 {
whenMsg = fmt.Sprintf("%v ago at %s", -interval, s.NextTriggerTime.Format(time.RFC3339))
} else {
whenMsg = fmt.Sprintf("in %v at %s", interval, s.NextTriggerTime.Format(time.RFC3339))
}
}
return fmt.Sprintf("%s token rotate: %s", s.Name, whenMsg)
}
// SecretBackendRotateChannel is a change channel as described in the
// CoreWatcher docs.
// This is deprecated; use <-chan []SecretBackendRotateChange instead.
type SecretBackendRotateChannel = <-chan []SecretBackendRotateChange
// SecretBackendRotateWatcher represents a watcher that returns a slice of
// SecretBackendRotateChange.
type SecretBackendRotateWatcher = Watcher[[]SecretBackendRotateChange]
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
import (
"fmt"
"time"
"github.com/juju/juju/core/secrets"
)
// SecretTriggerChange describes changes to a secret trigger.
// eg rotation or expiry.
type SecretTriggerChange struct {
URI *secrets.URI
Revision int
NextTriggerTime time.Time
}
// String returns a string representation of the change.
func (s SecretTriggerChange) String() string {
str := s.URI.String()
if s.Revision > 0 {
str = fmt.Sprintf("%s/%d", s.URI.String(), s.Revision)
}
return str
}
// GoString returns a Go-syntax representation of the change.
func (s SecretTriggerChange) GoString() string {
revMsg := ""
if s.Revision > 0 {
revMsg = fmt.Sprintf("/%d", s.Revision)
}
whenMsg := "never"
if !s.NextTriggerTime.IsZero() {
interval := s.NextTriggerTime.Sub(time.Now())
if interval < 0 {
whenMsg = fmt.Sprintf("%v ago at %s", -interval, s.NextTriggerTime.Format(time.RFC3339))
} else {
whenMsg = fmt.Sprintf("in %v at %s", interval, s.NextTriggerTime.Format(time.RFC3339))
}
}
return fmt.Sprintf("%s%s trigger: %s", s.URI.ID, revMsg, whenMsg)
}
// SecretTriggerChannel is a change channel as described in the CoreWatcher
// docs.
// This is deprecated; use <-chan []SecretTriggerChange instead.
type SecretTriggerChannel = <-chan []SecretTriggerChange
// SecretTriggerWatcher represents a watcher that reports the latest
// trigger of a secret.
type SecretTriggerWatcher = Watcher[[]SecretTriggerChange]
// SecretRevisionChange describes changes to a secret.
type SecretRevisionChange struct {
URI *secrets.URI
Revision int
}
func (s SecretRevisionChange) GoString() string {
return fmt.Sprintf("%s/%d", s.URI.ID, s.Revision)
}
// SecretRevisionChannel is a channel used to notify of
// changes to a secret.
// This is deprecated; use <-chan []SecretRevisionChange instead.
type SecretRevisionChannel = <-chan []SecretRevisionChange
// SecretsRevisionWatcher represents a watcher that reports the latest
// revision of a secret.
type SecretsRevisionWatcher = Watcher[[]SecretRevisionChange]
// Copyright 2013-2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
import (
"context"
"github.com/juju/errors"
"github.com/juju/worker/v4"
"github.com/juju/worker/v4/catacomb"
)
// StringsChannel is a channel that receives a baseline set of values, and
// subsequent values representing additions, changes, and/or removals of those
// values.
// This is deprecated; use <-chan []string instead.
type StringsChannel = <-chan []string
// StringsWatcher sends a single value indicating a baseline set of values, and
// subsequent values representing additions, changes, and/or removals of those
// values.
type StringsWatcher = Watcher[[]string]
// StringsHandler defines the operation of a StringsWorker.
type StringsHandler interface {
// SetUp is called once when creating a StringsWorker. It must return a
// StringsWatcher or an error. The StringsHandler takes responsibility for
// stopping any returned watcher and handling any errors.
SetUp(ctx context.Context) (StringsWatcher, error)
// Handle is called with every value received from the StringsWatcher
// returned by SetUp. If it returns an error, the StringsWorker will be
// stopped.
//
// If Handle runs any blocking operations it must pass through, or select
// on, the supplied context done channel; the context will be canceled when
// the StringsWorker is killed. An aborted Handle should not return an error.
Handle(ctx context.Context, changes []string) error
// TearDown is called once when stopping a StringsWorker, whether or not
// SetUp succeeded. It need not concern itself with the StringsWatcher, but
// must clean up any other resources created in SetUp or Handle.
TearDown() error
}
// StringsConfig holds the direct dependencies of a StringsWorker.
type StringsConfig struct {
Handler StringsHandler
}
// Validate returns an error if the config cannot start a StringsWorker.
func (config StringsConfig) Validate() error {
if config.Handler == nil {
return errors.NotValidf("nil Handler")
}
return nil
}
// NewStringsWorker starts a new worker that runs a StringsHandler.
func NewStringsWorker(config StringsConfig) (*StringsWorker, error) {
if err := config.Validate(); err != nil {
return nil, errors.Trace(err)
}
sw := &StringsWorker{
config: config,
}
err := catacomb.Invoke(catacomb.Plan{
Site: &sw.catacomb,
Work: sw.loop,
})
if err != nil {
return nil, errors.Trace(err)
}
return sw, nil
}
// StringsWorker is a worker that wraps a StringsWatcher.
type StringsWorker struct {
config StringsConfig
catacomb catacomb.Catacomb
}
func (sw *StringsWorker) loop() (err error) {
ctx, cancel := sw.scopedContext()
defer cancel()
changes := sw.setUp()
defer sw.tearDown(err)
for {
select {
case <-sw.catacomb.Dying():
return sw.catacomb.ErrDying()
case strings, ok := <-changes:
if !ok {
return errors.New("change channel closed")
}
err = sw.config.Handler.Handle(ctx, strings)
if err != nil {
return err
}
}
}
}
// setUp calls the handler's SetUp method; registers any returned watcher with
// the worker's catacomb; and returns the watcher's changes channel. Any errors
// encountered kill the worker and cause a nil channel to be returned.
func (sw *StringsWorker) setUp() <-chan []string {
ctx, cancel := sw.scopedContext()
defer cancel()
watcher, err := sw.config.Handler.SetUp(ctx)
if err != nil {
sw.catacomb.Kill(err)
}
if watcher == nil {
sw.catacomb.Kill(errors.New("handler returned nil watcher"))
} else {
if err := sw.catacomb.Add(watcher); err != nil {
sw.catacomb.Kill(err)
} else {
return watcher.Changes()
}
}
return nil
}
// tearDown kills the worker with the supplied error; and then kills it with
// any error returned by the handler's TearDown method.
func (sw *StringsWorker) tearDown(err error) {
sw.catacomb.Kill(err)
err = sw.config.Handler.TearDown()
sw.catacomb.Kill(err)
}
// Kill is part of the worker.Worker interface.
func (sw *StringsWorker) Kill() {
sw.catacomb.Kill(nil)
}
// Wait is part of the worker.Worker interface.
func (sw *StringsWorker) Wait() error {
return sw.catacomb.Wait()
}
// Report implements dependency.Reporter.
func (sw *StringsWorker) Report() map[string]interface{} {
if r, ok := sw.config.Handler.(worker.Reporter); ok {
return r.Report()
}
return nil
}
func (sw *StringsWorker) scopedContext() (context.Context, context.CancelFunc) {
return context.WithCancel(sw.catacomb.Context(context.Background()))
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package watcher
// TODO returns a watcher for type T that sends an initial change
// with the empty value of type T.
func TODO[T any]() Watcher[T] {
var empty T
ch := make(chan T, 1)
ch <- empty
w := &todoWatcher[T]{
ch: ch,
done: make(chan struct{}),
}
return w
}
type todoWatcher[T any] struct {
ch chan T
done chan struct{}
}
func (w *todoWatcher[T]) Kill() {
select {
case <-w.done:
default:
close(w.done)
close(w.ch)
}
}
func (w *todoWatcher[T]) Wait() error {
<-w.done
return nil
}
func (w *todoWatcher[T]) Changes() <-chan T {
return w.ch
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package cloudspec
import (
"github.com/juju/errors"
"github.com/juju/names/v6"
jujucloud "github.com/juju/juju/cloud"
)
// CloudSpec describes a specific cloud configuration, for the purpose
// of opening an Environ to manage the cloud resources.
type CloudSpec struct {
// Type is the type of cloud, eg aws, openstack etc.
Type string
// Name is the name of the cloud.
Name string
// Region is the name of the cloud region, if the cloud supports
// regions.
Region string
// Endpoint is the endpoint for the cloud (region).
Endpoint string
// IdentityEndpoint is the identity endpoint for the cloud (region).
IdentityEndpoint string
// StorageEndpoint is the storage endpoint for the cloud (region).
StorageEndpoint string
// Credential is the cloud credential to use to authenticate
// with the cloud, or nil if the cloud does not require any
// credentials.
Credential *jujucloud.Credential
// CACertificates contains an optional list of Certificate
// Authority certificates to be used to validate certificates
// of cloud infrastructure components.
// The contents are Base64 encoded x.509 certs.
CACertificates []string
// SkipTLSVerify is true if the client should be asked not to
// validate certificates. It is not recommended for production clouds.
// It is secure (false) by default.
SkipTLSVerify bool
// IsControllerCloud is true when this is the cloud used by the controller.
IsControllerCloud bool
}
// Validate validates that the CloudSpec is well-formed. It does
// not ensure that the cloud type and credentials are valid.
func (cs CloudSpec) Validate() error {
if cs.Type == "" {
return errors.NotValidf("empty Type")
}
if !names.IsValidCloud(cs.Name) {
return errors.NotValidf("cloud name %q", cs.Name)
}
return nil
}
// MakeCloudSpec returns a CloudSpec from the given
// Cloud, cloud and region names, and credential.
func MakeCloudSpec(cloud jujucloud.Cloud, cloudRegionName string, credential *jujucloud.Credential) (CloudSpec, error) {
cloudSpec := CloudSpec{
Type: cloud.Type,
Name: cloud.Name,
Region: cloudRegionName,
Endpoint: cloud.Endpoint,
IdentityEndpoint: cloud.IdentityEndpoint,
StorageEndpoint: cloud.StorageEndpoint,
CACertificates: cloud.CACertificates,
SkipTLSVerify: cloud.SkipTLSVerify,
Credential: credential,
IsControllerCloud: cloud.IsControllerCloud,
}
if cloudRegionName != "" {
cloudRegion, err := jujucloud.RegionByName(cloud.Regions, cloudRegionName)
if err != nil {
return CloudSpec{}, errors.Annotate(err, "getting cloud region definition")
}
if !cloudRegion.IsEmpty() {
cloudSpec.Endpoint = cloudRegion.Endpoint
cloudSpec.IdentityEndpoint = cloudRegion.IdentityEndpoint
cloudSpec.StorageEndpoint = cloudRegion.StorageEndpoint
}
}
return cloudSpec, nil
}
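// Hypothetical usage sketch (not part of the original source): deriving a
// validated CloudSpec for one region of a cloud definition; the credential
// may be nil for clouds whose auth type is "empty". The region name here is
// purely illustrative.
func exampleRegionSpec(cloud jujucloud.Cloud, credential *jujucloud.Credential) (CloudSpec, error) {
	spec, err := MakeCloudSpec(cloud, "us-east-1", credential)
	if err != nil {
		return CloudSpec{}, errors.Trace(err)
	}
	return spec, spec.Validate()
}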
// CloudRegionSpec contains the information needed to lookup specific
// cloud or cloud region configuration. This is for use in calling
// state/modelconfig.(ComposeNewModelConfig) so there is no need to serialize it.
type CloudRegionSpec struct {
// Cloud is the name of the cloud.
Cloud string
// Region is the name of the cloud region.
Region string
}
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package config
import (
"context"
"fmt"
"maps"
"net"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/loggo/v2"
"github.com/juju/names/v6"
"github.com/juju/proxy"
"github.com/juju/schema"
"github.com/juju/utils/v4"
"github.com/juju/version/v2"
"gopkg.in/yaml.v2"
corebase "github.com/juju/juju/core/base"
coremodelconfig "github.com/juju/juju/core/modelconfig"
jujuversion "github.com/juju/juju/core/version"
"github.com/juju/juju/environs/tags"
"github.com/juju/juju/internal/charmhub"
"github.com/juju/juju/internal/featureflag"
internallogger "github.com/juju/juju/internal/logger"
"github.com/juju/juju/juju/osenv"
)
var logger = internallogger.GetLogger("juju.environs.config")
const (
// FwInstance requests the use of an individual firewall per instance.
FwInstance = "instance"
// FwGlobal requests the use of a single firewall group for all machines.
// When ports are opened for one machine, all machines will have the same
// port opened.
FwGlobal = "global"
// FwNone requests that no firewalling should be performed inside
// the environment. No firewaller worker will be started. It's
// useful for clouds without support for either global or per
// instance security groups.
FwNone = "none"
)
// TODO(katco-): Please grow this over time.
// Centralized place to store the values of config keys. This turns mistakes
// in referencing key values into compile-time errors.
const (
//
// Settings Attributes
//
// AdminSecretKey is the key for the administrator secret.
AdminSecretKey = "admin-secret"
// CAPrivateKeyKey is the key for the CA private key.
CAPrivateKeyKey = "ca-private-key"
// NameKey is the key for the model's name.
NameKey = "name"
// TypeKey is the key for the model's cloud type.
TypeKey = "type"
// AgentVersionKey is the key for the model's Juju agent version.
AgentVersionKey = "agent-version"
// UUIDKey is the key for the model UUID attribute.
UUIDKey = "uuid"
// AuthorizedKeysKey is the key for the authorized-keys attribute.
AuthorizedKeysKey = "authorized-keys"
// ProvisionerHarvestModeKey stores the key for this setting.
ProvisionerHarvestModeKey = "provisioner-harvest-mode"
// NumProvisionWorkersKey is the key for number of model provisioner
// workers.
NumProvisionWorkersKey = "num-provision-workers"
// NumContainerProvisionWorkersKey is the key for the number of
// container provisioner workers per machine setting.
NumContainerProvisionWorkersKey = "num-container-provision-workers"
// ImageStreamKey is the key used to specify the stream
// for OS images.
ImageStreamKey = "image-stream"
// ImageMetadataURLKey is the key used to specify the location
// of OS image metadata.
ImageMetadataURLKey = "image-metadata-url"
// ImageMetadataDefaultsDisabledKey is the key used to disable image
// metadata default sources.
ImageMetadataDefaultsDisabledKey = "image-metadata-defaults-disabled"
// AgentStreamKey stores the key for this setting.
AgentStreamKey = "agent-stream"
// AgentMetadataURLKey stores the key for this setting.
AgentMetadataURLKey = "agent-metadata-url"
// ContainerImageStreamKey is the key used to specify the stream
// for container OS images.
ContainerImageStreamKey = "container-image-stream"
// ContainerImageMetadataURLKey is the key used to specify the location
// of OS image metadata for containers.
ContainerImageMetadataURLKey = "container-image-metadata-url"
// ContainerImageMetadataDefaultsDisabledKey is the key used to disable image
// metadata default sources for containers.
ContainerImageMetadataDefaultsDisabledKey = "container-image-metadata-defaults-disabled"
// Proxy behaviour has become something of an annoying thing to define
// well. These following four proxy variables are being kept to continue
// with the existing behaviour for those deployments that specify them.
// With these proxy values set, a file is written to every machine
// in /etc/profile.d so the ubuntu user gets the environment variables
// set when SSHing in. The OS environment also is set in the juju agents
// and charm hook environments.
// HTTPProxyKey stores the key for this setting.
HTTPProxyKey = "http-proxy"
// HTTPSProxyKey stores the key for this setting.
HTTPSProxyKey = "https-proxy"
// FTPProxyKey stores the key for this setting.
FTPProxyKey = "ftp-proxy"
// NoProxyKey stores the key for this setting.
NoProxyKey = "no-proxy"
// The new proxy keys are passed into hook contexts with the prefix
// JUJU_CHARM_ then HTTP_PROXY, HTTPS_PROXY, FTP_PROXY, and NO_PROXY.
// This allows the charm to set a proxy when it thinks it needs one.
// These values are not set in the general environment.
// JujuHTTPProxyKey stores the key for this setting.
JujuHTTPProxyKey = "juju-http-proxy"
// JujuHTTPSProxyKey stores the key for this setting.
JujuHTTPSProxyKey = "juju-https-proxy"
// JujuFTPProxyKey stores the key for this setting.
JujuFTPProxyKey = "juju-ftp-proxy"
// JujuNoProxyKey stores the key for this setting.
JujuNoProxyKey = "juju-no-proxy"
// The APT proxy values specified here work with both the
// legacy and juju proxy settings. If no value is specified,
// the value is determined by either the legacy or juju value
// if one is specified.
// AptHTTPProxyKey stores the key for this setting.
AptHTTPProxyKey = "apt-http-proxy"
// AptHTTPSProxyKey stores the key for this setting.
AptHTTPSProxyKey = "apt-https-proxy"
// AptFTPProxyKey stores the key for this setting.
AptFTPProxyKey = "apt-ftp-proxy"
// AptNoProxyKey stores the key for this setting.
AptNoProxyKey = "apt-no-proxy"
// AptMirrorKey is used to set the apt mirror.
AptMirrorKey = "apt-mirror"
// SnapHTTPProxyKey is used to set the snap core setting proxy.http for deployed machines.
SnapHTTPProxyKey = "snap-http-proxy"
// SnapHTTPSProxyKey is used to set the snap core setting proxy.https for deployed machines.
SnapHTTPSProxyKey = "snap-https-proxy"
// SnapStoreProxyKey is used to set the snap core setting proxy.store for deployed machines.
SnapStoreProxyKey = "snap-store-proxy"
// SnapStoreAssertionsKey is used to configure the deployed machines to acknowledge the
// store proxy assertions.
SnapStoreAssertionsKey = "snap-store-assertions"
// SnapStoreProxyURLKey is used to specify the URL to a snap store proxy.
SnapStoreProxyURLKey = "snap-store-proxy-url"
// NetBondReconfigureDelayKey is the key to pass when bridging
// the network for containers.
NetBondReconfigureDelayKey = "net-bond-reconfigure-delay"
// ContainerNetworkingMethodKey is the key for setting up networking method
// for containers.
ContainerNetworkingMethodKey = "container-networking-method"
// StorageDefaultBlockSourceKey is the key for the default block storage source.
StorageDefaultBlockSourceKey = "storage-default-block-source"
// StorageDefaultFilesystemSourceKey is the key for the default filesystem storage source.
StorageDefaultFilesystemSourceKey = "storage-default-filesystem-source"
// ResourceTagsKey is an optional list or space-separated string
// of k=v pairs, defining the tags for ResourceTags.
ResourceTagsKey = "resource-tags"
// AutomaticallyRetryHooks determines whether the uniter will
// automatically retry a hook that has failed
AutomaticallyRetryHooks = "automatically-retry-hooks"
// EnableOSRefreshUpdateKey determines whether newly provisioned instances
// should run their respective OS's update capability.
EnableOSRefreshUpdateKey = "enable-os-refresh-update"
// EnableOSUpgradeKey determines whether newly provisioned instances
// should run their respective OS's upgrade capability.
EnableOSUpgradeKey = "enable-os-upgrade"
// DevelopmentKey determines whether the model is in development mode.
DevelopmentKey = "development"
// SSLHostnameVerificationKey determines whether the environment has
// SSL hostname verification enabled.
SSLHostnameVerificationKey = "ssl-hostname-verification"
// TransmitVendorMetricsKey is the key for whether the controller sends
// metrics collected in this model for anonymized aggregate analytics.
TransmitVendorMetricsKey = "transmit-vendor-metrics"
// ExtraInfoKey is the key for arbitrary user specified string data that
// is stored against the model.
ExtraInfoKey = "extra-info"
// MaxActionResultsAge is the maximum age of actions to keep when pruning, eg
// "72h"
MaxActionResultsAge = "max-action-results-age"
// MaxActionResultsSize is the maximum size the actions collection can
// grow to before it is pruned, eg "5M"
MaxActionResultsSize = "max-action-results-size"
// UpdateStatusHookInterval is how often to run the update-status hook.
UpdateStatusHookInterval = "update-status-hook-interval"
// EgressSubnets are the source addresses from which traffic from this model
// originates if the model is deployed such that NAT or similar is in use.
EgressSubnets = "egress-subnets"
// CloudInitUserDataKey is the key to specify cloud-init yaml the user
// wants to add into the cloud-config data produced by Juju when
// provisioning machines.
CloudInitUserDataKey = "cloudinit-userdata"
// BackupDirKey specifies the backup working directory.
BackupDirKey = "backup-dir"
// ContainerInheritPropertiesKey is the key to specify a list of properties
// to be copied from a machine to a container during provisioning. The
// list will be comma separated.
ContainerInheritPropertiesKey = "container-inherit-properties"
// DefaultSpaceKey specifies which space should be used for the default
// endpoint bindings.
DefaultSpaceKey = "default-space"
// LXDSnapChannel selects the channel to use when installing LXD from a snap.
LXDSnapChannel = "lxd-snap-channel"
// CharmHubURLKey is the key for the URL to use for CharmHub API calls.
CharmHubURLKey = "charmhub-url"
// ModeKey is the key for defining the mode that a given model should be
// using.
// It is expected that Juju will behave differently when running in a
// different mode.
ModeKey = "mode"
// SSHAllowKey is a comma-separated list of CIDRs from which machines in
// this model will accept connections to the SSH service.
SSHAllowKey = "ssh-allow"
// SAASIngressAllowKey is a comma-separated list of CIDRs
// specifying what ingress can be applied to offers in this model.
SAASIngressAllowKey = "saas-ingress-allow"
//
// Deprecated Settings Attributes
//
// IgnoreMachineAddresses, when true, causes the
// machine worker not to discover any machine addresses
// on start up.
IgnoreMachineAddresses = "ignore-machine-addresses"
// TestModeKey is the key for identifying the model should be run in test
// mode.
TestModeKey = "test-mode"
// DisableTelemetryKey is the key for determining whether telemetry is
// collected for Juju models.
DisableTelemetryKey = "disable-telemetry"
// DefaultBaseKey is a key for determining the base a model should
// explicitly use for charms unless otherwise provided.
DefaultBaseKey = "default-base"
// LoggingConfigKey is used to specify the logging backend configuration.
LoggingConfigKey = "logging-config"
)
// ParseHarvestMode parses a description of a harvesting method and
// returns its representation.
func ParseHarvestMode(description string) (HarvestMode, error) {
description = strings.ToLower(description)
for method, descr := range harvestingMethodToFlag {
if description == descr {
return method, nil
}
}
return 0, fmt.Errorf("unknown harvesting method: %s", description)
}
// HarvestMode is a bit field which is used to store the harvesting
// behavior for Juju.
type HarvestMode uint32
const (
// HarvestNone signifies that Juju should not harvest any
// machines.
HarvestNone HarvestMode = 1 << iota
// HarvestUnknown signifies that Juju should only harvest machines
// which exist, but we don't know about.
HarvestUnknown
// HarvestDestroyed signifies that Juju should only harvest
// machines which have been explicitly released by the user
// through a destroy of an application/model/unit.
HarvestDestroyed
// HarvestAll signifies that Juju should harvest both unknown and
// destroyed instances. ♫ Don't fear the reaper. ♫
HarvestAll = HarvestUnknown | HarvestDestroyed
)
// A mapping from method to description. Going this way will be the
// more common operation, so we want this type of lookup to be O(1).
var harvestingMethodToFlag = map[HarvestMode]string{
HarvestAll: "all",
HarvestNone: "none",
HarvestUnknown: "unknown",
HarvestDestroyed: "destroyed",
}
// String returns the description of the harvesting mode.
func (method HarvestMode) String() string {
if description, ok := harvestingMethodToFlag[method]; ok {
return description
}
panic("Unknown harvesting method.")
}
// HarvestNone returns whether or not the None harvesting flag is set.
func (method HarvestMode) HarvestNone() bool {
return method&HarvestNone != 0
}
// HarvestDestroyed returns whether or not the Destroyed harvesting flag is set.
func (method HarvestMode) HarvestDestroyed() bool {
return method&HarvestDestroyed != 0
}
// HarvestUnknown returns whether or not the Unknown harvesting flag is set.
func (method HarvestMode) HarvestUnknown() bool {
return method&HarvestUnknown != 0
}
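// An illustrative sketch of how the bit field composes: "all" sets both the
// unknown and destroyed flags, so both of those accessors report true:
//
//	mode, _ := ParseHarvestMode("all")
//	_ = mode.HarvestUnknown()   // true
//	_ = mode.HarvestDestroyed() // true
//	_ = mode.HarvestNone()      // false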
// GetDefaultSupportedLTSBase returns the default supported LTS base.
// This is exposed for one reason and one reason only: testing!
// PreferredBase doesn't take an argument for a default base to fall back
// to, so we have to expose this variable in order to exercise the
// branching code for other scenarios, which makes me sad.
var GetDefaultSupportedLTSBase = jujuversion.DefaultSupportedLTSBase
// HasDefaultBase defines an interface for types that may carry a default base.
type HasDefaultBase interface {
DefaultBase() (string, bool)
}
// PreferredBase returns the preferred base to use when a charm does not
// explicitly specify a base.
func PreferredBase(cfg HasDefaultBase) corebase.Base {
base, ok := cfg.DefaultBase()
if ok {
// We can safely ignore the error here as we know that we have
// validated the base when we set it.
return corebase.MustParseBaseFromString(base)
}
return GetDefaultSupportedLTSBase()
}
// Config holds an immutable environment configuration.
type Config struct {
// defined holds the attributes that are defined for Config.
// unknown holds the other attributes that are passed in (aka UnknownAttrs).
// The union of these two is AllAttrs.
defined, unknown map[string]any
}
// Defaulting is a value that specifies whether a configuration
// creator should use defaults from the environment.
type Defaulting bool
const (
// UseDefaults defines a constant for indicating that defaults should be
// used for the configuration.
UseDefaults Defaulting = true
// NoDefaults defines a constant for indicating that no defaults should be
// used for the configuration.
NoDefaults Defaulting = false
)
// TODO(rog) update the doc comment below - it's getting messy
// and it assumes too much prior knowledge.
// New returns a new configuration. Fields that are common to all
// environment providers are verified. If useDefaults is UseDefaults,
// default values will be taken from the environment.
//
// "ca-cert-path" and "ca-private-key-path" are translated into the
// "ca-cert" and "ca-private-key" values. If not specified, CA details
// will be read from:
//
// ~/.local/share/juju/<name>-cert.pem
// ~/.local/share/juju/<name>-private-key.pem
//
// If $XDG_DATA_HOME is defined it will be used instead of ~/.local/share.
//
// The attrs map cannot be nil, otherwise a panic is raised.
func New(withDefaults Defaulting, attrs map[string]any) (*Config, error) {
initSchema.Do(initSchemas)
checker := noDefaultsChecker
if withDefaults {
checker = withDefaultsChecker
} else {
// Config may be from an older Juju.
// Handle the case where we are parsing a fully formed
// set of config attributes (NoDefaults) and a value is strictly
// not optional, but may have previously been either set to empty
// or is missing.
// In this case, we use the default.
for k := range defaultsWhenParsing {
v, ok := attrs[k]
if ok && v != "" {
continue
}
_, explicitlyOptional := alwaysOptional[k]
if !explicitlyOptional {
attrs[k] = defaultsWhenParsing[k]
}
}
}
defined, err := checker.Coerce(attrs, nil)
if err != nil {
return nil, errors.Trace(err)
}
c := &Config{
defined: defined.(map[string]any),
unknown: make(map[string]any),
}
if err := c.setLoggingFromEnviron(); err != nil {
return nil, errors.Trace(err)
}
// no old config to compare against
if err := Validate(context.TODO(), c, nil); err != nil {
return nil, errors.Trace(err)
}
// Copy unknown attributes onto the type-specific map.
for k, v := range attrs {
if _, ok := allFields[k]; !ok {
c.unknown[k] = v
}
}
return c, nil
}
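// An illustrative sketch of constructing a configuration with defaults
// filled in (the model name and uuid here are hypothetical; name, type and
// uuid are always required):
//
//	cfg, err := New(UseDefaults, map[string]any{
//		NameKey: "my-model",
//		TypeKey: "ec2",
//		UUIDKey: "f47ac10b-58cc-4372-a567-0e02b2c3d479", // any valid UUID
//	})
//	if err != nil {
//		return err
//	}
//	_ = cfg.Name() // "my-model"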
const (
// DefaultUpdateStatusHookInterval is the default value for
// UpdateStatusHookInterval
DefaultUpdateStatusHookInterval = "5m"
// DefaultActionResultsAge is the default for the age of the results for an
// action.
DefaultActionResultsAge = "336h" // 2 weeks
// DefaultActionResultsSize is the default size of the action results.
DefaultActionResultsSize = "5G"
// DefaultLxdSnapChannel is the default lxd snap channel to install on host vms.
DefaultLxdSnapChannel = "5.0/stable"
// DefaultSecretBackend is the default secret backend to use.
DefaultSecretBackend = "auto"
)
var defaultConfigValues = map[string]any{
// Network.
"firewall-mode": FwInstance,
"disable-network-management": false,
IgnoreMachineAddresses: false,
SSLHostnameVerificationKey: true,
"proxy-ssh": false,
DefaultSpaceKey: "",
// Why is net-bond-reconfigure-delay set to 17 seconds?
//
// The value represents the amount of time in seconds to sleep
// between ifdown and ifup when bridging bonded interfaces;
// this is a platform bug and all of this can go away when bug
// #1657579 (and #1594855 and #1269921) are fixed.
//
// For a long time the bridge script hardcoded a value of 3s
// but some setups now require an even longer period. The last
// reported issue was fixed with a 10s timeout, however, we're
// increasing that because this issue (and solution) is not
// very discoverable and we would like bridging to work
// out-of-the-box.
//
// This value can be further tweaked via:
//
// $ juju model-config net-bond-reconfigure-delay=30
NetBondReconfigureDelayKey: 17,
ContainerNetworkingMethodKey: "",
DefaultBaseKey: "",
ProvisionerHarvestModeKey: HarvestDestroyed.String(),
NumProvisionWorkersKey: 16,
NumContainerProvisionWorkersKey: 4,
ResourceTagsKey: "",
LoggingConfigKey: "",
AutomaticallyRetryHooks: true,
EnableOSRefreshUpdateKey: true,
EnableOSUpgradeKey: true,
DevelopmentKey: false,
TestModeKey: false,
ModeKey: RequiresPromptsMode,
DisableTelemetryKey: false,
TransmitVendorMetricsKey: true,
UpdateStatusHookInterval: DefaultUpdateStatusHookInterval,
EgressSubnets: "",
CloudInitUserDataKey: "",
ContainerInheritPropertiesKey: "",
BackupDirKey: "",
LXDSnapChannel: DefaultLxdSnapChannel,
CharmHubURLKey: charmhub.DefaultServerURL,
// Image and agent streams and URLs.
ImageStreamKey: "released",
ImageMetadataURLKey: "",
ImageMetadataDefaultsDisabledKey: false,
AgentStreamKey: "released",
AgentMetadataURLKey: "",
ContainerImageStreamKey: "released",
ContainerImageMetadataURLKey: "",
ContainerImageMetadataDefaultsDisabledKey: false,
// Proxy settings.
HTTPProxyKey: "",
HTTPSProxyKey: "",
FTPProxyKey: "",
NoProxyKey: "127.0.0.1,localhost,::1",
JujuHTTPProxyKey: "",
JujuHTTPSProxyKey: "",
JujuFTPProxyKey: "",
JujuNoProxyKey: "127.0.0.1,localhost,::1",
AptHTTPProxyKey: "",
AptHTTPSProxyKey: "",
AptFTPProxyKey: "",
AptNoProxyKey: "",
AptMirrorKey: "",
SnapHTTPProxyKey: "",
SnapHTTPSProxyKey: "",
SnapStoreProxyKey: "",
SnapStoreAssertionsKey: "",
SnapStoreProxyURLKey: "",
// Action results pruning settings
MaxActionResultsAge: DefaultActionResultsAge,
MaxActionResultsSize: DefaultActionResultsSize,
// Model firewall settings
SSHAllowKey: "0.0.0.0/0,::/0",
SAASIngressAllowKey: "0.0.0.0/0,::/0",
}
// defaultLoggingConfig is the default value for logging-config if it is otherwise not set.
// We don't use the defaultConfigValues mechanism because one way to set the logging config is
// via the JUJU_LOGGING_CONFIG environment variable, which needs to be taken into account before
// we set the default.
const defaultLoggingConfig = "<root>=INFO"
// ConfigDefaults returns the config default values
// to be used for any new model where there is no
// value yet defined.
func ConfigDefaults() map[string]any {
defaults := make(map[string]any)
for name, value := range defaultConfigValues {
if developerConfigValue(name) {
continue
}
defaults[name] = value
}
return defaults
}
func (c *Config) setLoggingFromEnviron() error {
loggingConfig := c.asString(LoggingConfigKey)
// If the logging config hasn't been set, then look for the os environment
// variable, and failing that, get the config from loggo itself.
if loggingConfig == "" {
if environmentValue := os.Getenv(osenv.JujuLoggingConfigEnvKey); environmentValue != "" {
c.defined[LoggingConfigKey] = environmentValue
} else {
c.defined[LoggingConfigKey] = defaultLoggingConfig
}
}
return nil
}
// CoerceForStorage transforms attributes prior to being saved in a persistent store.
func CoerceForStorage(attrs map[string]any) map[string]any {
coercedAttrs := make(map[string]any, len(attrs))
for attrName, attrValue := range attrs {
if attrName == ResourceTagsKey {
// Resource Tags are specified by the user as a string but transformed
// to a map when config is parsed. We want to store as a string.
var tagsSlice []string
if tags, ok := attrValue.(map[string]string); ok {
for resKey, resValue := range tags {
tagsSlice = append(tagsSlice, fmt.Sprintf("%v=%v", resKey, resValue))
}
attrValue = strings.Join(tagsSlice, " ")
}
}
coercedAttrs[attrName] = attrValue
}
return coercedAttrs
}
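// An illustrative sketch: resource tags that were parsed into a map are
// flattened back to the user-facing "k=v" string form before being stored:
//
//	attrs := map[string]any{
//		ResourceTagsKey: map[string]string{"team": "blue"},
//	}
//	stored := CoerceForStorage(attrs)
//	_ = stored[ResourceTagsKey] // the string "team=blue"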
func initSchemas() {
allFields = fields()
defaultsWhenParsing = allDefaults()
withDefaultsChecker = schema.FieldMap(allFields, defaultsWhenParsing)
noDefaultsChecker = schema.FieldMap(allFields, alwaysOptional)
coerceOptional := schema.Defaults{}
maps.Copy(coerceOptional, alwaysOptional)
coerceOptional[UUIDKey] = schema.Omit
coerceOptional[NameKey] = schema.Omit
coerceOptional[TypeKey] = schema.Omit
coerceChecker = schema.FieldMap(allFields, coerceOptional)
}
// Coerce transforms the attributes from strings to their typed values.
func Coerce(attrs map[string]string) (map[string]any, error) {
initSchema.Do(initSchemas)
result, err := coerceChecker.Coerce(attrs, nil)
if err != nil {
return nil, errors.Trace(err)
}
return result.(map[string]any), nil
}
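// An illustrative sketch: Coerce is useful when all attribute values arrive
// as strings (for example from command-line flags) and need to be turned
// into their schema types before use:
//
//	typed, err := Coerce(map[string]string{
//		DevelopmentKey: "true",
//	})
//	if err != nil {
//		return err
//	}
//	// typed[DevelopmentKey] should now be the boolean true rather than the
//	// string "true" (assuming the schema declares it as a boolean field).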
// Validate ensures that config is a valid configuration. If old is not nil,
// it holds the previous environment configuration for consideration when
// validating changes.
func Validate(_ctx context.Context, cfg, old *Config) error {
// Check that all other fields that have been specified are non-empty,
// unless they're allowed to be empty for backward compatibility.
for attr, val := range cfg.defined {
if !isEmpty(val) {
continue
}
if !allowEmpty(attr) {
return fmt.Errorf("empty %s in model configuration", attr)
}
}
modelName := cfg.asString(NameKey)
if modelName == "" {
return errors.New("empty name in model configuration")
}
if !names.IsValidModelName(modelName) {
return fmt.Errorf("%q is not a valid name: model names may only contain lowercase letters, digits and hyphens", modelName)
}
// Check that the agent version parses ok if set explicitly; otherwise leave
// it alone.
if v, ok := cfg.defined[AgentVersionKey].(string); ok {
if _, err := version.Parse(v); err != nil {
return fmt.Errorf("invalid agent version in model configuration: %q", v)
}
}
// If the logging config is set, make sure it is valid.
if v, ok := cfg.defined[LoggingConfigKey].(string); ok {
if _, err := loggo.ParseConfigString(v); err != nil {
return err
}
}
if uuid := cfg.UUID(); !utils.IsValidUUIDString(uuid) {
return errors.Errorf("uuid: expected UUID, got string(%q)", uuid)
}
// Ensure the resource tags have the expected k=v format.
if _, err := cfg.resourceTags(); err != nil {
return errors.Annotate(err, "validating resource tags")
}
if v, ok := cfg.defined[MaxActionResultsAge].(string); ok {
if _, err := time.ParseDuration(v); err != nil {
return errors.Annotate(err, "invalid max action age in model configuration")
}
}
if v, ok := cfg.defined[MaxActionResultsSize].(string); ok {
if _, err := utils.ParseSize(v); err != nil {
return errors.Annotate(err, "invalid max action size in model configuration")
}
}
if v, ok := cfg.defined[UpdateStatusHookInterval].(string); ok {
duration, err := time.ParseDuration(v)
if err != nil {
return errors.Annotate(err, "invalid update status hook interval in model configuration")
}
if duration < 1*time.Minute {
return errors.Errorf("update status hook frequency %v cannot be less than 1m", duration)
}
if duration > 60*time.Minute {
return errors.Errorf("update status hook frequency %v cannot be greater than 60m", duration)
}
}
if v, ok := cfg.defined[EgressSubnets].(string); ok && v != "" {
cidrs := strings.Split(v, ",")
for _, cidr := range cidrs {
if _, _, err := net.ParseCIDR(strings.TrimSpace(cidr)); err != nil {
return errors.Annotatef(err, "invalid egress subnet: %v", cidr)
}
if cidr == "0.0.0.0/0" {
return errors.Errorf("CIDR %q not allowed", cidr)
}
}
}
if raw, ok := cfg.defined[CloudInitUserDataKey].(string); ok && raw != "" {
userDataMap, err := ensureStringMaps(raw)
if err != nil {
return errors.Annotate(err, "cloudinit-userdata")
}
// if there are packages, ensure they are strings
if packages, ok := userDataMap["packages"].([]any); ok {
for _, v := range packages {
checker := schema.String()
if _, err := checker.Coerce(v, nil); err != nil {
return errors.Annotate(err, "cloudinit-userdata: packages must be a list of strings")
}
}
}
// error if users is specified
if _, ok := userDataMap["users"]; ok {
return errors.New("cloudinit-userdata: users not allowed")
}
// error if runcmd is specified
if _, ok := userDataMap["runcmd"]; ok {
return errors.New("cloudinit-userdata: runcmd not allowed, use preruncmd or postruncmd instead")
}
// error if bootcmd is specified
if _, ok := userDataMap["bootcmd"]; ok {
return errors.New("cloudinit-userdata: bootcmd not allowed")
}
}
if raw, ok := cfg.defined[ContainerInheritPropertiesKey].(string); ok && raw != "" {
rawProperties := strings.Split(raw, ",")
propertySet := set.NewStrings()
for _, prop := range rawProperties {
propertySet.Add(strings.TrimSpace(prop))
}
whiteListSet := set.NewStrings("apt-primary", "apt-sources", "apt-security", "ca-certs")
diffSet := propertySet.Difference(whiteListSet)
if !diffSet.IsEmpty() {
return errors.Errorf("container-inherit-properties: %s not allowed", strings.Join(diffSet.SortedValues(), ", "))
}
}
if err := cfg.validateCharmHubURL(); err != nil {
return errors.Trace(err)
}
if err := cfg.validateDefaultSpace(); err != nil {
return errors.Trace(err)
}
if err := cfg.validateDefaultBase(); err != nil {
return errors.Trace(err)
}
if err := cfg.validateMode(); err != nil {
return errors.Trace(err)
}
if err := cfg.validateCIDRs(cfg.SSHAllow(), true); err != nil {
return errors.Trace(err)
}
if err := cfg.validateCIDRs(cfg.SAASIngressAllow(), false); err != nil {
return errors.Trace(err)
}
if err := cfg.validateNumProvisionWorkers(); err != nil {
return errors.Trace(err)
}
if err := cfg.validateNumContainerProvisionWorkers(); err != nil {
return errors.Trace(err)
}
if old != nil {
// Check the immutable config values. These can't change
for _, attr := range immutableAttributes {
oldv, ok := old.defined[attr]
if !ok {
continue
}
if newv := cfg.defined[attr]; newv != oldv {
return fmt.Errorf("cannot change %s from %#v to %#v", attr, oldv, newv)
}
}
if _, oldFound := old.AgentVersion(); oldFound {
if _, newFound := cfg.AgentVersion(); !newFound {
return errors.New("cannot clear agent-version")
}
}
if _, oldFound := old.CharmHubURL(); oldFound {
if _, newFound := cfg.CharmHubURL(); !newFound {
return errors.New("cannot clear charmhub-url")
}
}
// apt-mirror can't be set back to "" if it has previously been set.
if old.AptMirror() != "" && cfg.AptMirror() == "" {
return errors.New("cannot clear apt-mirror")
}
}
// The user shouldn't specify both old and new proxy values.
if cfg.HasLegacyProxy() && cfg.HasJujuProxy() {
return errors.New("cannot specify both legacy proxy values and juju proxy values")
}
return nil
}
// ensureStringMaps parses the given string as YAML and returns it as a map
// in which the keys of any nested maps are strings.
func ensureStringMaps(in string) (map[string]any, error) {
userDataMap := make(map[string]any)
if err := yaml.Unmarshal([]byte(in), &userDataMap); err != nil {
return nil, errors.Annotate(err, "must be valid YAML")
}
out, err := utils.ConformYAML(userDataMap)
if err != nil {
return nil, err
}
return out.(map[string]any), nil
}
func isEmpty(val any) bool {
switch val := val.(type) {
case nil:
return true
case bool:
return false
case int:
// TODO(rog) fix this to return false when
// we can lose backward compatibility.
// https://bugs.launchpad.net/juju-core/+bug/1224492
return val == 0
case string:
return val == ""
case []any:
return len(val) == 0
case []string:
return len(val) == 0
case map[string]string:
return len(val) == 0
}
panic(fmt.Errorf("unexpected type %T in configuration", val))
}
// asString is a private helper method to keep the ugly string casting
// in one place. It returns the given named attribute as a string,
// returning "" if it isn't found.
func (c *Config) asString(name string) string {
value, _ := c.defined[name].(string)
return value
}
// mustString returns the named attribute as a string, panicking if
// it is not found or is empty.
func (c *Config) mustString(name string) string {
value, _ := c.defined[name].(string)
if value == "" {
panic(fmt.Errorf("empty value for %q found in configuration (type %T, val %v)", name, c.defined[name], c.defined[name]))
}
return value
}
// Type returns the model's cloud provider type.
func (c *Config) Type() string {
return c.mustString(TypeKey)
}
// Name returns the model name.
func (c *Config) Name() string {
return c.mustString(NameKey)
}
// UUID returns the uuid for the model.
func (c *Config) UUID() string {
return c.mustString(UUIDKey)
}
func (c *Config) validateDefaultSpace() error {
if raw, ok := c.defined[DefaultSpaceKey]; ok {
if v, ok := raw.(string); ok {
if v == "" {
return nil
}
if !names.IsValidSpace(v) {
return errors.NotValidf("default space name %q", raw)
}
} else {
return errors.NotValidf("type for default space name %v", raw)
}
}
return nil
}
// DefaultSpace returns the name of the space to be used
// for endpoint bindings that are not explicitly set.
func (c *Config) DefaultSpace() string {
return c.asString(DefaultSpaceKey)
}
func (c *Config) validateDefaultBase() error {
defaultBase, configured := c.DefaultBase()
if !configured {
return nil
}
parsedBase, err := corebase.ParseBaseFromString(defaultBase)
if err != nil {
return errors.Annotatef(err, "invalid default base %q", defaultBase)
}
supported := corebase.WorkloadBases()
logger.Tracef(context.TODO(), "supported bases %s", supported)
var found bool
for _, supportedBase := range supported {
if parsedBase.IsCompatible(supportedBase) {
found = true
break
}
}
if !found {
return errors.NotSupportedf("base %q", parsedBase.DisplayString())
}
return nil
}
// DefaultBase returns the configured default base for the model, and whether
// the default base was explicitly configured on the environment.
func (c *Config) DefaultBase() (string, bool) {
s, ok := c.defined[DefaultBaseKey]
if !ok {
return "", false
}
switch s := s.(type) {
case string:
return s, s != ""
default:
logger.Errorf(context.TODO(), "invalid default-base: %q", s)
return "", false
}
}
// AuthorizedKeys returns the content for ssh's authorized_keys file.
func (c *Config) AuthorizedKeys() string {
value, _ := c.defined[AuthorizedKeysKey].(string)
return value
}
// ProxySSH returns a flag indicating whether SSH commands
// should be proxied through the API server.
func (c *Config) ProxySSH() bool {
value, _ := c.defined["proxy-ssh"].(bool)
return value
}
// NetBondReconfigureDelay returns the duration in seconds that should be
// passed to the bridge script when bridging bonded interfaces.
func (c *Config) NetBondReconfigureDelay() int {
value, _ := c.defined[NetBondReconfigureDelayKey].(int)
return value
}
// ContainerNetworkingMethod returns the method with which
// container networking should be set up.
func (c *Config) ContainerNetworkingMethod() coremodelconfig.ContainerNetworkingMethod {
return coremodelconfig.ContainerNetworkingMethod(c.asString(ContainerNetworkingMethodKey))
}
// LegacyProxySettings returns all four proxy settings: http, https, ftp, and no
// proxy. These are considered legacy because using them causes the environment
// to be updated, which has been shown not to work in many cases. They are kept
// to avoid breaking environments where they are sufficient.
func (c *Config) LegacyProxySettings() proxy.Settings {
return proxy.Settings{
Http: c.HTTPProxy(),
Https: c.HTTPSProxy(),
Ftp: c.FTPProxy(),
NoProxy: c.NoProxy(),
}
}
// HasLegacyProxy returns true if there is any proxy set using the old legacy proxy keys.
func (c *Config) HasLegacyProxy() bool {
// We exclude the no-proxy value as it has a default value.
return c.HTTPProxy() != "" ||
c.HTTPSProxy() != "" ||
c.FTPProxy() != ""
}
// HasJujuProxy returns true if there is any proxy set using the new juju-proxy keys.
func (c *Config) HasJujuProxy() bool {
// We exclude the no-proxy value as it has a default value.
return c.JujuHTTPProxy() != "" ||
c.JujuHTTPSProxy() != "" ||
c.JujuFTPProxy() != ""
}
// JujuProxySettings returns all four proxy settings that have been set using the
// juju- prefixed proxy settings. These values determine the current best practice
// for proxies.
func (c *Config) JujuProxySettings() proxy.Settings {
return proxy.Settings{
Http: c.JujuHTTPProxy(),
Https: c.JujuHTTPSProxy(),
Ftp: c.JujuFTPProxy(),
NoProxy: c.JujuNoProxy(),
}
}
// HTTPProxy returns the legacy http proxy for the model.
func (c *Config) HTTPProxy() string {
return c.asString(HTTPProxyKey)
}
// HTTPSProxy returns the legacy https proxy for the model.
func (c *Config) HTTPSProxy() string {
return c.asString(HTTPSProxyKey)
}
// FTPProxy returns the legacy ftp proxy for the model.
func (c *Config) FTPProxy() string {
return c.asString(FTPProxyKey)
}
// NoProxy returns the legacy 'no-proxy' for the model.
func (c *Config) NoProxy() string {
return c.asString(NoProxyKey)
}
// JujuHTTPProxy returns the http proxy for the model.
func (c *Config) JujuHTTPProxy() string {
return c.asString(JujuHTTPProxyKey)
}
// JujuHTTPSProxy returns the https proxy for the model.
func (c *Config) JujuHTTPSProxy() string {
return c.asString(JujuHTTPSProxyKey)
}
// JujuFTPProxy returns the ftp proxy for the model.
func (c *Config) JujuFTPProxy() string {
return c.asString(JujuFTPProxyKey)
}
// JujuNoProxy returns the 'no-proxy' for the model.
// This value can contain CIDR values.
func (c *Config) JujuNoProxy() string {
return c.asString(JujuNoProxyKey)
}
func (c *Config) getWithFallback(key, fallback1, fallback2 string) string {
value := c.asString(key)
if value == "" {
value = c.asString(fallback1)
}
if value == "" {
value = c.asString(fallback2)
}
return value
}
// addSchemeIfMissing adds a scheme to a URL if it is missing
func addSchemeIfMissing(defaultScheme string, url string) string {
if url != "" && !strings.Contains(url, "://") {
url = defaultScheme + "://" + url
}
return url
}
// AptProxySettings returns the APT proxy settings: http, https, ftp, and no-proxy.
func (c *Config) AptProxySettings() proxy.Settings {
return proxy.Settings{
Http: c.AptHTTPProxy(),
Https: c.AptHTTPSProxy(),
Ftp: c.AptFTPProxy(),
NoProxy: c.AptNoProxy(),
}
}
// AptHTTPProxy returns the apt http proxy for the model.
// Falls back to the default http-proxy if not specified.
func (c *Config) AptHTTPProxy() string {
return addSchemeIfMissing("http", c.getWithFallback(AptHTTPProxyKey, JujuHTTPProxyKey, HTTPProxyKey))
}
// AptHTTPSProxy returns the apt https proxy for the model.
// Falls back to the default https-proxy if not specified.
func (c *Config) AptHTTPSProxy() string {
return addSchemeIfMissing("https", c.getWithFallback(AptHTTPSProxyKey, JujuHTTPSProxyKey, HTTPSProxyKey))
}
// AptFTPProxy returns the apt ftp proxy for the model.
// Falls back to the default ftp-proxy if not specified.
func (c *Config) AptFTPProxy() string {
return addSchemeIfMissing("ftp", c.getWithFallback(AptFTPProxyKey, JujuFTPProxyKey, FTPProxyKey))
}
// AptNoProxy returns the 'apt-no-proxy' for the model.
func (c *Config) AptNoProxy() string {
value := c.asString(AptNoProxyKey)
if value == "" {
if c.HasLegacyProxy() {
value = c.asString(NoProxyKey)
} else {
value = c.asString(JujuNoProxyKey)
}
}
return value
}
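// An illustrative sketch of the fallback chain (the proxy address is
// hypothetical): with only the juju-http-proxy key set, the APT accessor
// falls back to it and prepends a scheme if one is missing:
//
//	cfg, _ := New(UseDefaults, map[string]any{
//		NameKey: "my-model", TypeKey: "ec2",
//		UUIDKey:          "f47ac10b-58cc-4372-a567-0e02b2c3d479",
//		JujuHTTPProxyKey: "proxy.internal:3128",
//	})
//	_ = cfg.AptHTTPProxy() // "http://proxy.internal:3128"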
// AptMirror returns the apt mirror for the model.
func (c *Config) AptMirror() string {
return c.asString(AptMirrorKey)
}
// SnapProxySettings returns the two snap proxy settings: http and https.
func (c *Config) SnapProxySettings() proxy.Settings {
return proxy.Settings{
Http: c.SnapHTTPProxy(),
Https: c.SnapHTTPSProxy(),
}
}
// SnapHTTPProxy returns the snap http proxy for the model.
func (c *Config) SnapHTTPProxy() string {
return c.asString(SnapHTTPProxyKey)
}
// SnapHTTPSProxy returns the snap https proxy for the model.
func (c *Config) SnapHTTPSProxy() string {
return c.asString(SnapHTTPSProxyKey)
}
// SnapStoreProxy returns the snap store proxy for the model.
func (c *Config) SnapStoreProxy() string {
return c.asString(SnapStoreProxyKey)
}
// SnapStoreAssertions returns the snap store assertions for the model.
func (c *Config) SnapStoreAssertions() string {
return c.asString(SnapStoreAssertionsKey)
}
// SnapStoreProxyURL returns the snap store proxy URL for the model.
func (c *Config) SnapStoreProxyURL() string {
return c.asString(SnapStoreProxyURLKey)
}
// FirewallMode returns whether the firewall should
// manage ports per machine, globally, or not at all.
// (FwInstance, FwGlobal, or FwNone).
func (c *Config) FirewallMode() string {
return c.mustString("firewall-mode")
}
// AgentVersion returns the proposed version number for the agent tools,
// and whether it has been set. Once an environment is bootstrapped, this
// must always be valid.
func (c *Config) AgentVersion() (version.Number, bool) {
if v, ok := c.defined[AgentVersionKey].(string); ok {
n, err := version.Parse(v)
if err != nil {
panic(err) // We should have checked it earlier.
}
return n, true
}
return version.Zero, false
}
// AgentMetadataURL returns the URL that locates the agent tarballs and metadata,
// and whether it has been set.
func (c *Config) AgentMetadataURL() (string, bool) {
if url, ok := c.defined[AgentMetadataURLKey]; ok && url != "" {
return url.(string), true
}
return "", false
}
// ImageMetadataURL returns the URL at which the metadata used to locate image
// ids is located, and whether it has been set.
func (c *Config) ImageMetadataURL() (string, bool) {
if url, ok := c.defined[ImageMetadataURLKey]; ok && url != "" {
return url.(string), true
}
return "", false
}
// ImageMetadataDefaultsDisabled returns whether or not default image metadata
// sources are disabled. Useful for airgapped installations.
func (c *Config) ImageMetadataDefaultsDisabled() bool {
val, ok := c.defined[ImageMetadataDefaultsDisabledKey].(bool)
if !ok {
// defaults to false.
return false
}
return val
}
// ContainerImageMetadataURL returns the URL at which the metadata used to
// locate container OS image ids is located, and whether it has been set.
func (c *Config) ContainerImageMetadataURL() (string, bool) {
if url, ok := c.defined[ContainerImageMetadataURLKey]; ok && url != "" {
return url.(string), true
}
return "", false
}
// ContainerImageMetadataDefaultsDisabled returns whether or not default image metadata
// sources are disabled for containers. Useful for airgapped installations.
func (c *Config) ContainerImageMetadataDefaultsDisabled() bool {
val, ok := c.defined[ContainerImageMetadataDefaultsDisabledKey].(bool)
if !ok {
// defaults to false.
return false
}
return val
}
// Development returns whether the environment is in development mode.
func (c *Config) Development() bool {
value, _ := c.defined[DevelopmentKey].(bool)
return value
}
// EnableOSRefreshUpdate returns whether or not newly provisioned
// instances should run their respective OS's update capability.
func (c *Config) EnableOSRefreshUpdate() bool {
val, ok := c.defined[EnableOSRefreshUpdateKey].(bool)
if !ok {
return true
}
return val
}
// EnableOSUpgrade returns whether or not newly provisioned instances
// should run their respective OS's upgrade capability.
func (c *Config) EnableOSUpgrade() bool {
val, ok := c.defined[EnableOSUpgradeKey].(bool)
if !ok {
return true
}
return val
}
// SSLHostnameVerification returns whether the environment has requested
// SSL hostname verification to be enabled.
func (c *Config) SSLHostnameVerification() bool {
val, ok := c.defined["ssl-hostname-verification"].(bool)
if !ok {
return true
}
return val
}
// LoggingConfig returns the configuration string for the loggers.
func (c *Config) LoggingConfig() string {
return c.asString(LoggingConfigKey)
}
// BackupDir returns the configured directory to use for temporary
// backup files.
func (c *Config) BackupDir() string {
return c.asString(BackupDirKey)
}
// AutomaticallyRetryHooks returns whether we should automatically retry hooks.
// By default this should be true.
func (c *Config) AutomaticallyRetryHooks() bool {
val, ok := c.defined["automatically-retry-hooks"].(bool)
if !ok {
return true
}
return val
}
// TransmitVendorMetrics returns whether the controller sends charm-collected metrics
// in this model for anonymized aggregate analytics. By default this should be true.
func (c *Config) TransmitVendorMetrics() bool {
val, ok := c.defined[TransmitVendorMetricsKey].(bool)
if !ok {
return true
}
return val
}
// ProvisionerHarvestMode reports the harvesting methodology the
// provisioner should take.
func (c *Config) ProvisionerHarvestMode() HarvestMode {
if v, ok := c.defined[ProvisionerHarvestModeKey].(string); ok {
if method, err := ParseHarvestMode(v); err != nil {
// This setting should have already been validated. Don't
// burden the caller with handling any errors.
panic(err)
} else {
return method
}
} else {
return HarvestDestroyed
}
}
// NumProvisionWorkers returns the number of provisioner workers to use.
func (c *Config) NumProvisionWorkers() int {
value, _ := c.defined[NumProvisionWorkersKey].(int)
return value
}
const (
// MaxNumProvisionWorkers is the maximum number of provisioner workers.
MaxNumProvisionWorkers = 100
// MaxNumContainerProvisionWorkers is the maximum number of container
// provisioner workers.
MaxNumContainerProvisionWorkers = 25
)
// validateNumProvisionWorkers ensures the number cannot be set to
// more than 100.
// TODO: (hml) 26-Feb-2024
// Once we can better link the controller config and the model config,
// allow the max value to be set in the controller config.
func (c *Config) validateNumProvisionWorkers() error {
value, ok := c.defined[NumProvisionWorkersKey].(int)
if ok && value > MaxNumProvisionWorkers {
return errors.Errorf("%s: must be less than %d", NumProvisionWorkersKey, MaxNumProvisionWorkers)
}
return nil
}
// NumContainerProvisionWorkers returns the number of container provisioner
// workers to use.
func (c *Config) NumContainerProvisionWorkers() int {
value, _ := c.defined[NumContainerProvisionWorkersKey].(int)
return value
}
// validateNumContainerProvisionWorkers ensures the number cannot be set to
// more than 25.
// TODO: (hml) 26-Feb-2024
// Once we can better link the controller config and the model config,
// allow the max value to be set in the controller config.
func (c *Config) validateNumContainerProvisionWorkers() error {
value, ok := c.defined[NumContainerProvisionWorkersKey].(int)
if ok && value > MaxNumContainerProvisionWorkers {
return errors.Errorf("%s: must be less than %d", NumContainerProvisionWorkersKey, MaxNumContainerProvisionWorkers)
}
return nil
}
// ImageStream returns the simplestreams stream
// used to identify which image ids to search
// when starting an instance.
func (c *Config) ImageStream() string {
v, _ := c.defined["image-stream"].(string)
if v != "" {
return v
}
return "released"
}
// AgentStream returns the simplestreams stream
// used to identify which tools to use when
// bootstrapping or upgrading an environment.
func (c *Config) AgentStream() string {
v, _ := c.defined[AgentStreamKey].(string)
if v != "" {
return v
}
return "released"
}
// ContainerImageStream returns the simplestreams stream used to identify which
// image ids to search when starting a container.
func (c *Config) ContainerImageStream() string {
v, _ := c.defined[ContainerImageStreamKey].(string)
if v != "" {
return v
}
return "released"
}
// CharmHubURL returns the URL to use for CharmHub API calls.
func (c *Config) CharmHubURL() (string, bool) {
if v, ok := c.defined[CharmHubURLKey].(string); ok && v != "" {
return v, true
}
return charmhub.DefaultServerURL, false
}
func (c *Config) validateCharmHubURL() error {
if v, ok := c.defined[CharmHubURLKey].(string); ok {
if v == "" {
return errors.NotValidf("charm-hub url")
}
if _, err := url.ParseRequestURI(v); err != nil {
return errors.NotValidf("charm-hub url %q", v)
}
}
return nil
}
const (
// RequiresPromptsMode is used to tell clients interacting with the
// model that confirmation prompts are required when removing
// potentially important resources.
// StrictMode is currently unused
// TODO(jack-w-shaw) remove this mode
StrictMode = "strict"
)
var allModes = set.NewStrings(RequiresPromptsMode, StrictMode)
// Mode returns the set of mode types for the configuration.
// Only 'requires-prompts' is meaningful at the moment; 'strict' is defined but unused.
func (c *Config) Mode() (set.Strings, bool) {
modes, ok := c.defined[ModeKey]
if !ok {
return set.NewStrings(), false
}
if m, ok := modes.(string); ok {
s := set.NewStrings()
for _, v := range strings.Split(strings.TrimSpace(m), ",") {
if v == "" {
continue
}
s.Add(strings.TrimSpace(v))
}
if s.Size() > 0 {
return s, true
}
}
return set.NewStrings(), false
}
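// An illustrative sketch: the mode value is a comma-separated string, split
// and trimmed into a set:
//
//	// with ModeKey set to "requires-prompts"
//	modes, ok := cfg.Mode()
//	_ = ok                                  // true
//	_ = modes.Contains(RequiresPromptsMode) // true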
func (c *Config) validateMode() error {
modes, _ := c.Mode()
difference := modes.Difference(allModes)
if !difference.IsEmpty() {
return errors.NotValidf("mode(s) %q", strings.Join(difference.SortedValues(), ", "))
}
return nil
}
// SSHAllow returns a slice of CIDRs from which machines in
// this model will accept connections to the SSH service
func (c *Config) SSHAllow() []string {
allowList, ok := c.defined[SSHAllowKey].(string)
if !ok {
return []string{"0.0.0.0/0", "::/0"}
}
if allowList == "" {
return []string{}
}
return strings.Split(allowList, ",")
}
// SAASIngressAllow returns a slice of CIDRs specifying what
// ingress can be applied to offers in this model
func (c *Config) SAASIngressAllow() []string {
allowList, ok := c.defined[SAASIngressAllowKey].(string)
if !ok {
return []string{"0.0.0.0/0"}
}
if allowList == "" {
return []string{}
}
return strings.Split(allowList, ",")
}
func (c *Config) validateCIDRs(cidrs []string, allowEmpty bool) error {
if len(cidrs) == 0 && !allowEmpty {
return errors.NotValidf("empty cidrs")
}
for _, cidr := range cidrs {
if _, _, err := net.ParseCIDR(cidr); err != nil {
return errors.NotValidf("cidr %q", cidr)
}
}
return nil
}
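// An illustrative sketch of the allow-list handling: the values are stored
// as comma-separated CIDR strings and returned as slices; ssh-allow may be
// emptied, while saas-ingress-allow must not be empty:
//
//	// with SSHAllowKey set to "192.168.0.0/24,10.0.0.0/8"
//	_ = cfg.SSHAllow() // []string{"192.168.0.0/24", "10.0.0.0/8"}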
// DisableNetworkManagement reports whether Juju is allowed to
// configure and manage networking inside the environment.
func (c *Config) DisableNetworkManagement() (bool, bool) {
v, ok := c.defined["disable-network-management"].(bool)
return v, ok
}
// IgnoreMachineAddresses reports whether Juju will discover
// and store machine addresses on startup.
func (c *Config) IgnoreMachineAddresses() (bool, bool) {
v, ok := c.defined[IgnoreMachineAddresses].(bool)
return v, ok
}
// StorageDefaultBlockSource returns the default block storage
// source for the model.
func (c *Config) StorageDefaultBlockSource() (string, bool) {
bs := c.asString(StorageDefaultBlockSourceKey)
return bs, bs != ""
}
// StorageDefaultFilesystemSource returns the default filesystem
// storage source for the model.
func (c *Config) StorageDefaultFilesystemSource() (string, bool) {
bs := c.asString(StorageDefaultFilesystemSourceKey)
return bs, bs != ""
}
// ResourceTags returns a set of tags to set on environment resources
// that Juju creates and manages, if the provider supports them. These
// tags have no special meaning to Juju, but may be used for existing
// chargeback accounting schemes or other identification purposes.
func (c *Config) ResourceTags() (map[string]string, bool) {
tags, err := c.resourceTags()
if err != nil {
panic(err) // should be prevented by Validate
}
return tags, tags != nil
}
func (c *Config) resourceTags() (map[string]string, error) {
v, ok := c.defined[ResourceTagsKey].(map[string]string)
if !ok {
return nil, nil
}
for k := range v {
if strings.HasPrefix(k, tags.JujuTagPrefix) {
return nil, errors.Errorf("tag %q uses reserved prefix %q", k, tags.JujuTagPrefix)
}
}
return v, nil
}
// MaxActionResultsAge returns the maximum age of action results to keep
// when pruning.
func (c *Config) MaxActionResultsAge() time.Duration {
// Value has already been validated.
val, _ := time.ParseDuration(c.mustString(MaxActionResultsAge))
return val
}
// MaxActionResultsSizeMB returns the maximum size, in MiB, that the action
// results collection can grow to before it is pruned.
func (c *Config) MaxActionResultsSizeMB() uint {
// Value has already been validated.
val, _ := utils.ParseSize(c.mustString(MaxActionResultsSize))
return uint(val)
}
// UpdateStatusHookInterval is how often to run the charm
// update-status hook.
func (c *Config) UpdateStatusHookInterval() time.Duration {
// Value has already been validated.
val, _ := time.ParseDuration(c.asString(UpdateStatusHookInterval))
return val
}
// EgressSubnets are the source addresses from which traffic from this model
// originates if the model is deployed such that NAT or similar is in use.
func (c *Config) EgressSubnets() []string {
raw := c.asString(EgressSubnets)
if raw == "" {
return []string{}
}
// Value has already been validated.
rawAddr := strings.Split(raw, ",")
result := make([]string, len(rawAddr))
for i, addr := range rawAddr {
result[i] = strings.TrimSpace(addr)
}
return result
}
// CloudInitUserData returns a copy of the raw user data attributes
// that were specified by the user.
func (c *Config) CloudInitUserData() map[string]any {
raw := c.asString(CloudInitUserDataKey)
if raw == "" {
return nil
}
// The raw data has already passed Validate()
conformingUserDataMap, _ := ensureStringMaps(raw)
return conformingUserDataMap
}
// ContainerInheritProperties returns the raw comma-separated list of machine
// properties, specified by the user, to be copied to containers during provisioning.
func (c *Config) ContainerInheritProperties() string {
return c.asString(ContainerInheritPropertiesKey)
}
// LXDSnapChannel returns the channel to be used when installing LXD from a snap.
func (c *Config) LXDSnapChannel() string {
return c.asString(LXDSnapChannel)
}
// Telemetry returns whether telemetry is enabled for the model.
func (c *Config) Telemetry() bool {
value, _ := c.defined[DisableTelemetryKey].(bool)
return !value
}
// UnknownAttrs returns a copy of the raw configuration attributes
// that are supposedly specific to the environment type. They could
// also be wrong attributes, though. Only the specific environment
// implementation can tell.
func (c *Config) UnknownAttrs() map[string]any {
newAttrs := make(map[string]any)
for k, v := range c.unknown {
newAttrs[k] = v
}
return newAttrs
}
// AllAttrs returns a copy of the raw configuration attributes.
func (c *Config) AllAttrs() map[string]any {
allAttrs := c.UnknownAttrs()
for k, v := range c.defined {
allAttrs[k] = v
}
return allAttrs
}
// Remove returns a new configuration that has the attributes of c minus attrs.
func (c *Config) Remove(attrs []string) (*Config, error) {
defined := c.AllAttrs()
for _, k := range attrs {
delete(defined, k)
}
return New(NoDefaults, defined)
}
// Apply returns a new configuration that has the attributes of c plus attrs.
func (c *Config) Apply(attrs map[string]any) (*Config, error) {
defined := c.AllAttrs()
for k, v := range attrs {
defined[k] = v
}
return New(NoDefaults, defined)
}
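// An illustrative sketch: Config values are immutable, so changes are made
// by deriving a new configuration, which is re-validated by New:
//
//	updated, err := cfg.Apply(map[string]any{
//		LoggingConfigKey: "<root>=DEBUG",
//	})
//	if err != nil {
//		return err
//	}
//	// cfg is untouched; updated carries the new logging config.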
// fields returns the validation schema fields derived from configSchema.
var fields = func() schema.Fields {
combinedSchema, err := Schema(nil)
if err != nil {
panic(err)
}
fs, _, err := combinedSchema.ValidationSchema()
if err != nil {
panic(err)
}
return fs
}
// alwaysOptional holds configuration defaults for attributes that may
// be unspecified even after a configuration has been created with all
// defaults filled out.
//
// This table is not definitive: it specifies those attributes which are
// optional when the config goes through its initial schema coercion,
// but some fields listed as optional here are actually mandatory
// with NoDefaults and are checked at the later Validate stage.
var alwaysOptional = schema.Defaults{
AgentVersionKey: schema.Omit,
AuthorizedKeysKey: schema.Omit,
ExtraInfoKey: schema.Omit,
// Storage related config.
// Environ providers will specify their own defaults.
StorageDefaultBlockSourceKey: schema.Omit,
StorageDefaultFilesystemSourceKey: schema.Omit,
"firewall-mode": schema.Omit,
SSHAllowKey: schema.Omit,
SAASIngressAllowKey: schema.Omit,
"logging-config": schema.Omit,
ProvisionerHarvestModeKey: schema.Omit,
NumProvisionWorkersKey: schema.Omit,
NumContainerProvisionWorkersKey: schema.Omit,
HTTPProxyKey: schema.Omit,
HTTPSProxyKey: schema.Omit,
FTPProxyKey: schema.Omit,
NoProxyKey: schema.Omit,
JujuHTTPProxyKey: schema.Omit,
JujuHTTPSProxyKey: schema.Omit,
JujuFTPProxyKey: schema.Omit,
JujuNoProxyKey: schema.Omit,
AptHTTPProxyKey: schema.Omit,
AptHTTPSProxyKey: schema.Omit,
AptFTPProxyKey: schema.Omit,
AptNoProxyKey: schema.Omit,
SnapHTTPProxyKey: schema.Omit,
SnapHTTPSProxyKey: schema.Omit,
SnapStoreProxyKey: schema.Omit,
SnapStoreAssertionsKey: schema.Omit,
SnapStoreProxyURLKey: schema.Omit,
AptMirrorKey: schema.Omit,
AgentStreamKey: schema.Omit,
ResourceTagsKey: schema.Omit,
"cloudimg-base-url": schema.Omit,
EnableOSRefreshUpdateKey: schema.Omit,
EnableOSUpgradeKey: schema.Omit,
DefaultBaseKey: schema.Omit,
DevelopmentKey: schema.Omit,
SSLHostnameVerificationKey: schema.Omit,
"proxy-ssh": schema.Omit,
"disable-network-management": schema.Omit,
IgnoreMachineAddresses: schema.Omit,
AutomaticallyRetryHooks: schema.Omit,
TestModeKey: schema.Omit,
DisableTelemetryKey: schema.Omit,
ModeKey: schema.Omit,
TransmitVendorMetricsKey: schema.Omit,
NetBondReconfigureDelayKey: schema.Omit,
ContainerNetworkingMethodKey: schema.Omit,
MaxActionResultsAge: schema.Omit,
MaxActionResultsSize: schema.Omit,
UpdateStatusHookInterval: schema.Omit,
EgressSubnets: schema.Omit,
CloudInitUserDataKey: schema.Omit,
ContainerInheritPropertiesKey: schema.Omit,
BackupDirKey: schema.Omit,
DefaultSpaceKey: schema.Omit,
LXDSnapChannel: schema.Omit,
CharmHubURLKey: schema.Omit,
AgentMetadataURLKey: schema.Omit,
ImageStreamKey: schema.Omit,
ImageMetadataURLKey: schema.Omit,
ImageMetadataDefaultsDisabledKey: schema.Omit,
ContainerImageStreamKey: schema.Omit,
ContainerImageMetadataURLKey: schema.Omit,
ContainerImageMetadataDefaultsDisabledKey: schema.Omit,
}
func allowEmpty(attr string) bool {
return alwaysOptional[attr] == "" || alwaysOptional[attr] == schema.Omit
}
// allDefaults returns a schema.Defaults that contains
// defaults to be used when creating a new config with
// UseDefaults.
func allDefaults() schema.Defaults {
d := schema.Defaults{}
configDefaults := ConfigDefaults()
for attr, val := range configDefaults {
d[attr] = val
}
for attr, val := range alwaysOptional {
if developerConfigValue(attr) {
continue
}
if _, ok := d[attr]; !ok {
d[attr] = val
}
}
return d
}
// immutableAttributes holds those attributes
// which are not allowed to change in the lifetime
// of an environment.
var immutableAttributes = []string{
NameKey,
TypeKey,
UUIDKey,
"firewall-mode",
CharmHubURLKey,
}
var (
initSchema sync.Once
allFields schema.Fields
defaultsWhenParsing schema.Defaults
withDefaultsChecker schema.Checker
noDefaultsChecker schema.Checker
coerceChecker schema.Checker
)
// ValidateUnknownAttrs checks the unknown attributes of the config against
// the supplied fields and defaults, and returns an error if any fails to
// validate. Unknown fields are warned about, but preserved, on the basis
// that they are reasonably likely to have been written by or for a version
// of juju that does recognise the fields, but that their presence is still
// anomalous to some degree and should be flagged (and that there is thereby
// a mechanism for observing fields that really are typos etc).
func (c *Config) ValidateUnknownAttrs(extrafields schema.Fields, defaults schema.Defaults) (map[string]any, error) {
attrs := c.UnknownAttrs()
checker := schema.FieldMap(extrafields, defaults)
coerced, err := checker.Coerce(attrs, nil)
if err != nil {
logger.Debugf(context.TODO(), "coercion failed attributes: %#v, checker: %#v, %v", attrs, checker, err)
return nil, err
}
result := coerced.(map[string]any)
for name, value := range attrs {
if extrafields[name] == nil {
// We know this name isn't in the global fields, or it wouldn't be
// an UnknownAttr, it also appears to not be in the extra fields
// that are provider specific. Check to see if an alternative
// spelling is in either the extra fields or the core fields.
if val, isString := value.(string); isString && val != "" {
// only warn about attributes with non-empty string values
altName := strings.Replace(name, "_", "-", -1)
if extrafields[altName] != nil || allFields[altName] != nil {
logger.Warningf(context.TODO(), "unknown config field %q, did you mean %q?", name, altName)
} else {
logger.Warningf(context.TODO(), "unknown config field %q", name)
}
}
result[name] = value
// The only allowed types for unknown attributes are string, int,
// float, bool and []any (which is really []string)
switch t := value.(type) {
case string:
continue
case int:
continue
case bool:
continue
case float32:
continue
case float64:
continue
case []any:
for _, val := range t {
if _, ok := val.(string); !ok {
return nil, errors.Errorf("%s: unknown type (%v)", name, value)
}
}
continue
default:
return nil, errors.Errorf("%s: unknown type (%q)", name, value)
}
}
}
return result, nil
}
func addIfNotEmpty(settings map[string]any, key, value string) {
if value != "" {
settings[key] = value
}
}
// ProxyConfigMap returns a map suitable to be applied to a Config to update
// proxy settings.
func ProxyConfigMap(proxySettings proxy.Settings) map[string]any {
settings := make(map[string]any)
addIfNotEmpty(settings, HTTPProxyKey, proxySettings.Http)
addIfNotEmpty(settings, HTTPSProxyKey, proxySettings.Https)
addIfNotEmpty(settings, FTPProxyKey, proxySettings.Ftp)
addIfNotEmpty(settings, NoProxyKey, proxySettings.NoProxy)
return settings
}
// AptProxyConfigMap returns a map suitable to be applied to a Config to update
// proxy settings.
func AptProxyConfigMap(proxySettings proxy.Settings) map[string]any {
settings := make(map[string]any)
addIfNotEmpty(settings, AptHTTPProxyKey, proxySettings.Http)
addIfNotEmpty(settings, AptHTTPSProxyKey, proxySettings.Https)
addIfNotEmpty(settings, AptFTPProxyKey, proxySettings.Ftp)
addIfNotEmpty(settings, AptNoProxyKey, proxySettings.NoProxy)
return settings
}
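// An illustrative sketch (the proxy address is hypothetical): these maps are
// typically combined with Apply to fold detected proxy settings into a model
// configuration:
//
//	attrs := ProxyConfigMap(proxy.Settings{Http: "http://proxy.internal:3128"})
//	updated, err := cfg.Apply(attrs)
//	if err != nil {
//		return err
//	}
//	_ = updated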
func developerConfigValue(name string) bool {
if !featureflag.Enabled(featureflag.DeveloperMode) {
switch name {
// Add developer-mode keys here.
}
}
return false
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package config
import (
"github.com/juju/errors"
"github.com/juju/juju/controller"
"github.com/juju/juju/internal/configschema"
)
// Schema returns a configuration schema that includes both
// the given extra fields and all the fields defined in this package.
// It returns an error if extra defines any fields defined in this
// package.
func Schema(extra configschema.Fields) (configschema.Fields, error) {
fields := make(configschema.Fields)
for name, field := range configSchema {
if developerConfigValue(name) {
continue
}
if controller.ControllerOnlyAttribute(name) {
return nil, errors.Errorf(
"config field %q clashes with controller config",
name,
)
}
fields[name] = field
}
for name, field := range extra {
if controller.ControllerOnlyAttribute(name) {
return nil, errors.Errorf(
"config field %q clashes with controller config",
name,
)
}
if _, ok := fields[name]; ok {
return nil, errors.Errorf(
"config field %q clashes with global config",
name,
)
}
fields[name] = field
}
return fields, nil
}
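// An illustrative sketch (the extra field is hypothetical): a provider merges
// its own schema fields with the global ones, with clashes against controller
// or global config rejected:
//
//	extra := configschema.Fields{
//		"vpc-id": {
//			Description: "The provider-specific VPC to deploy into",
//			Type:        configschema.Tstring,
//		},
//	}
//	combined, err := Schema(extra)
//	if err != nil {
//		return err
//	}
//	_ = combined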
// configSchema holds information on all the fields defined by
// the config package.
var configSchema = configschema.Fields{
AgentMetadataURLKey: {
Description: "URL of private stream",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AgentStreamKey: {
Description: `Version of Juju to use for deploy/upgrades.`,
Documentation: `
The agent-stream key specifies the “stream” to use when a Juju agent is to be
installed or upgraded. This setting reflects the general stability of the
software and defaults to ‘released’, indicating that only the latest stable
version is to be used.
To run the upcoming stable release (before it has passed the normal QA process)
you can set:
agent-stream: proposed
For testing purposes, you can use the latest unstable version by setting:
agent-stream: devel
The agent-version option specifies a “patch version” for the agent that is to be
installed on a new controller relative to the Juju client’s current major.minor
version (Juju uses a major.minor.patch numbering scheme).
For example, Juju 3.6.2 means major version 3, minor version 6, and patch
version 2. On a client system with this release of Juju installed, the machine
agent’s version for a newly-created controller would be the same. To specify a
patch version of 1 (instead of 2), the following would be run:
juju bootstrap aws --agent-version='3.6.1'
If a patch version is available that is greater than that of the client then it
can be targeted in this way:
juju bootstrap aws --auto-upgrade
`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AgentVersionKey: {
Description: "The desired Juju agent version to use",
Type: configschema.Tstring,
Group: configschema.JujuGroup,
Immutable: true,
},
AptFTPProxyKey: {
// TODO document acceptable format
Description: "The APT FTP proxy for the model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AptHTTPProxyKey: {
// TODO document acceptable format
Description: "The APT HTTP proxy for the model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AptHTTPSProxyKey: {
// TODO document acceptable format
Description: "The APT HTTPS proxy for the model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AptNoProxyKey: {
Description: "List of domain addresses not to be proxied for APT (comma-separated)",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
AptMirrorKey: {
Description: "The APT mirror for the model",
Documentation: `
The APT packaging system is used to install and upgrade software on machines
provisioned in the model, and many charms also use APT to install software for
the applications they deploy. It is possible to set a specific mirror for the
APT packages to use, by setting ‘apt-mirror’:
juju model-config apt-mirror=http://archive.ubuntu.com/ubuntu/
To restore the default behaviour you would run:
juju model-config --reset apt-mirror
The apt-mirror option is often used to point to a local mirror.
`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
DefaultBaseKey: {
Description: "The default base image to use for deploying charms, will act like --base when deploying charms",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
// TODO (jack-w-shaw) integrate this into mode
DevelopmentKey: {
Description: "Whether the model is in development mode",
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
"disable-network-management": {
Description: `Whether the provider should control networks (on MAAS models, set to true for MAAS to control networks)`,
Documentation: `
This key can only be used with MAAS models and should otherwise be set to
‘false’ (default) unless you want to take over network control from Juju because
you have unique and well-defined needs. Setting this to ‘true’ with MAAS gives
you the same behaviour with containers as you already have with other providers:
one machine-local address on a single network interface, bridged to the default
bridge.
`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
IgnoreMachineAddresses: {
Description: "Whether the machine worker should discover machine addresses on startup",
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
EnableOSRefreshUpdateKey: {
Description: `Whether newly provisioned instances should run their respective OS's update capability.`,
Documentation: `
When Juju provisions a machine, its default behaviour is to upgrade existing
packages to their latest version. If your OS images are fresh and/or your
deployed applications do not require the latest package versions, you can
disable upgrades in order to provision machines faster.
Two boolean configuration options are available to disable APT updates and
upgrades: enable-os-refresh-update (apt update) and enable-os-upgrade (apt
upgrade), respectively.
enable-os-refresh-update: false
enable-os-upgrade: false
You may also want to just update the package list to ensure a charm has the
latest software available to it by disabling upgrades but enabling updates.
`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
EnableOSUpgradeKey: {
Description: `Whether newly provisioned instances should run their respective OS's upgrade capability.`,
Documentation: `
When Juju provisions a machine, its default behaviour is to upgrade existing
packages to their latest version. If your OS images are fresh and/or your
deployed applications do not require the latest package versions, you can
disable upgrades in order to provision machines faster.
Two Boolean configuration options are available to disable APT updates and
upgrades: enable-os-refresh-update (apt update) and enable-os-upgrade (apt
upgrade), respectively.
enable-os-refresh-update: false
enable-os-upgrade: false
You may also want to just update the package list to ensure a charm has the
latest software available to it by disabling upgrades but enabling updates.
`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
ExtraInfoKey: {
Description: "Arbitrary user specified string data that is stored against the model.",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
"firewall-mode": {
Description: `The mode to use for network firewalling.`,
Documentation: `
- 'instance' requests the use of an individual firewall per instance.
- 'global' uses a single firewall for all instances (access
for a network port is enabled to one instance if any instance requires
that port).
- 'none' requests that no firewalling should be performed
inside the model. It's useful for clouds without support for either
global or per instance security groups.`,
Type: configschema.Tstring,
Values: []interface{}{FwInstance, FwGlobal, FwNone},
Immutable: true,
Group: configschema.EnvironGroup,
},
FTPProxyKey: {
Description: "The FTP proxy value to configure on instances, in the `FTP_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
HTTPProxyKey: {
Description: "The HTTP proxy value to configure on instances, in the `HTTP_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
HTTPSProxyKey: {
Description: "The HTTPS proxy value to configure on instances, in the `HTTPS_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
NoProxyKey: {
Description: "List of domain addresses not to be proxied (comma-separated)",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
JujuFTPProxyKey: {
Description: "The FTP proxy value to pass to charms in the `JUJU_CHARM_FTP_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
JujuHTTPProxyKey: {
Description: "The HTTP proxy value to pass to charms in the `JUJU_CHARM_HTTP_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
JujuHTTPSProxyKey: {
Description: "The HTTPS proxy value to pass to charms in the `JUJU_CHARM_HTTPS_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
JujuNoProxyKey: {
Description: "List of domain addresses not to be proxied (comma-separated), may contain CIDRs. Passed to charms in the `JUJU_CHARM_NO_PROXY` environment variable",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SnapHTTPProxyKey: {
Description: "The HTTP proxy value for installing snaps",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SnapHTTPSProxyKey: {
Description: "The HTTPS proxy value for installing snaps",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SnapStoreProxyKey: {
Description: "The snap store proxy for installing snaps",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SnapStoreAssertionsKey: {
Description: "The assertions for the defined snap store proxy",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SnapStoreProxyURLKey: {
Description: "The URL for the defined snap store proxy",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ImageMetadataURLKey: {
Description: "The URL at which the metadata used to locate OS image ids is located",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ImageStreamKey: {
Description: `The simplestreams stream used to identify which image ids to search when starting an instance.`,
Documentation: `
Juju, by default, uses the slow-changing ‘released’ images when provisioning
machines. However, the image-stream option can be set to ‘daily’ to use more
up-to-date images, thus shortening the time it takes to perform APT package
upgrades.
`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ImageMetadataDefaultsDisabledKey: {
Description: `Whether default simplestreams sources are used for image metadata.`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
ContainerImageMetadataURLKey: {
Description: "The URL at which the metadata used to locate container OS image ids is located",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ContainerImageStreamKey: {
Description: `The simplestreams stream used to identify which image ids to search when starting a container.`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ContainerImageMetadataDefaultsDisabledKey: {
Description: `Whether default simplestreams sources are used for image metadata with containers.`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
"logging-config": {
Description: `The configuration string to use when configuring Juju agent logging`,
Documentation: "The logging config can be set to a (list of semicolon-separated)\n" +
"`<filter>=<verbosity level>` pairs, where `<filter>` can be any of the following:\n" +
" - `<root>` - matches all machine agent logs\n" +
" - `unit` - matches all unit agent logs\n" +
" - a module name, e.g. `juju.worker.apiserver`\n" +
" A module represents a single component of Juju, e.g. a worker. Generally,\n" +
" modules correspond one-to-one with Go packages in the Juju source tree. The\n" +
" module name is the value passed to `loggo.GetLogger` or\n" +
" `loggo.GetLoggerWithLabels`.\n" +
"\n" +
" Modules have a nested tree structure - for example, the `juju.api` module\n" +
" includes submodules `juju.api.application`, `juju.api.cloud`, etc. `<root>` is the\n" +
" root of this module tree.\n" +
"\n" +
" - a label, e.g. `#charmhub`\n" +
" Labels cut across the module tree, grouping various modules which deal with\n" +
" a certain feature or information flow. For example, the `#charmhub` label\n" +
" includes all modules involved in making a request to Charmhub.\n" +
"\n" +
"The currently supported labels are:\n" +
"| Label | Description |\n" +
"|-|-|\n" +
"| `#http` | HTTP requests |\n" +
"| `#metrics` | Metric outputs - use as a fallback when Prometheus isn't available |\n" +
"| `#charmhub` | Charmhub client and callers. |\n" +
"| `#cmr` | Cross model relations |\n" +
"| `#cmr-auth` | Authentication for cross model relations |\n" +
"| `#secrets` | Juju secrets |\n" +
"\n" +
"and where <verbosity level> can be, in decreasing order of severity:\n" +
"\n" +
"| Level | Description |\n" +
"|-|-|\n" +
"| `CRITICAL` | Indicates a severe failure which could bring down the system. |\n" +
"| `ERROR` | Indicates failure to complete a routine operation.\n" +
"| `WARNING` | Indicates something is not as expected, but this is not necessarily going to cause an error.\n" +
"| `INFO` | A regular log message intended for the user.\n" +
"| `DEBUG` | Information intended to assist developers in debugging.\n" +
"| `TRACE` | The lowest level - includes the full details of input args, return values, HTTP requests sent/received, etc. |\n" +
"\n" +
"When you set `logging-config` to `module=level`, then Juju saves that module's logs\n" +
"for the given severity level **and above.** For example, setting `logging-config`\n" +
"to `juju.worker.uniter=WARNING` will capture all `CRITICAL`, `ERROR` and `WARNING` logs\n" +
"for the uniter, but discard logs for lower severity levels (`INFO`, `DEBUG`, `TRACE`).\n" +
"\n" +
"**Examples:**\n" +
"\n" +
"To collect debug logs for the dbaccessor worker:\n" +
"\n" +
" juju model-config -m controller logging-config=\"juju.worker.dbaccessor=DEBUG\"\n" +
"\n" +
"To collect debug logs for the mysql/0 unit:\n" +
"\n" +
" juju model-config -m foo logging-config=\"unit.mysql/0=DEBUG\"\n" +
"\n" +
"To collect trace logs for Charmhub requests:\n" +
"\n" +
" juju model-config -m controller logging-config=\"#charmhub=TRACE\"\n" +
"\n" +
"To see what API requests are being made:\n" +
"\n" +
" juju model-config -m controller logging-config=\"juju.apiserver=DEBUG\"\n" +
"\n" +
"To view details about each API request:\n" +
"\n" +
" juju model-config -m controller logging-config=\"juju.apiserver=TRACE\"\n",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
NameKey: {
Description: "The name of the current model",
Type: configschema.Tstring,
Mandatory: true,
Immutable: true,
Group: configschema.EnvironGroup,
},
ProvisionerHarvestModeKey: {
// default: destroyed, but also depends on current setting of ProvisionerSafeModeKey
Description: `What to do with unknown machines (default destroyed)`,
Documentation: `
Juju keeps state on the running model and it can harvest (remove) machines which it deems are no longer required. This can help reduce running costs and keep the model tidy. Harvesting is guided by what "harvesting mode" has been set.
A Juju machine can be in one of four states:
- **Alive:** The machine is running and being used.
- **Dying:** The machine is in the process of being terminated by Juju, but hasn't yet finished.
- **Dead:** The machine has been successfully brought down by Juju, but is still being tracked for removal.
- **Unknown:** The machine exists, but Juju knows nothing about it.
Juju can be in one of several harvesting modes, in order of most conservative to most aggressive:
- **none:** Machines will never be harvested. This is a good choice if machines are managed via a process outside of Juju.
- **destroyed:** Machines will be harvested if i) Juju "knows" about them and
ii) they are 'Dead'.
- **unknown:** Machines will be harvested if Juju does not "know" about them ('Unknown' state). Use with caution in a mixed environment or one which may contain multiple instances of Juju.
- **all:** Machines will be harvested if Juju considers them to be 'destroyed' or 'unknown'.
The default mode is **destroyed**.
Below, the harvest mode key for the current model is set to 'none':
juju model-config provisioner-harvest-mode=none
`,
Type: configschema.Tstring,
Values: []interface{}{"all", "none", "unknown", "destroyed"},
Group: configschema.EnvironGroup,
},
NumProvisionWorkersKey: {
Description: "The number of provisioning workers to use per model",
Type: configschema.Tint,
Group: configschema.EnvironGroup,
},
NumContainerProvisionWorkersKey: {
Description: "The number of container provisioning workers to use per machine",
Type: configschema.Tint,
Group: configschema.EnvironGroup,
},
"proxy-ssh": {
// default: true
Description: `Whether SSH commands should be proxied through the API server`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
ResourceTagsKey: {
Description: "resource tags",
Type: configschema.Tattrs,
Group: configschema.EnvironGroup,
},
SSLHostnameVerificationKey: {
Description: "Whether SSL hostname verification is enabled (default true)",
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
StorageDefaultBlockSourceKey: {
Description: "The default block storage source for the model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
StorageDefaultFilesystemSourceKey: {
Description: "The default filesystem storage source for the model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
TestModeKey: {
Description: `Whether the model is intended for testing.
If true, accessing the charm store does not affect statistical
data of the store. (default false)`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
DisableTelemetryKey: {
Description: `Disable telemetry reporting of model information`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
ModeKey: {
Description: `Mode is a comma-separated list which sets the
mode the model should run in. So far only one mode is implemented:
- If 'requires-prompts' is present, clients will ask for confirmation before removing
potentially valuable resources.
(default "")`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SSHAllowKey: {
Description: `SSH allowlist is a comma-separated list of CIDRs from
which machines in this model will accept connections to the SSH service.
Currently only the aws & openstack providers support ssh-allow`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
SAASIngressAllowKey: {
Description: `Application-offer ingress allowlist is a comma-separated list of
CIDRs specifying what ingress can be applied to offers in this model.`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
TypeKey: {
Description: "Type of model, e.g. local, ec2",
Type: configschema.Tstring,
Mandatory: true,
Immutable: true,
Group: configschema.EnvironGroup,
},
UUIDKey: {
Description: "The UUID of the model",
Type: configschema.Tstring,
Group: configschema.JujuGroup,
Immutable: true,
},
AutomaticallyRetryHooks: {
Description: `Determines whether the uniter should automatically retry failed hooks`,
Documentation: `
Juju retries failed hooks automatically using an exponential backoff algorithm.
They will be retried after 5, 10, 20, 40 seconds up to a period of 5 minutes,
and then every 5 minutes. The logic behind this is that some hook errors are
caused by timing issues or the temporary unavailability of other applications -
automatic retry enables the Juju model to heal itself without troubling the
user.
However, in some circumstances, such as debugging charms, this behaviour can be
distracting and unwelcome. For this reason, it is possible to set the
automatically-retry-hooks option to ‘false’ to disable this behaviour. In this
case, users will have to manually retry any hook which fails, using the
command shown below, as with earlier versions of Juju.
Even with automatic retry enabled, it is still possible to retry manually
using:
juju resolved unit-name/#
`,
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
TransmitVendorMetricsKey: {
Description: "Determines whether metrics declared by charms deployed into this model are sent for anonymized aggregate analytics",
Type: configschema.Tbool,
Group: configschema.EnvironGroup,
},
NetBondReconfigureDelayKey: {
Description: "The amount of time in seconds to sleep between ifdown and ifup when bridging",
Type: configschema.Tint,
Group: configschema.EnvironGroup,
},
ContainerNetworkingMethodKey: {
Description: `Method of container networking setup - one of "provider", "local", or "" (auto-configure).`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
MaxActionResultsAge: {
Description: "The maximum age for action entries before they are pruned, in human-readable time format",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
MaxActionResultsSize: {
Description: "The maximum size for the action collection, in human-readable memory format",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
UpdateStatusHookInterval: {
Description: "How often to run the charm update-status hook, in human-readable time format (default 5m, range 1-60m)",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
EgressSubnets: {
Description: "Source address(es) for traffic originating from this model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
CloudInitUserDataKey: {
Description: `Cloud-init user-data (in yaml format) to be added to userdata for new machines created in this model`,
Documentation: `
The cloudinit-userdata allows the user to provide additional cloudinit data to
be included in the cloudinit data created by Juju.
Specifying a key will overwrite what juju puts in the cloudinit file with the
following caveats:
1. The users and bootcmd keys will cause an error.
2. The packages key will be appended to the packages listed by Juju.
3. The runcmd key will cause an error. You can specify preruncmd and
postruncmd keys to prepend and append the runcmd created by Juju.
**Use cases**
- setting a default locale for deployments that wish to use their own locale settings
- adding custom CA certificates for models that are sitting behind an HTTPS proxy
- adding a private apt mirror to enable private packages to be installed
- add SSH fingerprints to a deny list to prevent them from being printed to the console for security-focused deployments
**Background**
Juju uses cloud-init to customise instances once they have been provisioned by
the cloud. The cloudinit-userdata model configuration setting (model config)
allows you to tweak what happens to machines when they are created, via the
“user data” feature.
From the website:
> Cloud images are operating system templates and every instance starts out as
an identical clone of every other instance. It is the user data that gives
every cloud instance its personality and cloud-init is the tool that applies
user data to your instances automatically.
**How to provide custom user data to cloudinit**
Create a file, cloudinit-userdata.yaml, which starts with the cloudinit-userdata
key and data you wish to include in the cloudinit file. Note: juju reads the
value as a string, though formatted as YAML.
Template cloudinit-userdata.yaml:
cloudinit-userdata: |
<key>: <value>
<key>: <value>
Provide the path to your file to the model-config command:
juju model-config --file cloudinit-userdata.yaml
**How to read the current setting**
To read the current value, provide the cloudinit-userdata key to the
model-config command as a command-line parameter. Adding the --format yaml
option ensures that it is properly formatted.
juju model-config cloudinit-userdata --format yaml
Sample output:
cloudinit-userdata: |
packages:
- 'python-keystoneclient'
- 'python-glanceclient'
**How to clear the current custom user data**
Use the --reset option to the model-config command to clear anything that has
been previously set.
juju model-config --reset cloudinit-userdata
**Known issues**
- custom cloudinit-userdata must be passed via file, not as options on the command
line (like the config command)
`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
ContainerInheritPropertiesKey: {
Description: `List of properties to be copied from the host machine to new containers created in this model (comma-separated)`,
Documentation: `
The container-inherit-properties key allows for a limited set of parameters
enabled on a Juju machine to be inherited by any hosted containers (KVM guests
or LXD containers). The machine and container must be running the same series.
This key is only supported by the MAAS provider.
The parameters are:
- apt-primary
- apt-security
- apt-sources
- ca-certs
For MAAS v.2.5 or greater the parameters are:
- apt-sources
- ca-certs
For example:
juju model-config container-inherit-properties="ca-certs, apt-sources"
`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
BackupDirKey: {
Description: "Directory used to store the backup working directory",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
DefaultSpaceKey: {
Description: "The default network space used for application endpoints in this model",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
LXDSnapChannel: {
Description: "The channel to use when installing LXD from a snap (cosmic and later)",
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
CharmHubURLKey: {
Description: `The url for CharmHub API calls`,
Type: configschema.Tstring,
Group: configschema.EnvironGroup,
},
}
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package config
import (
"context"
"github.com/juju/schema"
)
// These constants define named sources of model config attributes.
// After a call to UpdateModelConfig, any attributes added/removed
// will have a source of JujuModelConfigSource.
const (
// JujuDefaultSource is used to label model config attributes that
// come from hard coded defaults.
JujuDefaultSource = "default"
// JujuControllerSource is used to label model config attributes that
// come from those associated with the controller.
JujuControllerSource = "controller"
// JujuRegionSource is used to label model config attributes that come from
// those associated with the region where the model is
// running.
JujuRegionSource = "region"
// JujuModelConfigSource is used to label model config attributes that
// have been explicitly set by the user.
JujuModelConfigSource = "model"
)
// ConfigValue encapsulates a configuration
// value and its source.
type ConfigValue struct {
// Value is the configuration value.
Value interface{}
// Source is the name of the inherited config
// source from where the value originates.
Source string
}
// ConfigValues is a map of configuration values keyed by attribute name.
type ConfigValues map[string]ConfigValue
// AllAttrs returns just the attribute values from the config.
func (c ConfigValues) AllAttrs() map[string]interface{} {
result := make(map[string]interface{})
for attr, val := range c {
result[attr] = val
}
return result
}
// ConfigSchemaSourceGetter is a type for getting a ConfigSchemaSource.
type ConfigSchemaSourceGetter func(context.Context, string) (ConfigSchemaSource, error)
// ConfigSchemaSource instances provide information on config attributes
// and the default attribute values.
type ConfigSchemaSource interface {
// ConfigSchema returns extra config attributes specific
// to this provider only.
ConfigSchema() schema.Fields
// ConfigDefaults returns the default values for the
// provider specific config attributes.
ConfigDefaults() schema.Defaults
}
// ModelDefaultAttributes is a map of configuration values to a list of possible
// values.
type ModelDefaultAttributes map[string]AttributeDefaultValues
// AttributeDefaultValues represents all the default values at each level for a given
// setting.
type AttributeDefaultValues struct {
// Default and Controller represent the values as set at those levels.
Default interface{} `json:"default,omitempty" yaml:"default,omitempty"`
Controller interface{} `json:"controller,omitempty" yaml:"controller,omitempty"`
// Regions is a slice of Region representing the values as set in each
// region.
Regions []RegionDefaultValue `json:"regions,omitempty" yaml:"regions,omitempty"`
}
// RegionDefaultValue holds the region information for each region in DefaultSetting.
type RegionDefaultValue struct {
// Name represents the region name for this specific setting.
Name string `json:"name" yaml:"name"`
// Value is the value of the setting this represents in the named region.
Value interface{} `json:"value" yaml:"value"`
}
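// Example (not part of the original source): a minimal sketch of how the
// defaults for a single attribute might be layered across the default,
// controller and region levels, and how a resolved value is reported with
// its source. The attribute name and values are illustrative only.
func exampleDefaultsAndSources() (ModelDefaultAttributes, ConfigValues) {
    defaults := ModelDefaultAttributes{
        "apt-mirror": AttributeDefaultValues{
            Default:    "http://archive.ubuntu.com/ubuntu/",
            Controller: "http://mirror.internal/ubuntu/",
            Regions: []RegionDefaultValue{
                {Name: "us-east-1", Value: "http://us-east-1.mirror.internal/ubuntu/"},
            },
        },
    }
    // A resolved model config value records where the value came from.
    values := ConfigValues{
        "apt-mirror": {Value: "http://us-east-1.mirror.internal/ubuntu/", Source: JujuRegionSource},
    }
    return defaults, values
}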
// Copyright 2016 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package config
import (
"context"
"fmt"
"github.com/juju/errors"
"github.com/juju/juju/controller"
)
// AggregateValidator is a Validator that will run zero or more validators,
// making sure that all succeed or returning the first error encountered.
type AggregateValidator struct {
// Validators is the slice of Validator's to run as part of the aggregate
// check.
Validators []Validator
}
// Validator is an interface for validating model configuration.
type Validator interface {
// Validate ensures that cfg is a valid configuration.
// If old is not nil, Validate should use it to determine
// whether a configuration change is valid.
//
// TODO(axw) Validate should just return an error. We should
// use a separate mechanism for updating config.
Validate(ctx context.Context, cfg, old *Config) (valid *Config, _ error)
}
// ValidatorFunc is utility type for declaring funcs that implement the
// Validator interface.
type ValidatorFunc func(ctx context.Context, cfg, old *Config) (*Config, error)
// ValidationError represents a specific error that has occurred while validating
// Config. It allows for the placement of one or more attributes with a reason
// as to why either their keys or values are not valid in the current context.
type ValidationError struct {
// InvalidAttrs is the list of attributes in the config that were invalid.
InvalidAttrs []string
// Reason is the human readable message for why the attributes are invalid.
Reason string
}
var (
// disallowedModelConfigAttrs is the set of config attributes that should
// not be allowed to appear in model config.
disallowedModelConfigAttrs = [...]string{
AuthorizedKeysKey,
AgentVersionKey,
AdminSecretKey,
CAPrivateKeyKey,
}
)
// Is implements the errors.Is interface. We implement Is so that the
// ValidationError also satisfies NotValid.
func (v *ValidationError) Is(err error) bool {
return err == errors.NotValid || err == v
}
// Error implements the error interface.
func (v *ValidationError) Error() string {
return fmt.Sprintf("config attributes %v not valid because %s", v.InvalidAttrs, v.Reason)
}
// Validate implements the Validator interface. This func will run all the
// validators in the aggregate until either a validator errors or there are no
// more validators to run. The returned config from each validator is passed
// into the subsequent validator.
func (a *AggregateValidator) Validate(ctx context.Context, cfg, old *Config) (*Config, error) {
var err error
for i, validator := range a.Validators {
cfg, err = validator.Validate(ctx, cfg, old)
if err != nil {
return cfg, fmt.Errorf("config validator %d failed: %w", i, err)
}
}
return cfg, nil
}
// Validate implements the Validator interface.
func (v ValidatorFunc) Validate(ctx context.Context, cfg, old *Config) (*Config, error) {
return v(ctx, cfg, old)
}
// NoControllerAttributesValidator implements a validator that asserts if the
// supplied config contains any controller specific configuration attributes. A
// ValidationError is returned if the config contains controller attributes.
func NoControllerAttributesValidator() Validator {
return ValidatorFunc(func(ctx context.Context, cfg, _ *Config) (*Config, error) {
invalidKeysError := ValidationError{
InvalidAttrs: []string{},
Reason: "controller only attributes not allowed",
}
allAttrs := cfg.AllAttrs()
for _, attr := range controller.ControllerOnlyConfigAttributes {
if _, has := allAttrs[attr]; has {
invalidKeysError.InvalidAttrs = append(invalidKeysError.InvalidAttrs, attr)
}
}
if len(invalidKeysError.InvalidAttrs) != 0 {
return cfg, &invalidKeysError
}
return cfg, nil
})
}
// ModelValidator returns a validator that is suitable for validating model
// config. Any attributes found that are not supported in Model Configuration
// are returned in a ValidationError.
func ModelValidator() Validator {
modelConfigValidator := ValidatorFunc(func(ctx context.Context, cfg, _ *Config) (*Config, error) {
invalidKeysError := ValidationError{
InvalidAttrs: []string{},
Reason: "attributes not allowed in model config",
}
allAttrs := cfg.AllAttrs()
for _, attr := range disallowedModelConfigAttrs {
if _, has := allAttrs[attr]; has {
invalidKeysError.InvalidAttrs = append(invalidKeysError.InvalidAttrs, attr)
}
}
if len(invalidKeysError.InvalidAttrs) != 0 {
return cfg, &invalidKeysError
}
return cfg, nil
})
return &AggregateValidator{
Validators: []Validator{
modelConfigValidator,
NoControllerAttributesValidator(),
},
}
}
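// Example (not part of the original source): a minimal sketch showing how a
// caller might combine the stock ModelValidator with a custom ValidatorFunc
// inside an AggregateValidator. The "no rename" rule is purely illustrative.
func exampleComposedValidator() Validator {
    // Reject config changes that attempt to rename the model.
    noNameChange := ValidatorFunc(func(ctx context.Context, cfg, old *Config) (*Config, error) {
        if old == nil {
            return cfg, nil
        }
        newName, _ := cfg.AllAttrs()[NameKey].(string)
        oldName, _ := old.AllAttrs()[NameKey].(string)
        if newName != oldName {
            return cfg, &ValidationError{
                InvalidAttrs: []string{NameKey},
                Reason:       "model name cannot be changed",
            }
        }
        return cfg, nil
    })
    return &AggregateValidator{
        Validators: []Validator{
            ModelValidator(),
            noNameChange,
        },
    }
}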
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package tags
import "github.com/juju/names/v6"
const (
// JujuTagPrefix is the prefix for Juju-managed tags.
JujuTagPrefix = "juju-"
// JujuModel is the tag name used for identifying the
// Juju model a resource is part of.
JujuModel = JujuTagPrefix + "model-uuid"
// JujuController is the tag name used for identifying the
// Juju controller that manages a resource.
JujuController = JujuTagPrefix + "controller-uuid"
// JujuIsController is the tag name used for determining
// whether a machine instance is a controller or not.
JujuIsController = JujuTagPrefix + "is-controller"
// JujuUnitsDeployed is the tag name used for identifying
// the units deployed to a machine instance. The value is
// a space-separated list of the unit names.
JujuUnitsDeployed = JujuTagPrefix + "units-deployed"
// JujuStorageInstance is the tag name used for identifying
// the Juju storage instance that an IaaS storage resource
// is assigned to.
JujuStorageInstance = JujuTagPrefix + "storage-instance"
// JujuStorageOwner is the tag name used for identifying
// the application or unit that owns the Juju storage instance
// that an IaaS storage resource is assigned to.
JujuStorageOwner = JujuTagPrefix + "storage-owner"
// JujuMachine is the tag name used for identifying
// the model and machine id corresponding to the
// provisioned machine instance.
JujuMachine = JujuTagPrefix + "machine-id"
)
// ResourceTagger is an interface that can provide resource tags.
type ResourceTagger interface {
// ResourceTags returns a set of resource tags, and a
// flag indicating whether or not any resource tags are
// available.
ResourceTags() (map[string]string, bool)
}
// ResourceTags returns tags to set on an infrastructure resource
// for the specified Juju environment.
func ResourceTags(modelTag names.ModelTag, controllerTag names.ControllerTag, taggers ...ResourceTagger) map[string]string {
allTags := make(map[string]string)
for _, tagger := range taggers {
tags, ok := tagger.ResourceTags()
if !ok {
continue
}
for k, v := range tags {
allTags[k] = v
}
}
// Some resources might only be for the controller.
if modelTag.Id() != "" {
allTags[JujuModel] = modelTag.Id()
}
allTags[JujuController] = controllerTag.Id()
return allTags
}
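// Example (not part of the original source): a minimal sketch showing how a
// ResourceTagger can contribute extra tags that are merged with the standard
// Juju model/controller tags. The UUIDs and the extra tag are illustrative.
type staticTagger map[string]string

// ResourceTags implements ResourceTagger.
func (t staticTagger) ResourceTags() (map[string]string, bool) {
    return t, len(t) > 0
}

func exampleResourceTags() map[string]string {
    modelTag := names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d")
    controllerTag := names.NewControllerTag("deadbeef-1bad-500d-9000-4b1d0d06f00d")
    // User-specified tags (e.g. from the resource-tags model config) are
    // merged in before the juju-model-uuid / juju-controller-uuid tags.
    return ResourceTags(modelTag, controllerTag, staticTagger{"owner": "team-a"})
}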
// Copyright 2011-2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"io"
"regexp"
"strings"
"github.com/juju/errors"
gjs "github.com/juju/gojsonschema"
"gopkg.in/yaml.v2"
)
var prohibitedSchemaKeys = map[string]bool{"$ref": true, "$schema": true}
var actionNameRule = regexp.MustCompile("^[a-z0-9](?:[a-z0-9-]*[a-z0-9])?$")
// GetActionNameRule returns the regexp used to validate action names, so it
// can be reused in other contexts.
func GetActionNameRule() *regexp.Regexp {
return actionNameRule
}
// Actions defines the available actions for the charm. Additional params
// may be added as metadata at a future time (e.g. version.)
type Actions struct {
ActionSpecs map[string]ActionSpec `yaml:"actions,omitempty"`
}
// NewActions returns an empty Actions value. Build this out further if it becomes necessary.
func NewActions() *Actions {
return &Actions{}
}
// ActionSpec is a definition of the parameters and traits of an Action.
// The Params map is expected to conform to JSON-Schema Draft 4 as defined at
// http://json-schema.org/draft-04/schema# (see http://json-schema.org/latest/json-schema-core.html)
type ActionSpec struct {
Description string
Parallel bool
ExecutionGroup string
Params map[string]interface{}
}
// ValidateParams validates the passed params map against the given ActionSpec
// and returns any error encountered.
// Usage:
//
// err := ch.Actions().ActionSpecs["snapshot"].ValidateParams(someMap)
func (spec *ActionSpec) ValidateParams(params map[string]interface{}) error {
// Load the schema from the Charm.
specLoader := gjs.NewGoLoader(spec.Params)
schema, err := gjs.NewSchema(specLoader)
if err != nil {
return err
}
// Load the params as a document to validate.
// If an empty map was passed, we need an empty map to validate against.
p := map[string]interface{}{}
if len(params) > 0 {
p = params
}
docLoader := gjs.NewGoLoader(p)
results, err := schema.Validate(docLoader)
if err != nil {
return err
}
if results.Valid() {
return nil
}
// Handle any errors generated by the Validate().
var errorStrings []string
for _, validationError := range results.Errors() {
errorStrings = append(errorStrings, validationError.String())
}
return errors.Errorf("validation failed: %s", strings.Join(errorStrings, "; "))
}
// InsertDefaults inserts the schema's default values in target using
// github.com/juju/gojsonschema. If a nil target is received, an empty map
// will be created as the target. The target is then mutated to include the
// defaults.
//
// The returned map will be the transformed or created target map.
func (spec *ActionSpec) InsertDefaults(target map[string]interface{}) (map[string]interface{}, error) {
specLoader := gjs.NewGoLoader(spec.Params)
schema, err := gjs.NewSchema(specLoader)
if err != nil {
return target, err
}
return schema.InsertDefaults(target)
}
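// Example (not part of the original source): a minimal sketch showing how
// InsertDefaults fills in missing parameters from a JSON-Schema "default"
// clause. The "outfile" parameter and its default are illustrative.
func exampleInsertDefaults() (map[string]interface{}, error) {
    spec := ActionSpec{
        Description: "Take a snapshot of the database.",
        Params: map[string]interface{}{
            "title":       "snapshot",
            "description": "Take a snapshot of the database.",
            "type":        "object",
            "properties": map[string]interface{}{
                "outfile": map[string]interface{}{
                    "type":    "string",
                    "default": "foo.bz2",
                },
            },
        },
    }
    // A nil target is allowed; an empty map is created and populated with
    // the schema defaults ("outfile" -> "foo.bz2" here).
    return spec.InsertDefaults(nil)
}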
// ReadActionsYaml builds an Actions spec from a charm's actions.yaml.
func ReadActionsYaml(charmName string, r io.Reader) (*Actions, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
result := &Actions{
ActionSpecs: map[string]ActionSpec{},
}
var unmarshaledActions map[string]map[string]interface{}
if err := yaml.Unmarshal(data, &unmarshaledActions); err != nil {
return nil, err
}
for name, actionSpec := range unmarshaledActions {
if valid := actionNameRule.MatchString(name); !valid {
return nil, fmt.Errorf("bad action name %s", name)
}
if reserved, reason := reservedName(charmName, name); reserved {
return nil, fmt.Errorf(
"cannot use action name %s: %s",
name, reason,
)
}
desc := "No description"
parallel := false
executionGroup := ""
thisActionSchema := map[string]interface{}{
"description": desc,
"type": "object",
"title": name,
"properties": map[string]interface{}{},
"additionalProperties": false,
}
for key, value := range actionSpec {
switch key {
case "description":
// These fields must be strings.
typed, ok := value.(string)
if !ok {
return nil, errors.Errorf("value for schema key %q must be a string", key)
}
thisActionSchema[key] = typed
desc = typed
case "title":
// These fields must be strings.
typed, ok := value.(string)
if !ok {
return nil, errors.Errorf("value for schema key %q must be a string", key)
}
thisActionSchema[key] = typed
case "required":
typed, ok := value.([]interface{})
if !ok {
return nil, errors.Errorf("value for schema key %q must be a YAML list", key)
}
thisActionSchema[key] = typed
case "parallel":
typed, ok := value.(bool)
if !ok {
return nil, errors.Errorf("value for schema key %q must be a bool", key)
}
parallel = typed
case "execution-group":
typed, ok := value.(string)
if !ok {
return nil, errors.Errorf("value for schema key %q must be a string", key)
}
executionGroup = typed
case "params":
// Clean any map[interface{}]interface{}s out so they don't
// cause problems with BSON serialization later.
cleansedParams, err := cleanse(value)
if err != nil {
return nil, err
}
// JSON-Schema must be a map
typed, ok := cleansedParams.(map[string]interface{})
if !ok {
return nil, errors.New("params failed to parse as a map")
}
thisActionSchema["properties"] = typed
default:
// In case this has nested maps, we must clean them out.
typed, err := cleanse(value)
if err != nil {
return nil, err
}
thisActionSchema[key] = typed
}
}
// Make sure the new Params doc conforms to JSON-Schema
// Draft 4 (http://json-schema.org/latest/json-schema-core.html)
schemaLoader := gjs.NewGoLoader(thisActionSchema)
_, err := gjs.NewSchema(schemaLoader)
if err != nil {
return nil, errors.Annotatef(err, "invalid params schema for action schema %s", name)
}
// Now assign the resulting schema to the final entry for the result.
result.ActionSpecs[name] = ActionSpec{
Description: desc,
Parallel: parallel,
ExecutionGroup: executionGroup,
Params: thisActionSchema,
}
}
return result, nil
}
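// Example (not part of the original source): a minimal sketch of reading an
// actions.yaml document for a hypothetical "snapshot" action on a
// hypothetical "postgresql" charm and validating a params map against it.
func exampleReadActionsYaml() error {
    const actionsYaml = `
snapshot:
  description: Take a snapshot of the database.
  parallel: true
  execution-group: backup
  params:
    outfile:
      type: string
      description: The file to write out to.
  required: [outfile]
`
    actions, err := ReadActionsYaml("postgresql", strings.NewReader(actionsYaml))
    if err != nil {
        return err
    }
    spec := actions.ActionSpecs["snapshot"]
    // Validate a concrete invocation against the parsed schema.
    return spec.ValidateParams(map[string]interface{}{"outfile": "backup.bz2"})
}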
// cleanse rejects schemas containing references or maps keyed with non-
// strings, and coerces acceptable maps to contain only maps with string keys.
func cleanse(input interface{}) (interface{}, error) {
switch typedInput := input.(type) {
// In this case, recurse in.
case map[string]interface{}:
newMap := make(map[string]interface{})
for key, value := range typedInput {
if prohibitedSchemaKeys[key] {
return nil, fmt.Errorf("schema key %q not compatible with this version of juju", key)
}
newValue, err := cleanse(value)
if err != nil {
return nil, err
}
newMap[key] = newValue
}
return newMap, nil
// Coerce keys to strings and error out if there's a problem; then recurse.
case map[interface{}]interface{}:
newMap := make(map[string]interface{})
for key, value := range typedInput {
typedKey, ok := key.(string)
if !ok {
return nil, errors.New("map keyed with non-string value")
}
newMap[typedKey] = value
}
return cleanse(newMap)
// Recurse
case []interface{}:
newSlice := make([]interface{}, 0)
for _, sliceValue := range typedInput {
newSliceValue, err := cleanse(sliceValue)
if err != nil {
return nil, errors.New("map keyed with non-string value")
}
newSlice = append(newSlice, newSliceValue)
}
return newSlice, nil
// Other kinds of values are OK.
default:
return input, nil
}
}
// recurseMapOnKeys returns the value of a map keyed recursively by the
// strings given in "keys". Thus, recurseMapOnKeys({a,b}, {a:{b:{c:d}}})
// would return {c:d}.
func recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) {
key, rest := keys[0], keys[1:]
answer, ok := params[key]
// If we're out of keys, we have our answer.
if len(rest) == 0 {
return answer, ok
}
// If we're not out of keys, but we tried a key that wasn't in the
// map, there's no answer.
if !ok {
return nil, false
}
switch typed := answer.(type) {
// If our value is a map[s]i{}, we can keep recursing.
case map[string]interface{}:
return recurseMapOnKeys(keys[1:], typed)
// If it's a map[i{}]i{}, we need to check whether it's a map[s]i{}.
case map[interface{}]interface{}:
m := make(map[string]interface{})
for k, v := range typed {
if tK, ok := k.(string); ok {
m[tK] = v
} else {
// If it's not, we don't have something we
// can work with.
return nil, false
}
}
// If it is, recurse into it.
return recurseMapOnKeys(keys[1:], m)
// Otherwise, we're trying to recurse into something we don't know
// how to deal with, so our answer is that we don't have an answer.
default:
return nil, false
}
}
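// Example (not part of the original source): a small sketch of how
// recurseMapOnKeys walks nested maps, including maps that YAML decoded as
// map[interface{}]interface{}. The keys and value are illustrative.
func exampleRecurseMapOnKeys() (interface{}, bool) {
    params := map[string]interface{}{
        "outer": map[interface{}]interface{}{
            "inner": "value",
        },
    }
    // Returns "value", true.
    return recurseMapOnKeys([]string{"outer", "inner"}, params)
}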
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package assumes
import "github.com/juju/version/v2"
var (
_ Expression = (*FeatureExpression)(nil)
_ Expression = (*CompositeExpression)(nil)
)
// ExpressionType represents the type of an assumes expression.
type ExpressionType string
const (
AnyOfExpression ExpressionType = "any-of"
AllOfExpression ExpressionType = "all-of"
)
// Expression is an interface implemented by all expression types in this package.
type Expression interface {
Type() ExpressionType
}
// VersionConstraint describes a constraint for required feature versions.
type VersionConstraint string
const (
VersionGTE VersionConstraint = ">="
VersionLT VersionConstraint = "<"
)
// FeatureExpression describes a feature that is required by the charm in order
// to be successfully deployed. Feature expressions may additionally specify a
// version constraint.
type FeatureExpression struct {
// The name of the feature.
Name string
// A feature within an assumes block may optionally specify a version
// constraint.
Constraint VersionConstraint
Version *version.Number
// The raw, unprocessed version string for serialization purposes.
rawVersion string
}
// Type implements Expression.
func (FeatureExpression) Type() ExpressionType { return ExpressionType("feature") }
// CompositeExpression describes a composite expression that applies some
// operator to a sub-expression list.
type CompositeExpression struct {
ExprType ExpressionType
SubExpressions []Expression
}
// Type implements Expression.
func (expr CompositeExpression) Type() ExpressionType { return expr.ExprType }
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package assumes
import (
"encoding/json"
"fmt"
"regexp"
"strings"
"github.com/juju/errors"
"github.com/juju/version/v2"
"gopkg.in/yaml.v2"
)
var (
featureWithoutVersion = regexp.MustCompile(`^[a-z][a-z0-9-]*?[a-z0-9]+$`)
featureWithVersion = regexp.MustCompile(`^([a-z][a-z0-9-]*?[a-z0-9]+)\s*?(>=|<)\s*?([\S\.]+)$`)
)
// ExpressionTree is a wrapper for representing a (possibly nested) "assumes"
// block declaration.
type ExpressionTree struct {
Expression Expression
}
// parseAssumesExpressionTree recursively parses an assumes expression tree
// and returns back an Expression instance for it.
//
// The root of the expression tree consists of a list of (potentially nested)
// assumes expressions that form an implicit All-Of composite expression.
//
// For example:
// assumes:
// - foo
// - bar >= 1.42
// - any-of: ... (nested expr)
// - all-of: ... (nested expr)
func parseAssumesExpressionTree(rootExprList []interface{}) (Expression, error) {
var (
rootExpr = CompositeExpression{
ExprType: AllOfExpression,
SubExpressions: make([]Expression, len(rootExprList)),
}
err error
)
for i, exprDecl := range rootExprList {
if rootExpr.SubExpressions[i], err = parseAssumesExpr(exprDecl); err != nil {
return nil, errors.Annotatef(err, `parsing expression %d in top level "assumes" block`, i+1)
}
}
return rootExpr, nil
}
// parseAssumesExpr returns an Expression instance that corresponds to the
// provided expression declaration. As per the assumes spec, the parser
// supports the following expression types:
//
// 1) feature request expression with optional version constraint (e.g. foo < 1)
// 2) any-of composite expression
// 3) all-of composite expression
func parseAssumesExpr(exprDecl interface{}) (Expression, error) {
// Is it a composite expression?
if exprAsMap, isMap := exprDecl.(map[interface{}]interface{}); isMap {
coercedMap := make(map[string]interface{})
for key, val := range exprAsMap {
keyStr, ok := key.(string)
if !ok {
return nil, errors.New(`malformed composite expression`)
}
coercedMap[keyStr] = val
}
return parseCompositeExpr(coercedMap)
} else if exprAsMap, isMap := exprDecl.(map[string]interface{}); isMap {
return parseCompositeExpr(exprAsMap)
}
// Is it a feature request expression?
if exprAsString, isString := exprDecl.(string); isString {
return parseFeatureExpr(exprAsString)
}
return nil, errors.New(`expected a feature, "any-of" or "all-of" expression`)
}
// parseCompositeExpr extracts and returns a CompositeExpression from the
// provided expression declaration.
//
// The EBNF grammar for a composite expression is:
//
// composite-expr-decl: ("any-of"|"all-of") expr-decl-list
//
// expr-decl-list: expr-decl+
//
// expr-decl: feature-expr-decl |
// composite-expr-decl
//
// The function expects a map with either an "any-of" or "all-of" key and
// a value that is a slice of sub-expressions.
func parseCompositeExpr(exprDecl map[string]interface{}) (CompositeExpression, error) {
if len(exprDecl) != 1 {
return CompositeExpression{}, errors.New("malformed composite expression")
}
var (
compositeExpr CompositeExpression
subExprDecls interface{}
err error
)
if subExprDecls = exprDecl["any-of"]; subExprDecls != nil {
compositeExpr.ExprType = AnyOfExpression
} else if subExprDecls = exprDecl["all-of"]; subExprDecls != nil {
compositeExpr.ExprType = AllOfExpression
} else {
return CompositeExpression{}, errors.New(`malformed composite expression; expected an "any-of" or "all-of" block`)
}
subExprDeclList, isList := subExprDecls.([]interface{})
if !isList {
return CompositeExpression{}, errors.Errorf(`malformed %q expression; expected a list of sub-expressions`, string(compositeExpr.ExprType))
}
compositeExpr.SubExpressions = make([]Expression, len(subExprDeclList))
for i, subExprDecl := range subExprDeclList {
if compositeExpr.SubExpressions[i], err = parseAssumesExpr(subExprDecl); err != nil {
return CompositeExpression{}, errors.Annotatef(err, "parsing %q expression", string(compositeExpr.ExprType))
}
}
return compositeExpr, nil
}
// parseFeatureExpr extracts and returns a FeatureExpression from the provided
// expression declaration.
//
// The EBNF grammar for feature expressions is:
//
// feature-expr-decl: feature-ident |
// feature-ident version-constraint version-number
//
// version-constraint: ">=" | "<"
// feature-ident: [a-z][a-z0-9-]*[a-z0-9]+
// version-number: \d+ (‘.’ \d+ (‘.’ \d+)?)?
func parseFeatureExpr(exprDecl string) (FeatureExpression, error) {
exprDecl = strings.TrimSpace(exprDecl)
// Is this a feature name without a version constraint?
if featureWithoutVersion.MatchString(exprDecl) {
return FeatureExpression{Name: exprDecl}, nil
}
matches := featureWithVersion.FindAllStringSubmatch(exprDecl, 1)
if len(matches) == 1 {
featName, constraint, versionStr := matches[0][1], matches[0][2], matches[0][3]
ver, err := version.ParseNonStrict(versionStr)
if err != nil {
return FeatureExpression{}, errors.Annotatef(err, "malformed feature expression %q", exprDecl)
}
return FeatureExpression{
Name: featName,
Constraint: VersionConstraint(constraint),
Version: &ver,
rawVersion: versionStr,
}, nil
}
return FeatureExpression{}, errors.Errorf("malformed feature expression %q", exprDecl)
}
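// Example (not part of the original source): a minimal sketch of parsing a
// feature expression with a version constraint. The feature name "storage"
// and the version are illustrative.
func exampleParseFeatureExpr() (FeatureExpression, error) {
    // Yields Name: "storage", Constraint: VersionGTE, Version: 3.2.
    return parseFeatureExpr("storage >= 3.2")
}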
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (tree *ExpressionTree) UnmarshalYAML(unmarshalFn func(interface{}) error) error {
var exprTree []interface{}
if err := unmarshalFn(&exprTree); err != nil {
if _, isTypeErr := err.(*yaml.TypeError); isTypeErr {
return errors.New(`malformed "assumes" block; expected an expression list`)
}
return errors.Annotate(err, "decoding assumes block")
}
expr, err := parseAssumesExpressionTree(exprTree)
if err != nil {
return errors.Trace(err)
}
tree.Expression = expr
return nil
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (tree *ExpressionTree) UnmarshalJSON(data []byte) error {
var exprTree []interface{}
if err := json.Unmarshal(data, &exprTree); err != nil {
return errors.Annotate(err, "decoding assumes block")
}
expr, err := parseAssumesExpressionTree(exprTree)
if err != nil {
return errors.Trace(err)
}
tree.Expression = expr
return nil
}
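// Example (not part of the original source): a minimal sketch of decoding an
// "assumes" block from YAML into an ExpressionTree. The wrapper struct and
// the feature names are illustrative; in practice the tree is embedded in
// the charm metadata type.
func exampleDecodeAssumes() (*ExpressionTree, error) {
    const doc = `
assumes:
  - juju >= 3.1
  - any-of:
      - k8s-api
      - all-of:
          - storage
          - networks
`
    var meta struct {
        Assumes *ExpressionTree `yaml:"assumes"`
    }
    if err := yaml.Unmarshal([]byte(doc), &meta); err != nil {
        return nil, err
    }
    return meta.Assumes, nil
}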
// MarshalYAML implements the yaml.Marshaler interface.
func (tree *ExpressionTree) MarshalYAML() (interface{}, error) {
if tree == nil || tree.Expression == nil {
return nil, nil
}
return marshalAssumesExpressionTree(tree)
}
// MarshalJSON implements the json.Marshaler interface.
func (tree *ExpressionTree) MarshalJSON() ([]byte, error) {
if tree == nil || tree.Expression == nil {
return nil, nil
}
exprList, err := marshalAssumesExpressionTree(tree)
if err != nil {
return nil, errors.Trace(err)
}
return json.Marshal(exprList)
}
func marshalAssumesExpressionTree(tree *ExpressionTree) (interface{}, error) {
// The root of the expression tree (top level of the assumes block) is
// always an implicit "all-of". We need to marshal it into a map and
// extract the expression list.
root, err := marshalExpr(tree.Expression)
if err != nil {
return nil, err
}
rootMap, ok := root.(map[string]interface{})
if !ok {
return nil, errors.New(`unexpected serialized output for top-level "assumes" block`)
}
exprList, ok := rootMap[string(AllOfExpression)]
if !ok {
return nil, errors.New(`unexpected serialized output for top-level "assumes" block`)
}
return exprList, nil
}
func marshalExpr(expr Expression) (interface{}, error) {
featExpr, ok := expr.(FeatureExpression)
if ok {
if featExpr.Version == nil {
return featExpr.Name, nil
}
// If we retained the raw version use that; otherwise convert
// the parsed version to a string.
if featExpr.rawVersion != "" {
return fmt.Sprintf("%s %s %s", featExpr.Name, featExpr.Constraint, featExpr.rawVersion), nil
}
return fmt.Sprintf("%s %s %s", featExpr.Name, featExpr.Constraint, featExpr.Version.String()), nil
}
// This is a composite expression
compExpr, ok := expr.(CompositeExpression)
if !ok {
return nil, errors.Errorf("unexpected expression type %s", expr.Type())
}
var (
exprList = make([]interface{}, len(compExpr.SubExpressions))
err error
)
for i, subExpr := range compExpr.SubExpressions {
if exprList[i], err = marshalExpr(subExpr); err != nil {
return nil, err
}
}
return map[string]interface{}{
string(compExpr.ExprType): exprList,
}, nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"strings"
"github.com/juju/errors"
"github.com/juju/juju/core/arch"
)
// Base represents an OS/Channel.
// Bases can also be converted to and from a series string.
type Base struct {
Name string `json:"name,omitempty"`
Channel Channel `json:"channel,omitempty"`
Architectures []string `json:"architectures,omitempty"`
}
// Validate returns with no error when the Base is valid.
func (b Base) Validate() error {
if b.Name == "" {
return errors.NotValidf("base without name")
}
if b.Channel.Empty() {
return errors.NotValidf("channel")
}
return nil
}
// String returns the string representation of the Base.
func (b Base) String() string {
if b.Channel.Empty() {
panic("cannot stringify invalid base. Bases should always be validated before stringifying")
}
str := fmt.Sprintf("%s@%s", b.Name, b.Channel)
if len(b.Architectures) > 0 {
str = fmt.Sprintf("%s on %s", str, strings.Join(b.Architectures, ", "))
}
return str
}
// ParseBase parses a base string in the form "os@track/risk/branch",
// with an optional list of architectures.
func ParseBase(s string, archs ...string) (Base, error) {
var err error
base := Base{}
segments := strings.Split(s, "@")
if len(segments) != 2 {
return Base{}, errors.NotValidf("base string must contain exactly one @. %q", s)
}
base.Name = strings.ToLower(segments[0])
channelName := segments[1]
if channelName != "" {
base.Channel, err = ParseChannelNormalize(channelName)
if err != nil {
return Base{}, errors.Annotatef(err, "malformed channel in base string %q", s)
}
}
base.Architectures = make([]string, len(archs))
for i, v := range archs {
base.Architectures[i] = arch.NormaliseArch(v)
}
err = base.Validate()
if err != nil {
var a string
if len(base.Architectures) > 0 {
a = fmt.Sprintf(" with architectures %q", strings.Join(base.Architectures, ","))
}
return Base{}, errors.Annotatef(err, "invalid base string %q%s", s, a)
}
return base, nil
}
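// Example (not part of the original source): a minimal sketch of parsing a
// base string with an architecture. The base and architecture values are
// illustrative.
func exampleParseBase() (Base, error) {
    // Yields Base{Name: "ubuntu", Channel: 22.04/stable, Architectures: ["amd64"]}.
    return ParseBase("ubuntu@22.04/stable", "amd64")
}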
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"path/filepath"
"strings"
)
// The Bundle interface is implemented by any type that
// may be handled as a bundle. It encapsulates all
// the data of a bundle.
type Bundle interface {
// Data returns the contents of the bundle's bundle.yaml file.
Data() *BundleData
// BundleBytes returns the raw bytes content of a bundle
BundleBytes() []byte
// ReadMe returns the contents of the bundle's README.md file.
ReadMe() string
// ContainsOverlays returns true if the bundle contains any overlays.
ContainsOverlays() bool
}
// IsValidLocalCharmOrBundlePath returns true if path is valid for reading a
// local charm or bundle.
func IsValidLocalCharmOrBundlePath(path string) bool {
return strings.HasPrefix(path, ".") || filepath.IsAbs(path)
}
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"bytes"
"io"
ziputil "github.com/juju/utils/v4/zip"
)
type BundleArchive struct {
zopen zipOpener
Path string
data *BundleData
bundleBytes []byte
readMe string
containsOverlays bool
}
// ReadBundleArchive reads a bundle archive from the given file path.
func ReadBundleArchive(path string) (*BundleArchive, error) {
a, err := readBundleArchive(newZipOpenerFromPath(path))
if err != nil {
return nil, err
}
a.Path = path
return a, nil
}
// ReadBundleArchiveBytes reads a bundle archive from the given byte
// slice.
func ReadBundleArchiveBytes(data []byte) (*BundleArchive, error) {
zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data)))
return readBundleArchive(zopener)
}
// ReadBundleArchiveFromReader returns a BundleArchive that uses
// r to read the bundle. The given size must hold the number
// of available bytes in the file.
//
// Note that the caller is responsible for closing r - methods on
// the returned BundleArchive may fail after that.
func ReadBundleArchiveFromReader(r io.ReaderAt, size int64) (*BundleArchive, error) {
return readBundleArchive(newZipOpenerFromReader(r, size))
}
func readBundleArchive(zopen zipOpener) (*BundleArchive, error) {
a := &BundleArchive{
zopen: zopen,
}
zipr, err := zopen.openZip()
if err != nil {
return nil, err
}
defer zipr.Close()
reader, err := zipOpenFile(zipr, "bundle.yaml")
if err != nil {
return nil, err
}
b, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
a.bundleBytes = b
a.data, a.containsOverlays, err = ReadBaseFromMultidocBundle(b)
reader.Close()
if err != nil {
return nil, err
}
reader, err = zipOpenFile(zipr, "README.md")
if err != nil {
return nil, err
}
readMe, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
a.readMe = string(readMe)
return a, nil
}
// Data implements Bundle.Data.
func (a *BundleArchive) Data() *BundleData {
return a.data
}
// BundleBytes implements Bundle.BundleBytes.
func (a *BundleArchive) BundleBytes() []byte {
return a.bundleBytes
}
// ReadMe implements Bundle.ReadMe.
func (a *BundleArchive) ReadMe() string {
return a.readMe
}
// ContainsOverlays implements Bundle.ContainsOverlays.
func (a *BundleArchive) ContainsOverlays() bool {
return a.containsOverlays
}
// ExpandTo expands the bundle archive into dir, creating it if necessary.
// If any errors occur during the expansion procedure, the process will
// abort.
func (a *BundleArchive) ExpandTo(dir string) error {
zipr, err := a.zopen.openZip()
if err != nil {
return err
}
defer zipr.Close()
return ziputil.ExtractAll(zipr.Reader, dir)
}
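// Example (not part of the original source): a minimal sketch of reading a
// bundle archive from disk and expanding it into a directory. The file
// paths are illustrative.
func exampleReadAndExpandBundle() (*BundleData, error) {
    archive, err := ReadBundleArchive("./mybundle.zip")
    if err != nil {
        return nil, err
    }
    // Unpack the archive contents alongside it before inspecting the data.
    if err := archive.ExpandTo("./mybundle"); err != nil {
        return nil, err
    }
    return archive.Data(), nil
}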
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"encoding/json"
"fmt"
"io"
"net"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/juju/utils/v4/keyvalues"
)
const kubernetes = "kubernetes"
// BundleData holds the contents of the bundle.
type BundleData struct {
// Type is used to signify whether this bundle is for IAAS or Kubernetes deployments.
// Valid values are "kubernetes" or "", with empty signifying an IAAS bundle.
Type string `json:"bundle,omitempty" yaml:"bundle,omitempty"`
// Applications holds one entry for each application
// that the bundle will create, indexed by
// the application name.
Applications map[string]*ApplicationSpec `json:"applications,omitempty" yaml:"applications,omitempty"`
// Machines holds one entry for each machine referred to
// by unit placements. These will be mapped onto actual
// machines at bundle deployment time.
// It is an error if a machine is specified but
// not referred to by a unit placement directive.
Machines map[string]*MachineSpec `json:",omitempty" yaml:",omitempty"`
// Saas holds one entry for each software as a service (SAAS) for cross
// model relation (CMR). These will be mapped to the consuming side when
// deploying a bundle.
Saas map[string]*SaasSpec `json:"saas,omitempty" yaml:"saas,omitempty"`
// DefaultBase holds the default base to use when the bundle deploys
// applications. A base defined for an application takes precedence.
DefaultBase string `json:"default-base,omitempty" yaml:"default-base,omitempty"`
// Relations holds a slice of 2-element slices,
// each specifying a relation between two applications.
// Each two-element slice holds two endpoints,
// each specified as either colon-separated
// (application, relation) pair or just an application name.
// The relation is made between each. If the relation
// name is omitted, it will be inferred from the available
// relations defined in the applications' charms.
Relations [][]string `json:",omitempty" yaml:",omitempty"`
// Whitelisted set of tags to categorize bundles, as we do with charms.
Tags []string `json:",omitempty" yaml:",omitempty"`
// Short paragraph explaining what the bundle is useful for.
Description string `json:",omitempty" yaml:",omitempty"`
}
// SaasSpec represents a single software as a service (SAAS) node.
// This will be mapped to consuming of offers from a bundle deployment.
type SaasSpec struct {
URL string `json:",omitempty" yaml:",omitempty"`
}
// MachineSpec represents a notional machine that will be mapped
// onto an actual machine at bundle deployment time.
type MachineSpec struct {
Constraints string `json:",omitempty" yaml:",omitempty"`
Annotations map[string]string `json:",omitempty" yaml:",omitempty"`
Base string `json:",omitempty" yaml:",omitempty"`
}
// ApplicationSpec represents a single application that will
// be deployed as part of the bundle.
type ApplicationSpec struct {
// Charm holds the charm URL of the charm to
// use for the given application.
Charm string `yaml:",omitempty" json:",omitempty"`
// Channel describes the preferred channel to use when deploying a
// remote charm.
Channel string `yaml:"channel,omitempty" json:"channel,omitempty"`
// Revision describes the revision of the charm to use when deploying.
Revision *int `yaml:"revision,omitempty" json:"revision,omitempty"`
// Base is the base to use when deploying the application.
Base string `yaml:",omitempty" json:",omitempty"`
// Resources is the set of resource revisions to deploy for the
// application. Bundles only support charm store resources and not ones
// that were uploaded to the controller.
// A resource value can either be an integer revision number,
// or a string holding a path to a local resource file.
Resources map[string]interface{} `yaml:",omitempty" json:",omitempty"`
// NumUnits holds the number of units of the
// application that will be deployed.
// For Kubernetes bundles, this will be an alias for Scale.
//
// For a subordinate application, this actually represents
// an arbitrary number of units depending on
// the application it is related to.
NumUnits int `yaml:"num_units,omitempty" json:",omitempty"`
// Scale_ holds the number of pods required for the application.
// For IAAS bundles, this will be an alias for NumUnits.
Scale_ int `yaml:"scale,omitempty" json:"scale,omitempty"`
// To is interpreted according to whether this is an
// IAAS or Kubernetes bundle.
//
// For Kubernetes bundles, the use of Placement is preferred.
// To must be a single-valued list representing label key/value pairs
// used as a node selector.
//
// For IAAS bundles, To may hold up to NumUnits members with
// each member specifying a desired placement
// for the respective unit of the application.
//
// In regular-expression-like notation, each
// element matches the following pattern:
//
// (<containertype>:)?(<unit>|<machine>|new)
//
// If containertype is specified, the unit is deployed
// into a new container of that type, otherwise
// it will be "hulk-smashed" into the specified location,
// by co-locating it with any other units that happen to
// be there, which may result in unintended behavior.
//
// The second part (after the colon) specifies where
// the new unit should be placed - it may refer to
// a unit of another application specified in the bundle,
// a machine id specified in the machines section,
// or the special name "new" which specifies a newly
// created machine.
//
// A unit placement may be specified with an application name only,
// in which case its unit number is assumed to
// be one more than the unit number of the previous
// unit in the list with the same application, or zero
// if there were none.
//
// If there are fewer elements in To than NumUnits,
// the last element is replicated to fill it. If there
// are no elements (or To is omitted), "new" is replicated.
//
// For example:
//
// wordpress/0 wordpress/1 lxc:0 kvm:new
//
// specifies that the first two units get hulk-smashed
// onto the first two units of the wordpress application,
// the third unit gets allocated onto an lxc container
// on machine 0, and subsequent units get allocated
// on kvm containers on new machines.
//
// The above example is the same as this:
//
// wordpress wordpress lxc:0 kvm:new
To []string `json:",omitempty" yaml:",omitempty"`
// Placement_ holds a model selector/affinity expression used to specify
// pod placement for Kubernetes applications.
// Not relevant for IAAS applications.
Placement_ string `json:"placement,omitempty" yaml:"placement,omitempty"`
// Expose holds whether the application must be exposed.
Expose bool `json:",omitempty" yaml:",omitempty"`
// ExposedEndpoints defines on a per-endpoint basis, the list of space
// names and/or CIDRs that should be able to access the ports opened
// for an endpoint once the application is exposed. The keys of the map
// are endpoint names or the special empty ("") value that is used as a
// placeholder for referring to all endpoints.
//
// This attribute cannot be used in tandem with the 'expose: true'
// flag; a validation error will be raised if both fields are specified.
ExposedEndpoints map[string]ExposedEndpointSpec `json:"exposed-endpoints,omitempty" yaml:"exposed-endpoints,omitempty" source:"overlay-only"`
// Options holds the configuration values
// to apply to the new application. They should
// be compatible with the charm configuration.
Options map[string]interface{} `json:",omitempty" yaml:",omitempty"`
// Annotations holds any annotations to apply to the
// application when deployed.
Annotations map[string]string `json:",omitempty" yaml:",omitempty"`
// Constraints holds the default constraints to apply
// when creating new machines for units of the application.
// This is ignored for units with explicit placement directives.
Constraints string `json:",omitempty" yaml:",omitempty"`
// Storage holds the constraints for storage to assign
// to units of the application.
Storage map[string]string `json:",omitempty" yaml:",omitempty"`
// Devices holds the constraints for devices to assign
// to units of the application.
Devices map[string]string `json:",omitempty" yaml:",omitempty"`
// EndpointBindings maps how endpoints are bound to spaces
EndpointBindings map[string]string `json:"bindings,omitempty" yaml:"bindings,omitempty"`
// Offers holds one entry for each exported offer for this application
// where the key is the offer name.
Offers map[string]*OfferSpec `json:"offers,omitempty" yaml:"offers,omitempty" source:"overlay-only"`
// Plan specifies the plan under which the application is to be deployed.
// If "default", the default plan will be used for the charm
Plan string `json:"plan,omitempty" yaml:"plan,omitempty"`
// RequiresTrust indicates that the application requires access to
// cloud credentials and must therefore be explicitly trusted by the
// operator before it can be deployed.
RequiresTrust bool `json:"trust,omitempty" yaml:"trust,omitempty"`
}
// maskedBundleData and bundleData are here to provide a way to normalize the
// bundle data when unmarshalling via a codec.
// By abusing the types we can prevent a recursive function call so that the
// unmarshalling doesn't call itself.
//
// In reality this is wrong in several ways:
// 1. Why does the model type have anything to do with how it's transferred over
// the wire to other consumers? The bundle data should have a package of
// wire protocols (DTOs) that can be packed and unpacked into a bundle/charm
// model. That model should be pure!
// 2. This should be a two-step process: unmarshal and then normalize.
type maskedBundleData BundleData
type bundleData struct {
maskedBundleData `yaml:",inline" json:",inline"`
}
// UnmarshalJSON implements the json.Unmarshaler interface.
func (bd *BundleData) UnmarshalJSON(b []byte) error {
var in bundleData
if err := json.Unmarshal(b, &in); err != nil {
return err
}
*bd = BundleData(in.maskedBundleData)
return bd.normalizeData()
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
func (bd *BundleData) UnmarshalYAML(f func(interface{}) error) error {
var in bundleData
if err := f(&in); err != nil {
return err
}
*bd = BundleData(in.maskedBundleData)
return bd.normalizeData()
}
func (bd *BundleData) normalizeData() error {
if bd.Applications == nil {
return nil
}
for appName, app := range bd.Applications {
if app == nil {
continue
}
// Kubernetes bundles use "scale" instead of "num_units".
if app.Scale_ > 0 && app.NumUnits > 0 {
return fmt.Errorf("cannot specify both scale and num_units for application %q", appName)
}
if app.Scale_ > 0 && app.NumUnits == 0 {
app.NumUnits = app.Scale_
app.Scale_ = 0
}
// Non-Kubernetes bundles do not use the placement attribute.
if bd.Type != kubernetes && app.Placement_ != "" {
return fmt.Errorf("placement (%s) not valid for non-Kubernetes application %q", app.Placement_, appName)
}
// Kubernetes bundles only use a single placement directive.
if app.Placement_ != "" {
if len(app.To) > 0 {
return fmt.Errorf("cannot specify both placement and to for application %q", appName)
}
app.To = []string{app.Placement_}
app.Placement_ = ""
}
}
return nil
}
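// An illustrative sketch (not part of the original source) of the
// normalization performed above; the yaml content and the "strings" import
// are assumptions. For a Kubernetes bundle, "scale" is mapped onto NumUnits
// and "placement" onto To during unmarshalling.
//
//	const doc = `
//	bundle: kubernetes
//	applications:
//	  mariadb:
//	    charm: mariadb-k8s
//	    scale: 3
//	    placement: foo=bar
//	`
//	bd, err := ReadBundleData(strings.NewReader(doc))
//	if err != nil {
//		return err
//	}
//	// bd.Applications["mariadb"].NumUnits == 3
//	// bd.Applications["mariadb"].To == []string{"foo=bar"}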
// ExposedEndpointSpec describes the expose parameters for an application
// endpoint.
type ExposedEndpointSpec struct {
// ExposeToSpaces contains a list of spaces that should be able to
// access the application ports opened for an endpoint when the
// application is exposed.
ExposeToSpaces []string `json:"expose-to-spaces,omitempty" yaml:"expose-to-spaces,omitempty" source:"overlay-only"`
// ExposeToCIDRs contains a list of CIDRs that should be able to access
// the application ports opened for an endpoint when the application is
// exposed.
ExposeToCIDRs []string `json:"expose-to-cidrs,omitempty" yaml:"expose-to-cidrs,omitempty" source:"overlay-only"`
}
// OfferSpec describes an offer for a particular application.
type OfferSpec struct {
// The list of endpoints exposed via the offer.
Endpoints []string `json:"endpoints" yaml:"endpoints" source:"overlay-only"`
// The access control list for this offer. The keys are users and the
// values are access permissions.
ACL map[string]string `json:"acl,omitempty" yaml:"acl,omitempty" source:"overlay-only"`
}
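// An illustrative overlay document sketch (the application, endpoint and
// offer names are hypothetical). Both exposed-endpoints and offers are marked
// overlay-only, so they are specified in an overlay rather than in the base
// bundle:
//
//	applications:
//	  wordpress:
//	    exposed-endpoints:
//	      website:
//	        expose-to-cidrs: ["10.0.0.0/24"]
//	    offers:
//	      wordpress-web:
//	        endpoints: ["website"]
//	        acl:
//	          admin: admin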
// ReadBundleData reads bundle data from the given reader.
// The returned data is not verified - call Verify to ensure
// that it is OK.
func ReadBundleData(r io.Reader) (*BundleData, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
bd, _, err := ReadBaseFromMultidocBundle(b)
if err != nil {
return nil, err
}
return bd, nil
}
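// An illustrative sketch (not part of the original source): reading a bundle
// from a file and verifying it without any constraint, storage or device
// checkers. The file path and the "os" import are assumptions.
//
//	f, err := os.Open("bundle.yaml")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	bd, err := ReadBundleData(f)
//	if err != nil {
//		return err
//	}
//	if err := bd.Verify(nil, nil, nil); err != nil {
//		// err is a *VerificationError collecting every problem found.
//		return err
//	}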
// ReadBaseFromMultidocBundle reads the bundle data corresponding to the first
// (base) bundle document in the given byte slice. The second return value
// indicates whether the bundle contains additional documents that the parser
// ignored.
//
// Clients that are interested in reading multi-doc bundle data should use the
// new helpers: LocalBundleDataSource and StreamBundleDataSource.
func ReadBaseFromMultidocBundle(b []byte) (*BundleData, bool, error) {
parts, err := parseBundleParts(b)
if err != nil {
return nil, false, err
}
if len(parts) == 0 {
return nil, false, errors.NotValidf("empty bundle")
}
return parts[0].Data, len(parts) > 1, nil
}
// VerificationError holds an error generated by BundleData.Verify,
// holding all the verification errors found when verifying.
type VerificationError struct {
Errors []error
}
func (err *VerificationError) Error() string {
switch len(err.Errors) {
case 0:
return "no verification errors!"
case 1:
return err.Errors[0].Error()
}
return fmt.Sprintf("%s (and %d more errors)", err.Errors[0], len(err.Errors)-1)
}
type bundleDataVerifier struct {
// bundleDir is the directory containing the bundle file
bundleDir string
bd *BundleData
// machines holds the reference counts of all machines
// as referred to by placement directives.
machineRefCounts map[string]int
charms map[string]Charm
errors []error
verifyConstraints func(c string) error
verifyStorage func(s string) error
verifyDevices func(s string) error
}
func (verifier *bundleDataVerifier) addErrorf(f string, a ...interface{}) {
verifier.addError(fmt.Errorf(f, a...))
}
func (verifier *bundleDataVerifier) addError(err error) {
verifier.errors = append(verifier.errors, err)
}
func (verifier *bundleDataVerifier) err() error {
if len(verifier.errors) > 0 {
return &VerificationError{verifier.errors}
}
return nil
}
// RequiredCharms returns a sorted slice of all the charm URLs
// required by the bundle.
func (bd *BundleData) RequiredCharms() []string {
req := make([]string, 0, len(bd.Applications))
for _, svc := range bd.Applications {
req = append(req, svc.Charm)
}
sort.Strings(req)
return req
}
// VerifyLocal verifies that a local bundle file is consistent.
// A local bundle file may contain references to charms which are
// referred to by a directory, either relative or absolute.
//
// bundleDir is used to construct the full path for charms specified
// using a relative directory path. The charm path is therefore expected
// to be relative to the bundle.yaml file.
func (bd *BundleData) VerifyLocal(
bundleDir string,
verifyConstraints func(c string) error,
verifyStorage func(s string) error,
verifyDevices func(s string) error,
) error {
return bd.verifyBundle(bundleDir, verifyConstraints, verifyStorage, verifyDevices, nil)
}
// Verify is a convenience method that calls VerifyWithCharms
// with a nil charms map.
func (bd *BundleData) Verify(
verifyConstraints func(c string) error,
verifyStorage func(s string) error,
verifyDevices func(s string) error,
) error {
return bd.VerifyWithCharms(verifyConstraints, verifyStorage, verifyDevices, nil)
}
// VerifyWithCharms verifies that the bundle is consistent.
// The verifyConstraints function is called to verify any constraints
// that are found. If verifyConstraints is nil, no checking
// of constraints will be done. Similarly, a non-nil verifyStorage, verifyDevices
// function is called to verify any storage constraints.
//
// It verifies the following:
//
// - All defined machines are referred to by placement directives.
// - All applications referred to by placement directives are specified in the bundle.
// - All applications referred to by relations are specified in the bundle.
// - All basic constraints are valid.
// - All storage constraints are valid.
//
// If charms is not nil, it should hold a map with an entry for each
// charm url returned by bd.RequiredCharms. The verification will then
// also check that applications are defined with valid charms,
// relations are correctly made and options are defined correctly.
//
// If the verification fails, Verify returns a *VerificationError describing
// all the problems found.
func (bd *BundleData) VerifyWithCharms(
verifyConstraints func(c string) error,
verifyStorage func(s string) error,
verifyDevices func(s string) error,
charms map[string]Charm,
) error {
return bd.verifyBundle("", verifyConstraints, verifyStorage, verifyDevices, charms)
}
func (bd *BundleData) verifyBundle(
bundleDir string,
verifyConstraints func(c string) error,
verifyStorage func(s string) error,
verifyDevices func(s string) error,
charms map[string]Charm,
) error {
if verifyConstraints == nil {
verifyConstraints = func(string) error {
return nil
}
}
if verifyStorage == nil {
verifyStorage = func(string) error {
return nil
}
}
if verifyDevices == nil {
verifyDevices = func(string) error {
return nil
}
}
verifier := &bundleDataVerifier{
bundleDir: bundleDir,
verifyConstraints: verifyConstraints,
verifyStorage: verifyStorage,
verifyDevices: verifyDevices,
bd: bd,
machineRefCounts: make(map[string]int),
charms: charms,
}
if bd.Type != "" && bd.Type != kubernetes {
verifier.addErrorf("bundle has an invalid type %q", bd.Type)
}
if bd.Type == kubernetes {
if len(bd.Machines) > 0 {
verifier.addErrorf("bundle machines not valid for Kubernetes bundles")
}
bd.Machines = nil
}
for id := range bd.Machines {
verifier.machineRefCounts[id] = 0
}
if bd.DefaultBase != "" {
if _, err := ParseBase(bd.DefaultBase); err != nil {
verifier.addErrorf("bundle declares an invalid base %q", bd.DefaultBase)
}
}
verifier.verifySaas()
verifier.verifyMachines()
verifier.verifyApplications()
verifier.verifyRelations()
verifier.verifyOptions()
verifier.verifyEndpointBindings()
for id, count := range verifier.machineRefCounts {
if count == 0 {
verifier.addErrorf("machine %q is not referred to by a placement directive", id)
}
}
return verifier.err()
}
var (
validMachineId = regexp.MustCompile("^" + names.NumberSnippet + "$")
validStorageName = regexp.MustCompile("^" + names.StorageNameSnippet + "$")
validDeviceName = regexp.MustCompile("^" + "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)" + "$")
// When the operator consumes the offer a pseudo-application with the
// offer name will be created by the controller. So using the application
// name regex makes sense here. Likewise we can use the relation regex
// to validate the endpoint name.
validOfferName = regexp.MustCompile("^" + names.ApplicationSnippet + "$")
validOfferEndpointName = regexp.MustCompile("^" + names.RelationSnippet + "$")
)
func (verifier *bundleDataVerifier) verifySaas() {
for name, saas := range verifier.bd.Saas {
if _, ok := verifier.bd.Applications[name]; ok {
verifier.addErrorf("application %[1]q already exists with SAAS %[1]q name", name)
}
if !validOfferName.MatchString(name) {
verifier.addErrorf("invalid SAAS name %q found", name)
}
if saas == nil {
continue
}
if saas.URL != "" && !IsValidOfferURL(saas.URL) {
verifier.addErrorf("invalid offer URL %q for SAAS %s", saas.URL, name)
}
}
}
func (verifier *bundleDataVerifier) verifyMachines() {
for id, m := range verifier.bd.Machines {
if !validMachineId.MatchString(id) {
verifier.addErrorf("invalid machine id %q found in machines", id)
}
if m == nil {
continue
}
if m.Constraints != "" {
if err := verifier.verifyConstraints(m.Constraints); err != nil {
verifier.addErrorf("invalid constraints %q in machine %q: %v", m.Constraints, id, err)
}
}
if m.Base != "" {
if _, err := ParseBase(m.Base); err != nil {
verifier.addErrorf("invalid base %q for machine %q", m.Base, id)
}
}
}
}
func (verifier *bundleDataVerifier) verifyApplications() {
if len(verifier.bd.Applications) == 0 {
verifier.addErrorf("at least one application must be specified")
return
}
for name, app := range verifier.bd.Applications {
if app == nil {
verifier.addErrorf("bundle application for key %q is undefined", name)
continue
}
if app.Charm == "" {
verifier.addErrorf("empty charm path")
}
if _, ok := verifier.bd.Saas[name]; ok {
verifier.addErrorf("SAAS %[1]q already exists with application %[1]q name", name)
}
// Charm may be a local directory or a charm URL.
var curl *URL
var err error
if strings.HasPrefix(app.Charm, ".") || filepath.IsAbs(app.Charm) {
charmPath := app.Charm
if !filepath.IsAbs(charmPath) {
charmPath = filepath.Join(verifier.bundleDir, charmPath)
}
if _, err := os.Stat(charmPath); err != nil {
if os.IsNotExist(err) {
verifier.addErrorf("charm path in application %q does not exist: %v", name, charmPath)
} else {
verifier.addErrorf("invalid charm path in application %q: %v", name, err)
}
}
} else if curl, err = ParseURL(app.Charm); err != nil {
verifier.addErrorf("invalid charm URL in application %q: %v", name, err)
}
// Check the revision.
if curl != nil {
if CharmHub.Matches(curl.Schema) && curl.Revision != -1 {
verifier.addErrorf("cannot specify revision in %q, please use revision", curl.String())
}
if app.Revision != nil {
if CharmHub.Matches(curl.Schema) && app.Channel == "" {
verifier.addErrorf("application %q with a revision requires a channel for future upgrades, please use channel", name)
}
if *app.Revision < 0 {
verifier.addErrorf("the revision for application %q must be zero or greater", name)
}
}
}
// Check the Base
if app.Base != "" {
if _, err := ParseBase(app.Base); err != nil {
verifier.addErrorf("application %q declares an invalid base %q", name, app.Base)
}
}
// Check the Constraints.
if err := verifier.verifyConstraints(app.Constraints); err != nil {
verifier.addErrorf("invalid constraints %q in application %q: %v", app.Constraints, name, err)
}
// Check the Storage.
for storageName, storageConstraints := range app.Storage {
if !validStorageName.MatchString(storageName) {
verifier.addErrorf("invalid storage name %q in application %q", storageName, name)
}
if err := verifier.verifyStorage(storageConstraints); err != nil {
verifier.addErrorf("invalid storage %q in application %q: %v", storageName, name, err)
}
}
// Check the Devices.
for deviceName, deviceConstraints := range app.Devices {
if !validDeviceName.MatchString(deviceName) {
verifier.addErrorf("invalid device name %q in application %q", deviceName, name)
}
if err := verifier.verifyDevices(deviceConstraints); err != nil {
verifier.addErrorf("invalid device %q in application %q: %v", deviceName, name, err)
}
}
// Check the offers.
for offerName, oSpec := range app.Offers {
if !validOfferName.MatchString(offerName) {
verifier.addErrorf("invalid offer name %q in application %q", offerName, name)
}
for _, endpoint := range oSpec.Endpoints {
if !validOfferEndpointName.MatchString(endpoint) {
verifier.addErrorf("invalid endpoint name %q for offer %q in application %q", endpoint, offerName, name)
}
}
}
if verifier.charms != nil {
if ch, ok := verifier.charms[app.Charm]; ok {
if ch.Meta().Subordinate {
if len(app.To) > 0 {
verifier.addErrorf("application %q is subordinate but specifies unit placement", name)
}
if app.NumUnits > 0 {
verifier.addErrorf("application %q is subordinate but has non-zero num_units", name)
}
}
} else {
verifier.addErrorf("application %q refers to non-existent charm %q", name, app.Charm)
}
}
for resName, rev := range app.Resources {
if resName == "" {
verifier.addErrorf("missing resource name on application %q", name)
}
switch rev.(type) {
case int, string:
default:
verifier.addErrorf("resource revision %q is not int or string", name)
}
}
if app.NumUnits < 0 {
verifier.addErrorf("negative number of units specified on application %q", name)
}
if verifier.bd.Type == kubernetes {
verifier.verifyKubernetesPlacement(name, app.To)
} else {
verifier.verifyPlacement(name, app.NumUnits, app.To)
}
// Check expose parameters. We do not allow both the expose and
// the exposed-endpoints fields to be specified at the same
// time. Otherwise, an operator might export a 2.9 bundle
// containing an exposed application with endpoint-specific
// rules and then import it into a 2.8 controller which is not
// aware of this field, causing the application to be exposed
// to 0.0.0.0/0!
if len(app.ExposedEndpoints) != 0 {
if app.Expose {
verifier.addErrorf(`exposed-endpoints cannot be specified together with "expose:true" in application %q as this poses a security risk when deploying bundles to older controllers`, name)
} else {
for epName, expDetails := range app.ExposedEndpoints {
for _, cidr := range expDetails.ExposeToCIDRs {
if _, _, err := net.ParseCIDR(cidr); err != nil {
verifier.addErrorf("invalid CIDR %q for expose to CIDRs field for endpoint %q in application %q", cidr, epName, name)
}
}
}
}
}
}
}
func (verifier *bundleDataVerifier) verifyPlacement(name string, numUnits int, to []string) {
if numUnits >= 0 && len(to) > numUnits {
verifier.addErrorf("too many units specified in unit placement for application %q", name)
}
for _, p := range to {
up, err := ParsePlacement(p)
if err != nil {
verifier.addError(err)
continue
}
switch {
case up.Application != "":
spec, ok := verifier.bd.Applications[up.Application]
if !ok {
verifier.addErrorf("placement %q refers to an application not defined in this bundle", p)
continue
}
if up.Unit >= 0 && up.Unit >= spec.NumUnits {
verifier.addErrorf("placement %q specifies a unit greater than the %d unit(s) started by the target application", p, spec.NumUnits)
}
case up.Machine == "new":
default:
_, ok := verifier.bd.Machines[up.Machine]
if !ok {
verifier.addErrorf("placement %q refers to a machine not defined in this bundle", p)
continue
}
verifier.machineRefCounts[up.Machine]++
}
}
}
func (verifier *bundleDataVerifier) verifyKubernetesPlacement(name string, to []string) {
if len(to) > 1 {
verifier.addErrorf("too many placement directives for application %q", name)
return
}
if len(to) == 0 {
return
}
_, err := keyvalues.Parse(strings.Split(to[0], ","), false)
if err != nil {
verifier.addErrorf("%v for application %q", err, name)
}
}
func (verifier *bundleDataVerifier) getCharmMetaForApplication(appName string) (*Meta, error) {
svc, ok := verifier.bd.Applications[appName]
if !ok {
return nil, fmt.Errorf("application %q not found", appName)
}
ch, ok := verifier.charms[svc.Charm]
if !ok {
return nil, fmt.Errorf("charm %q from application %q not found", svc.Charm, appName)
}
return ch.Meta(), nil
}
func (verifier *bundleDataVerifier) verifyRelations() {
seen := make(map[[2]endpoint]bool)
for _, relPair := range verifier.bd.Relations {
if len(relPair) != 2 {
verifier.addErrorf("relation %q has %d endpoint(s), not 2", relPair, len(relPair))
continue
}
var epPair [2]endpoint
relParseErr := false
for i, svcRel := range relPair {
ep, err := parseEndpoint(svcRel)
if err != nil {
verifier.addError(err)
relParseErr = true
continue
}
// With the introduction of the SAAS block to bundles, we should
// check not only that the expected application is in the
// applications block but also, if it is not, whether it is in the SAAS block.
_, foundApp := verifier.bd.Applications[ep.application]
_, foundSaas := verifier.bd.Saas[ep.application]
if !foundApp && !foundSaas {
verifier.addErrorf("relation %q refers to application %q not defined in this bundle", relPair, ep.application)
}
if foundApp && foundSaas {
verifier.addErrorf("ambiguous relation %q refers to a application and a SAAS in this bundle", ep.application)
}
epPair[i] = ep
}
if relParseErr {
// We failed to parse at least one relation, so don't
// bother checking further.
continue
}
if epPair[0].application == epPair[1].application {
verifier.addErrorf("relation %q relates an application to itself", relPair)
}
// Resolve endpoint relations if necessary, provided we
// have the charm information to do so.
if (epPair[0].relation == "" || epPair[1].relation == "") && verifier.charms != nil {
iep0, iep1, err := inferEndpoints(epPair[0], epPair[1], verifier.getCharmMetaForApplication)
if err != nil {
verifier.addErrorf("cannot infer endpoint between %s and %s: %v", epPair[0], epPair[1], err)
} else {
// Change the endpoints that get recorded
// as seen, so we'll diagnose a duplicate
// relation even if one relation specifies
// the relations explicitly and the other does
// not.
epPair[0], epPair[1] = iep0, iep1
}
}
// Re-order pairs so that we diagnose duplicate relations
// whichever way they're specified.
if epPair[1].less(epPair[0]) {
epPair[1], epPair[0] = epPair[0], epPair[1]
}
if _, ok := seen[epPair]; ok {
verifier.addErrorf("relation %q is defined more than once", relPair)
}
if verifier.charms != nil && epPair[0].relation != "" && epPair[1].relation != "" {
// We have charms to verify against, and the
// endpoint has been fully specified or inferred.
verifier.verifyRelation(epPair[0], epPair[1])
}
seen[epPair] = true
}
}
func (verifier *bundleDataVerifier) verifyEndpointBindings() {
for name, svc := range verifier.bd.Applications {
if svc == nil {
continue
}
// Verify the endpoint bindings against the fully qualified charm URL
// and not just the application name. Fall back to looking the charm up
// by application name, although in reality this shouldn't be the case.
var (
charm Charm
ok bool
)
if charm, ok = verifier.charms[svc.Charm]; !ok {
if charm, ok = verifier.charms[name]; !ok {
continue
}
}
for endpoint, space := range svc.EndpointBindings {
_, isInProvides := charm.Meta().Provides[endpoint]
_, isInRequires := charm.Meta().Requires[endpoint]
_, isInPeers := charm.Meta().Peers[endpoint]
_, isInExtraBindings := charm.Meta().ExtraBindings[endpoint]
if !(isInProvides || isInRequires || isInPeers || isInExtraBindings) {
verifier.addErrorf(
"application %q wants to bind endpoint %q to space %q, "+
"but the endpoint is not defined by the charm",
name, endpoint, space)
}
}
}
}
var infoRelation = Relation{
Name: "juju-info",
Role: RoleProvider,
Interface: "juju-info",
Scope: ScopeContainer,
}
// verifyRelation verifies a single relation.
// It checks that both endpoints of the relation are
// defined, and that the relationship is correctly
// symmetrical (provider to requirer) and shares
// the same interface.
func (verifier *bundleDataVerifier) verifyRelation(ep0, ep1 endpoint) {
svc0 := verifier.bd.Applications[ep0.application]
svc1 := verifier.bd.Applications[ep1.application]
if svc0 == nil || svc1 == nil || svc0 == svc1 {
// An error will be produced by verifyRelations for this case.
return
}
charm0 := verifier.charms[svc0.Charm]
charm1 := verifier.charms[svc1.Charm]
if charm0 == nil || charm1 == nil {
// An error will be produced by verifyApplications for this case.
return
}
relProv0, okProv0 := charm0.Meta().Provides[ep0.relation]
// The juju-info relation is provided implicitly by every
// charm - use it if required.
if !okProv0 && ep0.relation == infoRelation.Name {
relProv0, okProv0 = infoRelation, true
}
relReq0, okReq0 := charm0.Meta().Requires[ep0.relation]
if !okProv0 && !okReq0 {
verifier.addErrorf("charm %q used by application %q does not define relation %q", svc0.Charm, ep0.application, ep0.relation)
}
relProv1, okProv1 := charm1.Meta().Provides[ep1.relation]
// The juju-info relation is provided implicitly by every
// charm - use it if required.
if !okProv1 && ep1.relation == infoRelation.Name {
relProv1, okProv1 = infoRelation, true
}
relReq1, okReq1 := charm1.Meta().Requires[ep1.relation]
if !okProv1 && !okReq1 {
verifier.addErrorf("charm %q used by application %q does not define relation %q", svc1.Charm, ep1.application, ep1.relation)
}
var relProv, relReq Relation
var epProv, epReq endpoint
switch {
case okProv0 && okReq1:
relProv, relReq = relProv0, relReq1
epProv, epReq = ep0, ep1
case okReq0 && okProv1:
relProv, relReq = relProv1, relReq0
epProv, epReq = ep1, ep0
case okProv0 && okProv1:
verifier.addErrorf("relation %q to %q relates provider to provider", ep0, ep1)
return
case okReq0 && okReq1:
verifier.addErrorf("relation %q to %q relates requirer to requirer", ep0, ep1)
return
default:
// Errors were added above.
return
}
if relProv.Interface != relReq.Interface {
verifier.addErrorf("mismatched interface between %q and %q (%q vs %q)", epProv, epReq, relProv.Interface, relReq.Interface)
}
}
// verifyOptions verifies that the options are correctly defined
// with respect to the charm config options.
func (verifier *bundleDataVerifier) verifyOptions() {
if verifier.charms == nil {
return
}
for appName, svc := range verifier.bd.Applications {
charm := verifier.charms[svc.Charm]
if charm == nil {
// An error will be produced by verifyApplications for this case.
continue
}
config := charm.Config()
for name, value := range svc.Options {
opt, ok := config.Options[name]
if !ok {
verifier.addErrorf("cannot validate application %q: configuration option %q not found in charm %q", appName, name, svc.Charm)
continue
}
_, err := opt.validate(name, value)
if err != nil {
verifier.addErrorf("cannot validate application %q: %v", appName, err)
}
}
}
}
var validApplicationRelation = regexp.MustCompile("^(" + names.ApplicationSnippet + "):(" + names.RelationSnippet + ")$")
type endpoint struct {
application string
relation string
}
func (ep endpoint) String() string {
if ep.relation == "" {
return ep.application
}
return fmt.Sprintf("%s:%s", ep.application, ep.relation)
}
func (ep endpoint) less(other endpoint) bool {
if ep.application == other.application {
return ep.relation < other.relation
}
return ep.application < other.application
}
func parseEndpoint(ep string) (endpoint, error) {
m := validApplicationRelation.FindStringSubmatch(ep)
if m != nil {
return endpoint{
application: m[1],
relation: m[2],
}, nil
}
if !names.IsValidApplication(ep) {
return endpoint{}, fmt.Errorf("invalid relation syntax %q", ep)
}
return endpoint{
application: ep,
}, nil
}
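// Illustrative examples (not part of the original source) of the accepted
// endpoint syntax:
//
//	ep, _ := parseEndpoint("wordpress:db") // endpoint{application: "wordpress", relation: "db"}
//	ep, _ = parseEndpoint("mysql")         // endpoint{application: "mysql"}; relation inferred later
//	_, err := parseEndpoint("bad name")    // error: invalid relation syntax "bad name"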
// endpointInfo holds information about one endpoint of a relation.
type endpointInfo struct {
applicationName string
Relation
}
// String returns the unique identifier of the relation endpoint.
func (ep endpointInfo) String() string {
return ep.applicationName + ":" + ep.Name
}
// canRelateTo returns whether a relation may be established between ep
// and other.
func (ep endpointInfo) canRelateTo(other endpointInfo) bool {
return ep.applicationName != other.applicationName &&
ep.Interface == other.Interface &&
ep.Role != RolePeer &&
counterpartRole(ep.Role) == other.Role
}
// endpoint returns the endpoint specifier for ep.
func (ep endpointInfo) endpoint() endpoint {
return endpoint{
application: ep.applicationName,
relation: ep.Name,
}
}
// counterpartRole returns the RelationRole that the given RelationRole
// can relate to.
func counterpartRole(r RelationRole) RelationRole {
switch r {
case RoleProvider:
return RoleRequirer
case RoleRequirer:
return RoleProvider
case RolePeer:
return RolePeer
}
panic(fmt.Errorf("unknown relation role %q", r))
}
type UnitPlacement struct {
// ContainerType holds the container type of the new
// unit, or empty if unspecified.
ContainerType string
// Machine holds the numeric machine id, or "new",
// or empty if the placement specifies an application.
Machine string
// application holds the application name, or empty if
// the placement specifies a machine.
Application string
// Unit holds the unit number of the application, or -1
// if unspecified.
Unit int
}
var snippetReplacer = strings.NewReplacer(
"container", names.ContainerTypeSnippet,
"number", names.NumberSnippet,
"application", names.ApplicationSnippet,
)
// validPlacement holds regexp that matches valid placement requests. To
// make the expression easier to comprehend and maintain, we replace
// symbolic snippet references in the regexp by their actual regexps
// using snippetReplacer.
var validPlacement = regexp.MustCompile(
snippetReplacer.Replace(
"^(?:(container):)?(?:(application)(?:/(number))?|(number))$",
),
)
// ParsePlacement parses a unit placement directive, as
// specified in the To clause of an application entry in the
// applications section of a bundle.
func ParsePlacement(p string) (*UnitPlacement, error) {
m := validPlacement.FindStringSubmatch(p)
if m == nil {
return nil, fmt.Errorf("invalid placement syntax %q", p)
}
up := UnitPlacement{
ContainerType: m[1],
Application: m[2],
Machine: m[4],
}
if unitStr := m[3]; unitStr != "" {
// We know that unitStr must be a valid integer because
// it's specified as such in the regexp.
up.Unit, _ = strconv.Atoi(unitStr)
} else {
up.Unit = -1
}
if up.Application == "new" {
if up.Unit != -1 {
return nil, fmt.Errorf("invalid placement syntax %q", p)
}
up.Machine, up.Application = "new", ""
}
return &up, nil
}
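// Illustrative examples (not part of the original source) of placement
// parsing, assuming the usual container type names are valid:
//
//	up, _ := ParsePlacement("lxd:new")    // ContainerType "lxd", Machine "new", Unit -1
//	up, _ = ParsePlacement("wordpress/0") // Application "wordpress", Unit 0
//	up, _ = ParsePlacement("3")           // Machine "3", Unit -1
//	_, err := ParsePlacement("bad place") // error: invalid placement syntax "bad place"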
// inferEndpoints infers missing relation names from the given endpoint
// specifications, using the given get function to retrieve charm
// data if necessary. It returns the fully specified endpoints.
func inferEndpoints(epSpec0, epSpec1 endpoint, get func(svc string) (*Meta, error)) (endpoint, endpoint, error) {
if epSpec0.relation != "" && epSpec1.relation != "" {
// The endpoints are already specified explicitly so
// there is no need to fetch any charm data to infer
// them.
return epSpec0, epSpec1, nil
}
eps0, err := possibleEndpoints(epSpec0, get)
if err != nil {
return endpoint{}, endpoint{}, err
}
eps1, err := possibleEndpoints(epSpec1, get)
if err != nil {
return endpoint{}, endpoint{}, err
}
var candidates [][]endpointInfo
for _, ep0 := range eps0 {
for _, ep1 := range eps1 {
if ep0.canRelateTo(ep1) {
candidates = append(candidates, []endpointInfo{ep0, ep1})
}
}
}
switch len(candidates) {
case 0:
return endpoint{}, endpoint{}, fmt.Errorf("no relations found")
case 1:
return candidates[0][0].endpoint(), candidates[0][1].endpoint(), nil
}
// There's ambiguity; try discarding implicit relations.
filtered := discardImplicitRelations(candidates)
if len(filtered) == 1 {
return filtered[0][0].endpoint(), filtered[0][1].endpoint(), nil
}
// The ambiguity cannot be resolved, so return an error.
var keys []string
for _, cand := range candidates {
keys = append(keys, fmt.Sprintf("%q", relationKey(cand)))
}
sort.Strings(keys)
return endpoint{}, endpoint{}, fmt.Errorf("ambiguous relation: %s %s could refer to %s",
epSpec0, epSpec1, strings.Join(keys, "; "))
}
func discardImplicitRelations(candidates [][]endpointInfo) [][]endpointInfo {
var filtered [][]endpointInfo
outer:
for _, cand := range candidates {
for _, ep := range cand {
if ep.IsImplicit() {
continue outer
}
}
filtered = append(filtered, cand)
}
return filtered
}
// relationKey returns a string describing the relation defined by
// endpoints, for use in various contexts (including error messages).
func relationKey(endpoints []endpointInfo) string {
var names []string
for _, ep := range endpoints {
names = append(names, ep.String())
}
sort.Strings(names)
return strings.Join(names, " ")
}
// possibleEndpoints returns all the endpoints that the given endpoint spec
// could refer to.
func possibleEndpoints(epSpec endpoint, get func(svc string) (*Meta, error)) ([]endpointInfo, error) {
meta, err := get(epSpec.application)
if err != nil {
return nil, err
}
var eps []endpointInfo
add := func(r Relation) {
if epSpec.relation == "" || epSpec.relation == r.Name {
eps = append(eps, endpointInfo{
applicationName: epSpec.application,
Relation: r,
})
}
}
for _, r := range meta.Provides {
add(r)
}
for _, r := range meta.Requires {
add(r)
}
// Every application implicitly provides a juju-info relation.
add(Relation{
Name: "juju-info",
Role: RoleProvider,
Interface: "juju-info",
Scope: ScopeGlobal,
})
return eps, nil
}
// Copyright 2019 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"bytes"
"io"
"os"
"path/filepath"
"strings"
"github.com/juju/errors"
"gopkg.in/yaml.v2"
)
// FieldPresenceMap indicates which keys of a parsed bundle yaml document were
// present when the document was parsed. This map is used by the overlay merge
// code to figure out whether empty/nil field values were actually specified as
// such in the yaml document.
type FieldPresenceMap map[interface{}]interface{}
func (fpm FieldPresenceMap) fieldPresent(fieldName string) bool {
_, exists := fpm[fieldName]
return exists
}
func (fpm FieldPresenceMap) forField(fieldName string) FieldPresenceMap {
v, exists := fpm[fieldName]
if !exists {
return nil
}
// Always return a FieldPresenceMap even if the underlying value is empty.
// As the only way to interact with the map is through these two methods,
// this allows callers to walk nested fields in a much saner way.
switch m := v.(type) {
case FieldPresenceMap:
return m
case map[interface{}]interface{}:
// Nested values are decoded by the yaml package as plain maps rather
// than FieldPresenceMap, so convert them to keep forField chainable.
return FieldPresenceMap(m)
default:
return FieldPresenceMap{}
}
}
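// An illustrative sketch (not part of the original source): given a parsed
// BundleDataPart named part, the presence map can be used to check whether a
// field was explicitly specified, even when its value is empty or false.
//
//	pm := part.PresenceMap
//	if pm.forField("applications").forField("mariadb").fieldPresent("trust") {
//		// "trust" appeared in the yaml document, possibly set to false.
//	}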
// BundleDataPart combines a parsed BundleData instance with a nested map that
// can be used to discriminate between fields that are missing from the data
// and those that are present but defined to be empty.
type BundleDataPart struct {
Data *BundleData
PresenceMap FieldPresenceMap
UnmarshallError error
}
// BundleDataSource is implemented by types that can parse bundle data into a
// list of composable parts.
type BundleDataSource interface {
Parts() []*BundleDataPart
BundleBytes() []byte
BasePath() string
ResolveInclude(path string) ([]byte, error)
}
type resolvedBundleDataSource struct {
basePath string
bundleBytes []byte
parts []*BundleDataPart
}
func (s *resolvedBundleDataSource) Parts() []*BundleDataPart {
return s.parts
}
func (s *resolvedBundleDataSource) BundleBytes() []byte {
return s.bundleBytes
}
func (s *resolvedBundleDataSource) BasePath() string {
return s.basePath
}
func (s *resolvedBundleDataSource) ResolveInclude(path string) ([]byte, error) {
absPath := path
if !filepath.IsAbs(absPath) {
var err error
absPath, err = filepath.Abs(filepath.Clean(filepath.Join(s.basePath, absPath)))
if err != nil {
return nil, errors.Annotatef(err, "resolving relative include %q", path)
}
}
info, err := os.Stat(absPath)
if err != nil {
if isNotExistsError(err) {
return nil, errors.NotFoundf("include file %q", absPath)
}
return nil, errors.Annotatef(err, "stat failed for %q", absPath)
}
if info.IsDir() {
return nil, errors.Errorf("include path %q resolves to a folder", absPath)
}
data, err := os.ReadFile(absPath)
if err != nil {
return nil, errors.Annotatef(err, "reading include file at %q", absPath)
}
return data, nil
}
// LocalBundleDataSource reads a (potentially multi-part) bundle from path and
// returns a BundleDataSource for it. Path may point to a yaml file, a bundle
// directory or a bundle archive.
func LocalBundleDataSource(path string) (BundleDataSource, error) {
info, err := os.Stat(path)
if err != nil {
if isNotExistsError(err) {
return nil, errors.NotFoundf("%q", path)
}
return nil, errors.Annotatef(err, "stat failed for %q", path)
}
// Treat as an exploded bundle archive directory
if info.IsDir() {
path = filepath.Join(path, "bundle.yaml")
}
// Try parsing as a yaml file first
f, err := os.Open(path)
if err != nil {
if isNotExistsError(err) {
return nil, errors.NotFoundf("%q", path)
}
return nil, errors.Annotatef(err, "access bundle data at %q", path)
}
defer func() { _ = f.Close() }()
b, err := io.ReadAll(f)
if err != nil {
return nil, err
}
parts, pErr := parseBundleParts(b)
if pErr == nil {
absPath, err := filepath.Abs(path)
if err != nil {
return nil, errors.Annotatef(err, "resolve absolute path to %s", path)
}
return &resolvedBundleDataSource{
basePath: filepath.Dir(absPath),
parts: parts,
bundleBytes: b,
}, nil
}
// As a fallback, try to parse as a bundle archive
zo := newZipOpenerFromPath(path)
zrc, err := zo.openZip()
if err != nil {
// Not a zip file; return the original parse error
return nil, errors.NewNotValid(pErr, "cannot unmarshal bundle contents")
}
defer func() { _ = zrc.Close() }()
r, err := zipOpenFile(zrc, "bundle.yaml")
if err != nil {
// It is a zip file but not one that contains a bundle.yaml
return nil, errors.NotFoundf("interpret bundle contents as a bundle archive: %v", err)
}
defer func() { _ = r.Close() }()
b, err = io.ReadAll(r)
if err != nil {
return nil, err
}
if parts, pErr = parseBundleParts(b); pErr == nil {
return &resolvedBundleDataSource{
basePath: "", // use empty base path for archives
parts: parts,
bundleBytes: b,
}, nil
}
return nil, errors.NewNotValid(pErr, "cannot unmarshal bundle contents")
}
func isNotExistsError(err error) bool {
if os.IsNotExist(err) {
return true
}
// On Windows, we get a path error due to a GetFileAttributesEx syscall.
// To avoid being too prescriptive, we'll simply check for the error
// type and not any content.
if _, ok := err.(*os.PathError); ok {
return true
}
return false
}
// StreamBundleDataSource reads a (potentially multi-part) bundle from r and
// returns a BundleDataSource for it.
func StreamBundleDataSource(r io.Reader, basePath string) (BundleDataSource, error) {
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
parts, err := parseBundleParts(b)
if err != nil {
return nil, errors.NotValidf("cannot unmarshal bundle contents: %v", err)
}
return &resolvedBundleDataSource{parts: parts, bundleBytes: b, basePath: basePath}, nil
}
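// An illustrative sketch (not part of the original source): reading a
// multi-document bundle from an in-memory reader. The multiDocYAML value and
// the "strings" import are assumptions; the first part is the base bundle and
// any subsequent parts are overlays.
//
//	src, err := StreamBundleDataSource(strings.NewReader(multiDocYAML), "/tmp/bundle")
//	if err != nil {
//		return err
//	}
//	for _, part := range src.Parts() {
//		if part.UnmarshallError != nil {
//			// The document failed strict validation but may still be
//			// deployable with --force.
//			continue
//		}
//		_ = part.Data // *BundleData for this document
//	}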
func parseBundleParts(b []byte) ([]*BundleDataPart, error) {
var (
// Ideally, we would be using a single reader and we would
// rewind it to read each block in structured and raw mode.
// Unfortunately, the yaml parser seems to parse all documents
// at once, so we need separate decoders. The third one allows
// the yaml to be validated via strict decoding; however, we
// still want to return non-strict bundle parts so that force
// may be used in deploy.
structDec = yaml.NewDecoder(bytes.NewReader(b))
strictDec = yaml.NewDecoder(bytes.NewReader(b))
rawDec = yaml.NewDecoder(bytes.NewReader(b))
parts []*BundleDataPart
)
for docIdx := 0; ; docIdx++ {
var part BundleDataPart
err := structDec.Decode(&part.Data)
if err == io.EOF {
break
} else if err != nil && !strings.HasPrefix(err.Error(), "yaml: unmarshal errors:") {
return nil, errors.Annotatef(err, "unmarshal document %d", docIdx)
}
var data *BundleData
strictDec.SetStrict(true)
err = strictDec.Decode(&data)
if err == io.EOF {
break
} else if err != nil {
if strings.HasPrefix(err.Error(), "yaml: unmarshal errors:") {
friendlyErrors := userFriendlyUnmarshalErrors(err)
part.UnmarshallError = errors.Annotatef(friendlyErrors, "unmarshal document %d", docIdx)
} else {
return nil, errors.Annotatef(err, "unmarshal document %d", docIdx)
}
}
// We have already checked for errors for the previous unmarshal attempt
_ = rawDec.Decode(&part.PresenceMap)
parts = append(parts, &part)
}
return parts, nil
}
func userFriendlyUnmarshalErrors(err error) error {
friendlyText := err.Error()
friendlyText = strings.ReplaceAll(friendlyText, "type charm.ApplicationSpec", "applications")
friendlyText = strings.ReplaceAll(friendlyText, "type charm.legacyBundleData", "bundle")
friendlyText = strings.ReplaceAll(friendlyText, "type charm.RelationSpec", "relations")
friendlyText = strings.ReplaceAll(friendlyText, "type charm.MachineSpec", "machines")
friendlyText = strings.ReplaceAll(friendlyText, "type charm.SaasSpec", "saas")
return errors.New(friendlyText)
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"strings"
"github.com/juju/errors"
)
// Risk describes the type of risk in a current channel.
type Risk string
const (
Stable Risk = "stable"
Candidate Risk = "candidate"
Beta Risk = "beta"
Edge Risk = "edge"
)
// String returns the string representation of the risk.
func (r Risk) String() string {
return string(r)
}
// Risks is a list of the available channel risks.
var Risks = []Risk{
Stable,
Candidate,
Beta,
Edge,
}
func isRisk(potential string) bool {
for _, risk := range Risks {
if potential == string(risk) {
return true
}
}
return false
}
// Channel identifies and describes completely a store channel.
//
// A channel consists of, and is subdivided by, tracks, risk-levels and
// branches:
// - Tracks enable charm authors to publish multiple supported releases of
// their application under the same charm name.
// - Risk-levels represent a progressive potential trade-off between stability
// and new features.
// - Branches are _optional_ and hold temporary releases intended to help with
// bug-fixing.
//
// The complete channel name can be structured as three distinct parts separated
// by slashes:
//
// <track>/<risk>/<branch>
type Channel struct {
Track string `json:"track,omitempty"`
Risk Risk `json:"risk,omitempty"`
Branch string `json:"branch,omitempty"`
}
// MakeChannel creates a core charm Channel from a set of component parts.
func MakeChannel(track, risk, branch string) (Channel, error) {
if !isRisk(risk) {
return Channel{}, errors.NotValidf("risk %q", risk)
}
return Channel{
Track: track,
Risk: Risk(risk),
Branch: branch,
}, nil
}
// MakePermissiveChannel creates a normalized core charm channel which
// never fails. It assumes that the risk has been prechecked.
func MakePermissiveChannel(track, risk, branch string) Channel {
ch := Channel{
Track: track,
Risk: Risk(risk),
Branch: branch,
}
return ch.Normalize()
}
// ParseChannel parses a string representing a store channel.
func ParseChannel(s string) (Channel, error) {
if s == "" {
return Channel{}, errors.NotValidf("empty channel")
}
p := strings.Split(s, "/")
var risk, track, branch *string
switch len(p) {
case 1:
if isRisk(p[0]) {
risk = &p[0]
} else {
track = &p[0]
}
case 2:
if isRisk(p[0]) {
risk, branch = &p[0], &p[1]
} else {
track, risk = &p[0], &p[1]
}
case 3:
track, risk, branch = &p[0], &p[1], &p[2]
default:
return Channel{}, errors.Errorf("channel is malformed and has too many components %q", s)
}
ch := Channel{}
if risk != nil {
if !isRisk(*risk) {
return Channel{}, errors.NotValidf("risk in channel %q", s)
}
// We can safely convert this to a Risk, as it was validated above.
ch.Risk = Risk(*risk)
}
if track != nil {
if *track == "" {
return Channel{}, errors.NotValidf("track in channel %q", s)
}
ch.Track = *track
}
if branch != nil {
if *branch == "" {
return Channel{}, errors.NotValidf("branch in channel %q", s)
}
ch.Branch = *branch
}
return ch, nil
}
// ParseChannelNormalize parses a string representing a store channel.
// The returned channel's track, risk and branch are normalized.
func ParseChannelNormalize(s string) (Channel, error) {
ch, err := ParseChannel(s)
if err != nil {
return Channel{}, errors.Trace(err)
}
return ch.Normalize(), nil
}
// Normalize returns the channel in normalized form: an empty risk is replaced with "stable".
func (ch Channel) Normalize() Channel {
track := ch.Track
risk := ch.Risk
if risk == "" {
risk = "stable"
}
return Channel{
Track: track,
Risk: risk,
Branch: ch.Branch,
}
}
// Empty returns true if all of its components are empty.
func (ch Channel) Empty() bool {
return ch.Track == "" && ch.Risk == "" && ch.Branch == ""
}
func (ch Channel) String() string {
path := string(ch.Risk)
if track := ch.Track; track != "" {
path = fmt.Sprintf("%s/%s", track, path)
}
if branch := ch.Branch; branch != "" {
path = fmt.Sprintf("%s/%s", path, branch)
}
return path
}
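// Illustrative examples (not part of the original source) of channel parsing
// and normalization:
//
//	ch, _ := ParseChannelNormalize("latest/edge")
//	// ch.Track == "latest", ch.Risk == Edge
//
//	ch, _ = ParseChannel("candidate")
//	// Risk only: ch == Channel{Risk: Candidate}
//
//	ch = MakePermissiveChannel("2.0", "", "")
//	// A missing risk is normalized to "stable": ch.String() == "2.0/stable"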
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"github.com/juju/collections/set"
)
// CharmMeta describes methods that inform charm operation.
type CharmMeta interface {
Meta() *Meta
Manifest() *Manifest
}
// The Charm interface is implemented by any type that
// may be handled as a charm.
type Charm interface {
CharmMeta
Config() *Config
Actions() *Actions
Revision() int
Version() string
}
// FormatSelectionReason represents the reason for a format version selection.
type FormatSelectionReason = string
const (
// SelectionManifest states that it found a manifest.
SelectionManifest FormatSelectionReason = "manifest"
// SelectionBases states that there was at least 1 base.
SelectionBases FormatSelectionReason = "bases"
// SelectionContainers states that there was at least 1 container.
SelectionContainers FormatSelectionReason = "containers"
)
var (
// formatV2Set defines what in reality is a v2 metadata.
formatV2Set = set.NewStrings(SelectionBases, SelectionContainers)
)
// metaFormatReasons returns the selected format along with the reasons for
// the selection, which callers can inspect to understand the decision.
func metaFormatReasons(ch CharmMeta) (Format, []FormatSelectionReason) {
manifest := ch.Manifest()
// To better inform users of why one metadata format was preferred over
// the other, record the reasons the format was selected.
reasons := set.NewStrings()
if manifest != nil {
reasons.Add(SelectionManifest)
if len(manifest.Bases) > 0 {
reasons.Add(SelectionBases)
}
}
if len(ch.Meta().Containers) > 0 {
reasons.Add(SelectionContainers)
}
// A charm is format v1 if it has no bases and no containers; it may
// have only a series slice.
format := FormatV1
if reasons.Intersection(formatV2Set).Size() > 0 {
format = FormatV2
}
return format, reasons.SortedValues()
}
// MetaFormat returns the metadata format deduced from the charm's manifest
// and metadata.
func MetaFormat(ch CharmMeta) Format {
format, _ := metaFormatReasons(ch)
return format
}
// CheckMeta determines the version of the metadata used by this charm,
// then checks that it is valid as appropriate.
func CheckMeta(ch CharmMeta) error {
format, reasons := metaFormatReasons(ch)
return ch.Meta().Check(format, reasons...)
}
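// An illustrative sketch (not part of the original source), assuming ch is a
// value implementing CharmMeta: selecting the metadata format and validating
// it.
//
//	format := MetaFormat(ch)
//	if format == FormatV2 {
//		// manifest-driven charm declaring bases and/or containers
//	}
//	if err := CheckMeta(ch); err != nil {
//		return err
//	}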
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"archive/zip"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"github.com/juju/collections/set"
"github.com/juju/errors"
ziputil "github.com/juju/utils/v4/zip"
)
// CharmArchive type encapsulates access to data and operations
// on a charm archive.
type CharmArchive struct {
zopen zipOpener
Path string // May be empty if CharmArchive wasn't read from a file
*charmBase
}
// Compile-time check that *CharmArchive implements the Charm interface.
var _ Charm = (*CharmArchive)(nil)
// ReadCharmArchive returns a CharmArchive for the charm in path.
func ReadCharmArchive(path string) (*CharmArchive, error) {
a, err := readCharmArchive(newZipOpenerFromPath(path))
if err != nil {
return nil, err
}
a.Path = path
return a, nil
}
// ReadCharmArchiveBytes returns a CharmArchive read from the given data.
// Make sure the archive fits in memory before using this.
func ReadCharmArchiveBytes(data []byte) (archive *CharmArchive, err error) {
zopener := newZipOpenerFromReader(bytes.NewReader(data), int64(len(data)))
return readCharmArchive(zopener)
}
// ReadCharmArchiveFromReader returns a CharmArchive that uses
// r to read the charm. The given size must hold the number
// of available bytes in the file.
//
// Note that the caller is responsible for closing r - methods on
// the returned CharmArchive may fail after that.
func ReadCharmArchiveFromReader(r io.ReaderAt, size int64) (archive *CharmArchive, err error) {
return readCharmArchive(newZipOpenerFromReader(r, size))
}
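// An illustrative sketch (not part of the original source): the three
// constructors above differ only in where the zip data comes from. The
// archive path and the "fmt" import are assumptions.
//
//	a, err := ReadCharmArchive("./mycharm.charm")
//	if err != nil {
//		return err
//	}
//	fmt.Println(a.Meta().Name, a.Revision())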
func readCharmArchive(zopen zipOpener) (archive *CharmArchive, err error) {
b := &CharmArchive{
zopen: zopen,
charmBase: &charmBase{},
}
zipr, err := zopen.openZip()
if err != nil {
return nil, err
}
defer func() { _ = zipr.Close() }()
reader, err := zipOpenFile(zipr, "metadata.yaml")
if err != nil {
return nil, err
}
b.meta, err = ReadMeta(reader)
_ = reader.Close()
if err != nil {
return nil, err
}
// Try to read the optional manifest.yaml; it is required to determine
// whether this charm is v1 or not.
reader, err = zipOpenFile(zipr, "manifest.yaml")
if _, ok := err.(*noCharmArchiveFile); ok {
b.manifest = nil
} else if err != nil {
return nil, errors.Annotatef(err, `opening "manifest.yaml" file`)
} else {
b.manifest, err = ReadManifest(reader)
_ = reader.Close()
if err != nil {
return nil, errors.Annotatef(err, `parsing "manifest.yaml" file`)
}
}
reader, err = zipOpenFile(zipr, "config.yaml")
if _, ok := err.(*noCharmArchiveFile); ok {
b.config = NewConfig()
} else if err != nil {
return nil, err
} else {
b.config, err = ReadConfig(reader)
_ = reader.Close()
if err != nil {
return nil, err
}
}
if b.actions, err = getActions(
b.meta.Name,
func(file string) (io.ReadCloser, error) {
return zipOpenFile(zipr, file)
},
func(err error) bool {
_, ok := err.(*noCharmArchiveFile)
return ok
},
); err != nil {
return nil, err
}
reader, err = zipOpenFile(zipr, "revision")
if err != nil {
if _, ok := err.(*noCharmArchiveFile); !ok {
return nil, err
}
} else {
_, err = fmt.Fscan(reader, &b.revision)
if err != nil {
return nil, errors.New("invalid revision file")
}
}
reader, err = zipOpenFile(zipr, "lxd-profile.yaml")
if _, ok := err.(*noCharmArchiveFile); ok {
b.lxdProfile = NewLXDProfile()
} else if err != nil {
return nil, err
} else {
b.lxdProfile, err = ReadLXDProfile(reader)
_ = reader.Close()
if err != nil {
return nil, err
}
}
reader, err = zipOpenFile(zipr, "version")
if err != nil {
if _, ok := err.(*noCharmArchiveFile); !ok {
return nil, err
}
} else {
b.version, err = readVersion(reader)
_ = reader.Close()
if err != nil {
return nil, err
}
}
return b, nil
}
type fileOpener func(string) (io.ReadCloser, error)
func getActions(charmName string, open fileOpener, isNotFound func(error) bool) (actions *Actions, err error) {
reader, err := open("actions.yaml")
if err == nil {
defer reader.Close()
return ReadActionsYaml(charmName, reader)
} else if !isNotFound(err) {
return nil, err
}
return NewActions(), nil
}
func zipOpenFile(zipr *zipReadCloser, path string) (rc io.ReadCloser, err error) {
for _, fh := range zipr.File {
if fh.Name == path {
return fh.Open()
}
}
return nil, &noCharmArchiveFile{path}
}
type noCharmArchiveFile struct {
path string
}
func (err noCharmArchiveFile) Error() string {
return fmt.Sprintf("archive file %q not found", err.path)
}
type zipReadCloser struct {
io.Closer
*zip.Reader
}
// zipOpener holds the information needed to open a zip
// file.
type zipOpener interface {
openZip() (*zipReadCloser, error)
}
// newZipOpenerFromPath returns a zipOpener that can be
// used to read the archive from the given path.
func newZipOpenerFromPath(path string) zipOpener {
return &zipPathOpener{path: path}
}
// newZipOpenerFromReader returns a zipOpener that can be
// used to read the archive from the given ReaderAt
// holding the given number of bytes.
func newZipOpenerFromReader(r io.ReaderAt, size int64) zipOpener {
return &zipReaderOpener{
r: r,
size: size,
}
}
type zipPathOpener struct {
path string
}
func (zo *zipPathOpener) openZip() (*zipReadCloser, error) {
f, err := os.Open(zo.path)
if err != nil {
return nil, err
}
fi, err := f.Stat()
if err != nil {
f.Close()
return nil, err
}
r, err := zip.NewReader(f, fi.Size())
if err != nil {
f.Close()
return nil, err
}
return &zipReadCloser{Closer: f, Reader: r}, nil
}
type zipReaderOpener struct {
r io.ReaderAt
size int64
}
func (zo *zipReaderOpener) openZip() (*zipReadCloser, error) {
r, err := zip.NewReader(zo.r, zo.size)
if err != nil {
return nil, err
}
return &zipReadCloser{Closer: ioutil.NopCloser(nil), Reader: r}, nil
}
// ArchiveMembers returns a set of the charm's contents.
func (a *CharmArchive) ArchiveMembers() (set.Strings, error) {
zipr, err := a.zopen.openZip()
if err != nil {
return set.NewStrings(), err
}
defer zipr.Close()
paths, err := ziputil.Find(zipr.Reader, "*")
if err != nil {
return set.NewStrings(), err
}
manifest := set.NewStrings(paths...)
// We always write out a revision file, even if there isn't one in the
// archive; and we always strip ".", because that's sometimes not present.
manifest.Add("revision")
manifest.Remove(".")
return manifest, nil
}
// ExpandTo expands the charm archive into dir, creating it if necessary.
// If any errors occur during the expansion procedure, the process will
// abort.
func (a *CharmArchive) ExpandTo(dir string) error {
zipr, err := a.zopen.openZip()
if err != nil {
return err
}
defer zipr.Close()
if err := ziputil.ExtractAll(zipr.Reader, dir); err != nil {
return err
}
hooksDir := filepath.Join(dir, "hooks")
fixHook := fixHookFunc(hooksDir, a.meta.Hooks())
if err := filepath.Walk(hooksDir, fixHook); err != nil {
if !os.IsNotExist(err) {
return err
}
}
revFile, err := os.Create(filepath.Join(dir, "revision"))
if err != nil {
return err
}
if _, err := revFile.Write([]byte(strconv.Itoa(a.revision))); err != nil {
return err
}
if err := revFile.Sync(); err != nil {
return err
}
if err := revFile.Close(); err != nil {
return err
}
return nil
}
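// An illustrative sketch (not part of the original source): expanding an
// archive into a directory and listing its members. The paths and the "fmt"
// import are assumptions.
//
//	a, err := ReadCharmArchive("./mycharm.charm")
//	if err != nil {
//		return err
//	}
//	if err := a.ExpandTo("/tmp/mycharm"); err != nil {
//		return err
//	}
//	members, err := a.ArchiveMembers()
//	if err != nil {
//		return err
//	}
//	fmt.Println(members.SortedValues())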
// fixHookFunc returns a WalkFunc that makes sure hooks are owner-executable.
func fixHookFunc(hooksDir string, hookNames map[string]bool) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
mode := info.Mode()
if path != hooksDir && mode.IsDir() {
return filepath.SkipDir
}
if name := filepath.Base(path); hookNames[name] {
if mode&0100 == 0 {
return os.Chmod(path, mode|0100)
}
}
return nil
}
}
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charm
// charmBase implements the Charm interface with commonality between
// a charm archive and directory.
type charmBase struct {
meta *Meta
config *Config
actions *Actions
lxdProfile *LXDProfile
manifest *Manifest
version string
revision int
}
// NewCharmBase creates a new charmBase with the given metadata, config,
// actions, lxdProfile, and manifest.
func NewCharmBase(meta *Meta, manifest *Manifest, config *Config, actions *Actions, lxdProfile *LXDProfile) *charmBase {
return &charmBase{
meta: meta,
manifest: manifest,
config: config,
actions: actions,
lxdProfile: lxdProfile,
}
}
// Revision returns the revision number for the charm
// expanded in dir.
func (c *charmBase) Revision() int {
return c.revision
}
// Version returns the VCS version representing the version file from archive.
func (c *charmBase) Version() string {
return c.version
}
// Meta returns the Meta representing the metadata.yaml file
// for the charm expanded in dir.
func (c *charmBase) Meta() *Meta {
return c.meta
}
// Config returns the Config representing the config.yaml file
// for the charm expanded in dir.
func (c *charmBase) Config() *Config {
return c.config
}
// Actions returns the Actions representing the actions.yaml file
// for the charm expanded in dir.
func (c *charmBase) Actions() *Actions {
return c.actions
}
// LXDProfile returns the LXDProfile representing the lxd-profile.yaml file
// for the charm expanded in dir.
func (c *charmBase) LXDProfile() *LXDProfile {
return c.lxdProfile
}
// Manifest returns the Manifest representing the manifest.yaml file
// for the charm expanded in dir.
func (c *charmBase) Manifest() *Manifest {
return c.manifest
}
// SetVersion changes the charm version. This affects
// the version reported by Version and the version of the
// charm created.
func (c *charmBase) SetVersion(version string) {
c.version = version
}
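// Illustrative sketch (not part of the package): assembling a charmBase from
// already-parsed components and reading it back through the accessors.
// Assumes "fmt" is imported in this file.
func exampleCharmBase(meta *Meta, manifest *Manifest, config *Config) {
	c := NewCharmBase(meta, manifest, config, NewActions(), NewLXDProfile())
	c.SetVersion("abcd123")
	fmt.Println(c.Meta().Name, c.Version(), c.Revision())
}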
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"os"
"path/filepath"
"github.com/juju/juju/internal/errors"
)
// ReadCharmDirMetadata reads and parses the metadata file for a charm directory.
func ReadCharmDirMetadata(path string) (*Meta, error) {
reader, err := os.Open(filepath.Join(path, "metadata.yaml"))
if _, ok := err.(*os.PathError); ok {
return nil, errors.Errorf("metadata.yaml: %w", FileNotFound)
} else if err != nil {
return nil, errors.Errorf(`reading "metadata.yaml" file: %w`, err)
}
defer reader.Close()
meta, err := ReadMeta(reader)
if err != nil {
return nil, errors.Errorf(`parsing "metadata.yaml" file: %w`, err)
}
return meta, nil
}
// ReadCharmDirManifest reads and parses the manifest file for a charm directory.
func ReadCharmDirManifest(path string) (*Manifest, error) {
reader, err := os.Open(filepath.Join(path, "manifest.yaml"))
if _, ok := err.(*os.PathError); ok {
return nil, errors.Errorf("manifest.yaml: %w", FileNotFound)
} else if err != nil {
return nil, errors.Errorf(`reading "manifest.yaml" file: %w`, err)
}
defer reader.Close()
manifest, err := ReadManifest(reader)
if err != nil {
return nil, errors.Errorf(`parsing "manifest.yaml" file: %w`, err)
}
return manifest, nil
}
// ReadCharmDirConfig reads and parses the config file for a charm directory.
func ReadCharmDirConfig(path string) (*Config, error) {
reader, err := os.Open(filepath.Join(path, "config.yaml"))
if _, ok := err.(*os.PathError); ok {
return nil, errors.Errorf("config.yaml: %w", FileNotFound)
} else if err != nil {
return nil, errors.Errorf(`reading "config.yaml" file: %w`, err)
}
defer reader.Close()
config, err := ReadConfig(reader)
if err != nil {
return nil, errors.Errorf(`parsing "config.yaml" file: %w`, err)
}
return config, nil
}
// ReadCharmDirActions reads and parses the actions file for a charm directory.
func ReadCharmDirActions(charmName string, path string) (*Actions, error) {
reader, err := os.Open(filepath.Join(path, "actions.yaml"))
if _, ok := err.(*os.PathError); ok {
return nil, errors.Errorf("actions.yaml: %w", FileNotFound)
} else if err != nil {
return nil, errors.Errorf(`reading "actions.yaml" file: %w`, err)
}
defer reader.Close()
actions, err := ReadActionsYaml(charmName, reader)
if err != nil {
return nil, errors.Errorf(`parsing "actions.yaml" file: %w`, err)
}
return actions, nil
}
// ReadCharmDirRevision reads the revision file for a charm directory.
func ReadCharmDirRevision(path string) (int, error) {
reader, err := os.Open(filepath.Join(path, "revision"))
if _, ok := err.(*os.PathError); ok {
return 0, errors.Errorf("revision: %w", FileNotFound)
} else if err != nil {
return 0, errors.Errorf(`reading "revision" file: %w`, err)
}
defer reader.Close()
var revision int
_, err = fmt.Fscan(reader, &revision)
if err != nil {
return 0, errors.Errorf(`parsing "revision" file: %w`, err)
}
return revision, nil
}
func ReadCharmDirLXDProfile(path string) (*LXDProfile, error) {
reader, err := os.Open(filepath.Join(path, "lxd-profile.yaml"))
if _, ok := err.(*os.PathError); ok {
return nil, errors.Errorf("lxd-profile.yaml: %w", FileNotFound)
} else if err != nil {
return nil, errors.Errorf(`reading "lxd-profile.yaml" file: %w`, err)
}
defer reader.Close()
lxdProfile, err := ReadLXDProfile(reader)
if err != nil {
return nil, errors.Errorf(`parsing "lxd-profile.yaml" file: %w`, err)
}
return lxdProfile, nil
}
func ReadCharmDirVersion(path string) (string, error) {
reader, err := os.Open(filepath.Join(path, "version"))
if _, ok := err.(*os.PathError); ok {
return "", errors.Errorf("version: %w", FileNotFound)
} else if err != nil {
return "", errors.Errorf(`reading "version" file: %w`, err)
}
defer reader.Close()
return readVersion(reader)
}
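// Illustrative sketch (not part of the package): loading the pieces of an
// expanded charm directory with the readers above, using this file's imports.
func exampleReadCharmDir(path string) error {
	meta, err := ReadCharmDirMetadata(path)
	if err != nil {
		return err
	}
	rev, err := ReadCharmDirRevision(path)
	if err != nil {
		return err
	}
	fmt.Printf("charm %q is at revision %d\n", meta.Name, rev)
	return nil
}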
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"io"
"net/url"
"strconv"
"github.com/juju/errors"
"github.com/juju/schema"
"gopkg.in/yaml.v2"
)
const (
// ErrUnknownOption is returned when an unknown option is encountered.
ErrUnknownOption = errors.ConstError("unknown option")
)
// Settings is a group of charm config option names and values. A Settings
// S is considered valid by the Config C if every key in S is an option in
// C, and every value either has the correct type or is nil.
type Settings map[string]interface{}
// Option represents a single charm config option.
type Option struct {
Type string `yaml:"type"`
Description string `yaml:"description,omitempty"`
Default interface{} `yaml:"default,omitempty"`
}
// error replaces any supplied non-nil error with a new error describing a
// validation failure for the supplied value.
func (option Option) error(err *error, name string, value interface{}) {
if *err != nil {
*err = fmt.Errorf("option %q expected %s, got %#v", name, option.Type, value)
}
}
const secretScheme = "secret"
type secretC struct{}
// Coerce implements schema.Checker.Coerce for secretC.
func (c secretC) Coerce(v interface{}, path []string) (interface{}, error) {
s, err := schema.String().Coerce(v, path)
if err != nil {
return nil, err
}
str := s.(string)
if str == "" {
return "", nil
}
u, err := url.Parse(str)
if err != nil {
return nil, errors.Trace(err)
}
if u.Scheme == "" {
return nil, errors.NotValidf("secret URI scheme missing")
}
if u.Scheme != secretScheme {
return nil, errors.NotValidf("secret URI scheme %q", u.Scheme)
}
return str, nil
}
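// Illustrative sketch (not part of the package): how values of the "secret"
// option type coerce. The URIs below are made up for illustration.
func exampleSecretCoerce() {
	if v, err := (secretC{}).Coerce("secret:9m4e2mr0ui3e8a215n4g", nil); err == nil {
		fmt.Println(v) // a well-formed secret URI is returned unchanged
	}
	if _, err := (secretC{}).Coerce("http://example.com", nil); err != nil {
		fmt.Println(err) // any scheme other than "secret" is rejected
	}
}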
// validate returns an appropriately-typed value for the supplied value, or
// returns an error if it cannot be converted to the correct type. Nil values
// are always considered valid.
func (option Option) validate(name string, value interface{}) (_ interface{}, err error) {
if value == nil {
return nil, nil
}
if checker := optionTypeCheckers[option.Type]; checker != nil {
defer option.error(&err, name, value)
if value, err = checker.Coerce(value, nil); err != nil {
return nil, err
}
return value, nil
}
return nil, fmt.Errorf("option %q has unknown type %q", name, option.Type)
}
var optionTypeCheckers = map[string]schema.Checker{
"string": schema.String(),
"int": schema.Int(),
"float": schema.Float(),
"boolean": schema.Bool(),
"secret": secretC{},
}
func (option Option) parse(name, str string) (val interface{}, err error) {
switch option.Type {
case "string", "secret":
return str, nil
case "int":
val, err = strconv.ParseInt(str, 10, 64)
case "float":
val, err = strconv.ParseFloat(str, 64)
case "boolean":
val, err = strconv.ParseBool(str)
default:
return nil, fmt.Errorf("option %q has unknown type %q", name, option.Type)
}
defer option.error(&err, name, str)
return
}
// Config represents the supported configuration options for a charm,
// as declared in its config.yaml file.
type Config struct {
Options map[string]Option
}
// NewConfig returns a new Config without any options.
func NewConfig() *Config {
return &Config{map[string]Option{}}
}
// ReadConfig reads a Config in YAML format.
func ReadConfig(r io.Reader) (*Config, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
var config *Config
if err := yaml.Unmarshal(data, &config); err != nil {
return nil, err
}
if config == nil {
return nil, fmt.Errorf("invalid config: empty configuration")
}
if config.Options == nil {
// We are allowed an empty configuration if the options
// field is explicitly specified, but there is no easy way
// to tell if it was specified or not without unmarshaling
// into interface{} and explicitly checking the field.
var configInterface interface{}
if err := yaml.Unmarshal(data, &configInterface); err != nil {
return nil, err
}
m, _ := configInterface.(map[interface{}]interface{})
if _, ok := m["options"]; !ok {
return nil, fmt.Errorf("invalid config: empty configuration")
}
}
for name, option := range config.Options {
switch option.Type {
case "string", "secret", "int", "float", "boolean":
case "":
// Missing type is valid in python.
option.Type = "string"
default:
return nil, fmt.Errorf("invalid config: option %q has unknown type %q", name, option.Type)
}
def := option.Default
if def == "" && (option.Type == "string" || option.Type == "secret") {
// Skip normal validation for compatibility with pyjuju.
} else if option.Default, err = option.validate(name, def); err != nil {
option.error(&err, name, def)
return nil, fmt.Errorf("invalid config default: %v", err)
}
config.Options[name] = option
}
return config, nil
}
// option returns the named option from the config, or an error if no such
// option exists.
func (c *Config) option(name string) (Option, error) {
if option, ok := c.Options[name]; ok {
return option, nil
}
return Option{}, fmt.Errorf("%w %q", ErrUnknownOption, name)
}
// DefaultSettings returns settings containing the default value of every
// option in the config. Default values may be nil.
func (c *Config) DefaultSettings() Settings {
out := make(Settings)
for name, option := range c.Options {
out[name] = option.Default
}
return out
}
// ValidateSettings returns a copy of the supplied settings with a consistent type
// for each value. It returns an error if the settings contain unknown keys
// or invalid values.
func (c *Config) ValidateSettings(settings Settings) (Settings, error) {
out := make(Settings)
for name, value := range settings {
if option, err := c.option(name); err != nil {
return nil, err
} else if value, err = option.validate(name, value); err != nil {
return nil, err
}
out[name] = value
}
return out, nil
}
// FilterSettings returns the subset of the supplied settings that are valid.
func (c *Config) FilterSettings(settings Settings) Settings {
out := make(Settings)
for name, value := range settings {
if option, err := c.option(name); err == nil {
if value, err := option.validate(name, value); err == nil {
out[name] = value
}
}
}
return out
}
// ParseSettingsStrings returns settings derived from the supplied map. Every
// value in the map must be parseable to the correct type for the option
// identified by its key. Empty values are interpreted as nil.
func (c *Config) ParseSettingsStrings(values map[string]string) (Settings, error) {
out := make(Settings)
for name, str := range values {
option, err := c.option(name)
if err != nil {
return nil, err
}
value, err := option.parse(name, str)
if err != nil {
return nil, err
}
out[name] = value
}
return out, nil
}
// ParseSettingsYAML returns settings derived from the supplied YAML data. The
// YAML must unmarshal to a map of strings to settings data; the supplied key
// must be present in the map, and must point to a map in which every value
// must have, or be a string parseable to, the correct type for the associated
// config option. Empty strings and nil values are both interpreted as nil.
func (c *Config) ParseSettingsYAML(yamlData []byte, key string) (Settings, error) {
var allSettings map[string]Settings
if err := yaml.Unmarshal(yamlData, &allSettings); err != nil {
return nil, fmt.Errorf("cannot parse settings data: %v", err)
}
settings, ok := allSettings[key]
if !ok {
return nil, fmt.Errorf("no settings found for %q", key)
}
out := make(Settings)
for name, value := range settings {
option, err := c.option(name)
if err != nil {
return nil, err
}
// Accept string values for compatibility with python.
if str, ok := value.(string); ok {
if value, err = option.parse(name, str); err != nil {
return nil, err
}
} else if value, err = option.validate(name, value); err != nil {
return nil, err
}
out[name] = value
}
return out, nil
}
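// Illustrative sketch (not part of the package): parsing a small config.yaml
// and turning user-supplied strings into typed settings. The YAML content is
// made up, and "strings" is assumed to be imported in addition to this file's
// existing imports.
func exampleConfig() error {
	const yamlText = `
options:
  port:
    type: int
    default: 8080
  name:
    type: string
`
	cfg, err := ReadConfig(strings.NewReader(yamlText))
	if err != nil {
		return err
	}
	settings, err := cfg.ParseSettingsStrings(map[string]string{"port": "9090"})
	if err != nil {
		return err
	}
	fmt.Println(settings["port"], cfg.DefaultSettings()["name"]) // 9090 <nil>
	return nil
}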
// Copyright 2016 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"strings"
"github.com/juju/collections/set"
"github.com/juju/schema"
)
// ExtraBinding represents an extra bindable endpoint that is not a relation.
type ExtraBinding struct {
Name string `json:"Name"`
}
// When specified, the "extra-bindings" section in the metadata.yaml
// should have the following format:
//
// extra-bindings:
//
// "<endpoint-name>":
// ...
//
// Endpoint names are strings and must not match existing relation names from
// the Provides, Requires, or Peers metadata sections. The values beside each
// endpoint name must be left out (i.e. "foo": <anything> is invalid).
var extraBindingsSchema = schema.Map(schema.NonEmptyString("binding name"), schema.Nil(""))
func parseMetaExtraBindings(data interface{}) (map[string]ExtraBinding, error) {
if data == nil {
return nil, nil
}
bindingsMap := data.(map[interface{}]interface{})
result := make(map[string]ExtraBinding)
for name := range bindingsMap {
stringName := name.(string)
result[stringName] = ExtraBinding{Name: stringName}
}
return result, nil
}
func validateMetaExtraBindings(meta Meta) error {
extraBindings := meta.ExtraBindings
if extraBindings == nil {
return nil
} else if len(extraBindings) == 0 {
return fmt.Errorf("extra bindings cannot be empty when specified")
}
usedExtraNames := set.NewStrings()
for name, binding := range extraBindings {
if binding.Name == "" || name == "" {
return fmt.Errorf("missing binding name")
}
if binding.Name != name {
return fmt.Errorf("mismatched extra binding name: got %q, expected %q", binding.Name, name)
}
usedExtraNames.Add(name)
}
usedRelationNames := set.NewStrings()
for relationName := range meta.CombinedRelations() {
usedRelationNames.Add(relationName)
}
notAllowedNames := usedExtraNames.Intersection(usedRelationNames)
if !notAllowedNames.IsEmpty() {
notAllowedList := strings.Join(notAllowedNames.SortedValues(), ", ")
return fmt.Errorf("relation names (%s) cannot be used in extra bindings", notAllowedList)
}
return nil
}
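// Illustrative sketch (not part of the package): validating a Meta carrying
// extra bindings. The Meta literal is made up for illustration.
func exampleExtraBindings() {
	meta := Meta{
		Name: "demo",
		ExtraBindings: map[string]ExtraBinding{
			"public": {Name: "public"},
		},
	}
	if err := validateMetaExtraBindings(meta); err != nil {
		fmt.Println("invalid extra bindings:", err)
	}
}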
// Copyright 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package hooks
// Kind enumerates the different kinds of hooks that exist.
type Kind string
const (
// None of these hooks are ever associated with a relation; each of them
// represents a change to the state of the unit as a whole. The values
// themselves are all valid hook names.
// In normal operation, a unit will run at least the `install`, `start`, `config-changed`
// and `stop` hooks over the course of its lifetime.
// The `install` hook always runs once, and only once, before any other hook.
Install Kind = "install"
// The `start` hook always runs once immediately after the first config-changed
// hook. Also, on kubernetes charms, whenever a unit’s pod churns, `start` will
// be fired again on that unit.
Start Kind = "start"
// The `config-changed` hook always runs once immediately after the install hook,
// and likewise after the upgrade-charm hook. It also runs whenever the service
// configuration changes, and when recovering from transient unit agent errors.
ConfigChanged Kind = "config-changed"
// The `upgrade-charm` hook always runs once immediately after the charm directory
// contents have been changed by an unforced charm upgrade operation, and *may* do
// so after a forced upgrade; but will *not* be run after a forced upgrade from an
// existing error state. (Consequently, neither will the config-changed hook that
// would ordinarily follow the upgrade-charm.)
UpgradeCharm Kind = "upgrade-charm"
// The `stop` hook is the last hook to be run before the unit is destroyed. In the
// future, it may be called in other situations.
Stop Kind = "stop"
Remove Kind = "remove"
Action Kind = "action"
LeaderElected Kind = "leader-elected"
LeaderDeposed Kind = "leader-deposed"
UpdateStatus Kind = "update-status"
// These hooks require an associated secret.
SecretChanged Kind = "secret-changed"
SecretExpired Kind = "secret-expired"
SecretRemove Kind = "secret-remove"
SecretRotate Kind = "secret-rotate"
// These 5 hooks require an associated relation, and the name of the relation
// unit whose change triggered the hook. The hook file names that these
// kinds represent will be prefixed by the relation name; for example,
// "db-relation-joined".
RelationCreated Kind = "relation-created"
// The "relation-joined" hook always runs once when a related unit is first seen.
RelationJoined Kind = "relation-joined"
// The "relation-changed" hook for a given unit always runs once immediately
// following the relation-joined hook for that unit, and subsequently whenever
// the related unit changes its settings (by calling relation-set and exiting
// without error). Note that "immediately" only applies within the context of
// this particular runtime relation -- that is, when "foo-relation-joined" is
// run for unit "bar/99" in relation id "foo:123", the only guarantee is that
// the next hook to be run *in relation id "foo:123"* will be "foo-relation-changed"
// for "bar/99". Unit hooks may intervene, as may hooks for other relations,
// and even for other "foo" relations.
RelationChanged Kind = "relation-changed"
// The "relation-departed" hook for a given unit always runs once when a related
// unit is no longer related. After the "relation-departed" hook has run, no
// further notifications will be received from that unit; however, its settings
// will remain accessible via relation-get for the complete lifetime of the
// relation.
RelationDeparted Kind = "relation-departed"
// The "relation-broken" hook is not specific to any unit, and always runs once
// when the local unit is ready to depart the relation itself. Before this hook
// is run, a relation-departed hook will be executed for every unit known to be
// related; it will never run while the relation appears to have members, but it
// may be the first and only hook to run for a given relation. The stop hook will
// not run while relations remain to be broken.
RelationBroken Kind = "relation-broken"
// These hooks require an associated storage. The hook file names that these
// kinds represent will be prefixed by the storage name; for example,
// "shared-fs-storage-attached".
StorageAttached Kind = "storage-attached"
StorageDetaching Kind = "storage-detaching"
// These hooks require an associated workload/container, and the name of the workload/container
// whose change triggered the hook. The hook file names that these
// kinds represent will be prefixed by the workload/container name; for example,
// "mycontainer-pebble-ready".
PebbleCustomNotice Kind = "pebble-custom-notice"
PebbleReady Kind = "pebble-ready"
PebbleCheckFailed Kind = "pebble-check-failed"
PebbleCheckRecovered Kind = "pebble-check-recovered"
)
var unitHooks = []Kind{
Install,
Start,
ConfigChanged,
UpgradeCharm,
Stop,
Remove,
LeaderElected,
LeaderDeposed,
UpdateStatus,
}
// UnitHooks returns all known unit hook kinds.
func UnitHooks() []Kind {
hooks := make([]Kind, len(unitHooks))
copy(hooks, unitHooks)
return hooks
}
var relationHooks = []Kind{
RelationCreated,
RelationJoined,
RelationChanged,
RelationDeparted,
RelationBroken,
}
// RelationHooks returns all known relation hook kinds.
func RelationHooks() []Kind {
hooks := make([]Kind, len(relationHooks))
copy(hooks, relationHooks)
return hooks
}
var storageHooks = []Kind{
StorageAttached,
StorageDetaching,
}
// StorageHooks returns all known storage hook kinds.
func StorageHooks() []Kind {
hooks := make([]Kind, len(storageHooks))
copy(hooks, storageHooks)
return hooks
}
var workloadHooks = []Kind{
PebbleCheckFailed,
PebbleCheckRecovered,
PebbleCustomNotice,
PebbleReady,
}
// WorkloadHooks returns all known container hook kinds.
func WorkloadHooks() []Kind {
hooks := make([]Kind, len(workloadHooks))
copy(hooks, workloadHooks)
return hooks
}
// IsRelation returns whether the Kind represents a relation hook.
func (kind Kind) IsRelation() bool {
switch kind {
case RelationCreated, RelationJoined, RelationChanged, RelationDeparted, RelationBroken:
return true
}
return false
}
// IsStorage returns whether the Kind represents a storage hook.
func (kind Kind) IsStorage() bool {
switch kind {
case StorageAttached, StorageDetaching:
return true
}
return false
}
// IsWorkload returns whether the Kind represents a workload hook.
func (kind Kind) IsWorkload() bool {
switch kind {
case PebbleCheckFailed, PebbleCheckRecovered, PebbleCustomNotice, PebbleReady:
return true
}
return false
}
var secretHooks = []Kind{
SecretChanged, SecretExpired, SecretRemove, SecretRotate,
}
// SecretHooks returns all secret hook kinds.
func SecretHooks() []Kind {
hooks := make([]Kind, len(secretHooks))
copy(hooks, secretHooks)
return hooks
}
// IsSecret returns whether the Kind represents a secret hook.
func (kind Kind) IsSecret() bool {
switch kind {
case SecretChanged, SecretExpired, SecretRemove, SecretRotate:
return true
}
return false
}
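// Illustrative sketch (not part of the package): querying hook kinds.
// Assumes "fmt" is imported.
func exampleKinds() {
	fmt.Println(RelationJoined.IsRelation()) // true
	fmt.Println(StorageAttached.IsStorage()) // true
	fmt.Println(SecretRotate.IsSecret())     // true
	fmt.Println(len(UnitHooks()))            // 9 unit hook kinds
}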
// Copyright 2018 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"io"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"gopkg.in/yaml.v2"
)
// LXDProfiler defines a way to access a LXDProfile from a charm.
type LXDProfiler interface {
// LXDProfile returns the LXDProfile found in lxd-profile.yaml of the charm
LXDProfile() *LXDProfile
}
// LXDProfile is the same as ProfilePut defined in github.com/lxc/lxd/shared/api/profile.go
type LXDProfile struct {
Config map[string]string `json:"config" yaml:"config"`
Description string `json:"description" yaml:"description"`
Devices map[string]map[string]string `json:"devices" yaml:"devices"`
}
// NewLXDProfile creates a LXDProfile with the internal data structures
// initialised to non-nil values.
func NewLXDProfile() *LXDProfile {
return &LXDProfile{
Config: map[string]string{},
Devices: map[string]map[string]string{},
}
}
// ReadLXDProfile reads in a LXDProfile from a charm's lxd-profile.yaml.
// It is not validated at this point so that the caller can choose to override
// any validation.
func ReadLXDProfile(r io.Reader) (*LXDProfile, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
profile := NewLXDProfile()
if err := yaml.Unmarshal(data, profile); err != nil {
return nil, errors.Annotate(err, "failed to unmarshall lxd-profile.yaml")
}
return profile, nil
}
// ValidateConfigDevices validates the Config and Devices properties of the LXDProfile.
// Allowed device types: unix-char, unix-block, gpu, usb.
// Disallowed config prefixes: boot*, limits* and migration*.
// An empty profile will not return an error.
func (profile *LXDProfile) ValidateConfigDevices() error {
for _, val := range profile.Devices {
goodDevs := set.NewStrings("unix-char", "unix-block", "gpu", "usb")
if devType, ok := val["type"]; ok {
if !goodDevs.Contains(devType) {
return fmt.Errorf("invalid lxd-profile.yaml: contains device type %q", devType)
}
}
}
for key := range profile.Config {
if strings.HasPrefix(key, "boot") ||
strings.HasPrefix(key, "limits") ||
strings.HasPrefix(key, "migration") {
return fmt.Errorf("invalid lxd-profile.yaml: contains config value %q", key)
}
}
return nil
}
// Empty returns true if neither devices nor config has been defined in the profile.
func (profile *LXDProfile) Empty() bool {
return len(profile.Devices) < 1 && len(profile.Config) < 1
}
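// Illustrative sketch (not part of the package): reading and validating an
// lxd-profile.yaml. The YAML content is made up for illustration; this file's
// existing imports are assumed to be sufficient.
func exampleLXDProfile() error {
	const yamlText = `
config:
  security.nesting: "true"
devices:
  tun:
    type: unix-char
    path: /dev/net/tun
`
	profile, err := ReadLXDProfile(strings.NewReader(yamlText))
	if err != nil {
		return err
	}
	// Config keys starting with boot, limits or migration would fail here.
	if err := profile.ValidateConfigDevices(); err != nil {
		return err
	}
	fmt.Println(profile.Empty()) // false
	return nil
}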
// Copyright 2021 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"io"
"github.com/juju/errors"
"github.com/juju/schema"
"gopkg.in/yaml.v2"
"github.com/juju/juju/core/arch"
)
// Manifest represents the recording of the building of the charm or bundle.
// The manifest file complements metadata.yaml, carrying additional
// information about how the charm or bundle was built.
type Manifest struct {
Bases []Base `yaml:"bases"`
}
// Validate checks the manifest to ensure there are no empty names or channels,
// and that all architectures are supported.
func (m *Manifest) Validate() error {
for _, b := range m.Bases {
if err := b.Validate(); err != nil {
return errors.Annotate(err, "validating manifest")
}
}
return nil
}
func (m *Manifest) UnmarshalYAML(f func(interface{}) error) error {
raw := make(map[interface{}]interface{})
err := f(&raw)
if err != nil {
return err
}
v, err := schema.List(baseSchema).Coerce(raw["bases"], nil)
if err != nil {
return errors.Annotatef(err, "coerce")
}
newV, ok := v.([]interface{})
if !ok {
return errors.Annotatef(err, "converting")
}
bases, err := parseBases(newV)
if err != nil {
return err
}
*m = Manifest{Bases: bases}
return nil
}
func parseBases(input interface{}) ([]Base, error) {
var err error
if input == nil {
return nil, nil
}
var res []Base
for _, v := range input.([]interface{}) {
var base Base
baseMap := v.(map[string]interface{})
if value, ok := baseMap["name"]; ok {
base.Name = value.(string)
}
if value, ok := baseMap["channel"]; ok {
base.Channel, err = ParseChannelNormalize(value.(string))
if err != nil {
return nil, errors.Annotatef(err, "parsing channel %q", value.(string))
}
}
base.Architectures = parseArchitectureList(baseMap["architectures"])
err = base.Validate()
if err != nil {
return nil, errors.Trace(err)
}
res = append(res, base)
}
return res, nil
}
// ReadManifest reads in a Manifest from a charm's manifest.yaml. Some
// validation is done when unmarshalling the manifest, including
// verification that base.Name is a supported operating system. Full
// validation is done by calling Validate().
func ReadManifest(r io.Reader) (*Manifest, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
var manifest *Manifest
if err := yaml.Unmarshal(data, &manifest); err != nil {
return nil, errors.Annotatef(err, "manifest")
}
if manifest == nil {
return nil, errors.Annotatef(err, "invalid base in manifest")
}
return manifest, nil
}
var baseSchema = schema.FieldMap(
schema.Fields{
"name": schema.String(),
"channel": schema.String(),
"architectures": schema.List(schema.String()),
}, schema.Defaults{
"name": schema.Omit,
"channel": schema.Omit,
"architectures": schema.Omit,
})
func parseArchitectureList(list interface{}) []string {
if list == nil {
return nil
}
slice := list.([]interface{})
result := make([]string, 0, len(slice))
for _, elem := range slice {
result = append(result, arch.NormaliseArch(elem.(string)))
}
return result
}
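// Illustrative sketch (not part of the package): reading a manifest.yaml.
// The YAML content is made up, and "fmt" and "strings" are assumed to be
// imported in addition to this file's existing imports.
func exampleManifest() error {
	const yamlText = `
bases:
  - name: ubuntu
    channel: "22.04"
    architectures: [amd64]
`
	manifest, err := ReadManifest(strings.NewReader(yamlText))
	if err != nil {
		return err
	}
	if err := manifest.Validate(); err != nil {
		return err
	}
	fmt.Println(len(manifest.Bases), manifest.Bases[0].Name) // 1 ubuntu
	return nil
}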
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"io"
"regexp"
"sort"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/juju/schema"
"github.com/juju/utils/v4"
"github.com/juju/version/v2"
"gopkg.in/yaml.v2"
"github.com/juju/juju/internal/charm/assumes"
"github.com/juju/juju/internal/charm/hooks"
"github.com/juju/juju/internal/charm/resource"
)
// RelationScope describes the scope of a relation.
type RelationScope string
// Note that schema doesn't support custom string types,
// so when we use these values in a schema.Checker,
// we must store them as strings, not RelationScopes.
const (
ScopeGlobal RelationScope = "global"
ScopeContainer RelationScope = "container"
)
// RelationRole defines the role of a relation.
type RelationRole string
const (
RoleProvider RelationRole = "provider"
RoleRequirer RelationRole = "requirer"
RolePeer RelationRole = "peer"
)
// StorageType defines a storage type.
type StorageType string
const (
StorageBlock StorageType = "block"
StorageFilesystem StorageType = "filesystem"
)
// Storage represents a charm's storage requirement.
type Storage struct {
// Name is the name of the store.
//
// Name has no default, and must be specified.
Name string
// Description is a description of the store.
//
// Description has no default, and is optional.
Description string
// Type is the storage type: filesystem or block-device.
//
// Type has no default, and must be specified.
Type StorageType
// Shared indicates that the storage is shared between all units of
// an application deployed from the charm. It is an error to attempt to
// assign non-shareable storage to a "shared" storage requirement.
//
// Shared defaults to false.
Shared bool
// ReadOnly indicates that the storage should be made read-only if
// possible. If the storage cannot be made read-only, Juju will warn
// the user.
//
// ReadOnly defaults to false.
ReadOnly bool
// CountMin is the number of storage instances that must be attached
// to the charm for it to be useful; the charm will not install until
// this number has been satisfied. This must be a non-negative number.
//
// CountMin defaults to 1 for singleton stores.
CountMin int
// CountMax is the largest number of storage instances that can be
// attached to the charm. If CountMax is -1, then there is no upper
// bound.
//
// CountMax defaults to 1 for singleton stores.
CountMax int
// MinimumSize is the minimum size of store that the charm needs to
// work at all. This is not a recommended size or a comfortable size
// or a will-work-well size, just a bare minimum below which the charm
// is going to break.
// MinimumSize requires a unit, one of MGTPEZY, and is stored as MiB.
//
// There is no default MinimumSize; if left unspecified, a provider
// specific default will be used, typically 1GB for block storage.
MinimumSize uint64
// Location is the mount location for filesystem stores. For multi-
// stores, the location acts as the parent directory for each mounted
// store.
//
// Location has no default, and is optional.
Location string
// Properties allow the charm author to characterise the relative storage
// performance requirements and sensitivities for each store.
// e.g. “transient” is used to indicate that non-persistent storage is acceptable,
// such as tmpfs or ephemeral instance disks.
//
// Properties has no default, and is optional.
Properties []string
}
// DeviceType defines a device type.
type DeviceType string
// Device represents a charm's device requirement (GPU for example).
type Device struct {
// Name is the name of the device.
Name string
// Description is a description of the device.
Description string
// Type is the device type.
// Currently supported types are:
// - gpu
// - nvidia.com/gpu
// - amd.com/gpu
Type DeviceType
// CountMin is the min number of devices that the charm requires.
CountMin int64
// CountMax is the max number of devices that the charm requires.
CountMax int64
}
// Relation represents a single relation defined in the charm
// metadata.yaml file.
type Relation struct {
Name string
Role RelationRole
Interface string
Optional bool
Limit int
Scope RelationScope
}
// ImplementedBy returns whether the relation is implemented by the supplied charm.
func (r Relation) ImplementedBy(meta *Meta) bool {
if r.IsImplicit() {
return true
}
var m map[string]Relation
switch r.Role {
case RoleProvider:
m = meta.Provides
case RoleRequirer:
m = meta.Requires
case RolePeer:
m = meta.Peers
default:
panic(errors.Errorf("unknown relation role %q", r.Role))
}
rel, found := m[r.Name]
if !found {
return false
}
if rel.Interface == r.Interface {
switch r.Scope {
case ScopeGlobal:
return rel.Scope != ScopeContainer
case ScopeContainer:
return true
default:
panic(errors.Errorf("unknown relation scope %q", r.Scope))
}
}
return false
}
// IsImplicit returns whether the relation is supplied by juju itself,
// rather than by a charm.
func (r Relation) IsImplicit() bool {
return (r.Name == "juju-info" &&
r.Interface == "juju-info" &&
r.Role == RoleProvider)
}
// RunAs defines which user to run a certain process as.
type RunAs string
const (
RunAsDefault RunAs = ""
RunAsRoot RunAs = "root"
RunAsSudoer RunAs = "sudoer"
RunAsNonRoot RunAs = "non-root"
)
// Meta represents all the known content that may be defined
// within a charm's metadata.yaml file.
type Meta struct {
Name string `json:"Name"`
Summary string `json:"Summary"`
Description string `json:"Description"`
Subordinate bool `json:"Subordinate"`
Provides map[string]Relation `json:"Provides,omitempty"`
Requires map[string]Relation `json:"Requires,omitempty"`
Peers map[string]Relation `json:"Peers,omitempty"`
ExtraBindings map[string]ExtraBinding `json:"ExtraBindings,omitempty"`
Categories []string `json:"Categories,omitempty"`
Tags []string `json:"Tags,omitempty"`
Storage map[string]Storage `json:"Storage,omitempty"`
Devices map[string]Device `json:"Devices,omitempty"`
Resources map[string]resource.Meta `json:"Resources,omitempty"`
Terms []string `json:"Terms,omitempty"`
MinJujuVersion version.Number `json:"min-juju-version,omitempty"`
// v2
Containers map[string]Container `json:"containers,omitempty" yaml:"containers,omitempty"`
Assumes *assumes.ExpressionTree `json:"assumes,omitempty" yaml:"assumes,omitempty"`
CharmUser RunAs `json:"charm-user,omitempty" yaml:"charm-user,omitempty"`
}
// Container specifies the resource that provides a workload container's image,
// the mounts it wants, and optionally the uid/gid it runs as.
type Container struct {
Resource string `json:"resource,omitempty" yaml:"resource,omitempty"`
Mounts []Mount `json:"mounts,omitempty" yaml:"mounts,omitempty"`
Uid *int `json:"uid,omitempty" yaml:"uid,omitempty"`
Gid *int `json:"gid,omitempty" yaml:"gid,omitempty"`
}
// Mount allows a container to mount a storage filesystem from the storage top-level directive.
type Mount struct {
Storage string `json:"storage,omitempty" yaml:"storage,omitempty"`
Location string `json:"location,omitempty" yaml:"location,omitempty"`
}
func generateRelationHooks(relName string, allHooks map[string]bool) {
for _, hookName := range hooks.RelationHooks() {
allHooks[fmt.Sprintf("%s-%s", relName, hookName)] = true
}
}
func generateContainerHooks(containerName string, allHooks map[string]bool) {
// Containers using pebble trigger workload hooks.
for _, hookName := range hooks.WorkloadHooks() {
allHooks[fmt.Sprintf("%s-%s", containerName, hookName)] = true
}
}
func generateStorageHooks(storageName string, allHooks map[string]bool) {
for _, hookName := range hooks.StorageHooks() {
allHooks[fmt.Sprintf("%s-%s", storageName, hookName)] = true
}
}
// Hooks returns a map of all possible valid hooks, taking relations
// into account. It's a map to enable fast lookups, and the value is
// always true.
func (m Meta) Hooks() map[string]bool {
allHooks := make(map[string]bool)
// Unit hooks
for _, hookName := range hooks.UnitHooks() {
allHooks[string(hookName)] = true
}
// Secret hooks
for _, hookName := range hooks.SecretHooks() {
allHooks[string(hookName)] = true
}
// Relation hooks
for hookName := range m.Provides {
generateRelationHooks(hookName, allHooks)
}
for hookName := range m.Requires {
generateRelationHooks(hookName, allHooks)
}
for hookName := range m.Peers {
generateRelationHooks(hookName, allHooks)
}
for storageName := range m.Storage {
generateStorageHooks(storageName, allHooks)
}
for containerName := range m.Containers {
generateContainerHooks(containerName, allHooks)
}
return allHooks
}
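// Illustrative sketch (not part of the package): the hook names produced for a
// charm with one requires relation and one container. The Meta literal is made
// up for illustration.
func exampleHooks() {
	m := Meta{
		Name:       "demo",
		Requires:   map[string]Relation{"db": {Name: "db", Role: RoleRequirer, Interface: "mysql"}},
		Containers: map[string]Container{"workload": {}},
	}
	all := m.Hooks()
	fmt.Println(all["db-relation-joined"])    // true
	fmt.Println(all["workload-pebble-ready"]) // true
	fmt.Println(all["config-changed"])        // true
}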
// Used for parsing Categories and Tags.
func parseStringList(list interface{}) []string {
if list == nil {
return nil
}
slice := list.([]interface{})
result := make([]string, 0, len(slice))
for _, elem := range slice {
result = append(result, elem.(string))
}
return result
}
var validTermName = regexp.MustCompile(`^[a-z](-?[a-z0-9]+)+$`)
// TermsId represents a single term id. The term can either be owned
// or "public" (meaning there is no owner).
// The Revision starts at 1. Therefore a value of 0 means the revision
// is unset.
type TermsId struct {
Tenant string
Owner string
Name string
Revision int
}
// Validate returns an error if the Term contains invalid data.
func (t *TermsId) Validate() error {
if t.Tenant != "" && t.Tenant != "cs" {
if !validTermName.MatchString(t.Tenant) {
return errors.Errorf("wrong term tenant format %q", t.Tenant)
}
}
if t.Owner != "" && !names.IsValidUser(t.Owner) {
return errors.Errorf("wrong owner format %q", t.Owner)
}
if !validTermName.MatchString(t.Name) {
return errors.Errorf("wrong term name format %q", t.Name)
}
if t.Revision < 0 {
return errors.Errorf("negative term revision")
}
return nil
}
// String returns the term in canonical form.
// This would be one of:
//
// tenant:owner/name/revision
// tenant:name
// owner/name/revision
// owner/name
// name/revision
// name
func (t *TermsId) String() string {
id := make([]byte, 0, len(t.Tenant)+1+len(t.Owner)+1+len(t.Name)+4)
if t.Tenant != "" {
id = append(id, t.Tenant...)
id = append(id, ':')
}
if t.Owner != "" {
id = append(id, t.Owner...)
id = append(id, '/')
}
id = append(id, t.Name...)
if t.Revision != 0 {
id = append(id, '/')
id = strconv.AppendInt(id, int64(t.Revision), 10)
}
return string(id)
}
// ParseTerm takes a termID as a string and parses it into a Term.
// A complete term is in the form:
// tenant:owner/name/revision
// This function accepts partially specified identifiers
// typically in one of the following forms:
// name
// owner/name
// owner/name/27 # Revision 27
// name/283 # Revision 283
// cs:owner/name # Tenant cs
func ParseTerm(s string) (*TermsId, error) {
tenant := ""
termid := s
if t := strings.SplitN(s, ":", 2); len(t) == 2 {
tenant = t[0]
termid = t[1]
}
tokens := strings.Split(termid, "/")
var term TermsId
switch len(tokens) {
case 1: // "name"
term = TermsId{
Tenant: tenant,
Name: tokens[0],
}
case 2: // owner/name or name/123
termRevision, err := strconv.Atoi(tokens[1])
if err != nil { // owner/name
term = TermsId{
Tenant: tenant,
Owner: tokens[0],
Name: tokens[1],
}
} else { // name/123
term = TermsId{
Tenant: tenant,
Name: tokens[0],
Revision: termRevision,
}
}
case 3: // owner/name/123
termRevision, err := strconv.Atoi(tokens[2])
if err != nil {
return nil, errors.Errorf("invalid revision number %q %v", tokens[2], err)
}
term = TermsId{
Tenant: tenant,
Owner: tokens[0],
Name: tokens[1],
Revision: termRevision,
}
default:
return nil, errors.Errorf("unknown term id format %q", s)
}
if err := term.Validate(); err != nil {
return nil, errors.Trace(err)
}
return &term, nil
}
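// Illustrative sketch (not part of the package): parsing a term id and
// printing it back in canonical form. The term id is made up.
func exampleParseTerm() {
	term, err := ParseTerm("cs:acme/db-terms/3")
	if err != nil {
		return
	}
	fmt.Println(term.Owner, term.Revision) // acme 3
	fmt.Println(term.String())             // cs:acme/db-terms/3
}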
// ReadMeta reads the content of a metadata.yaml file and returns
// its representation.
// The data is verified as unambiguous, but not validated.
func ReadMeta(r io.Reader) (*Meta, error) {
data, err := io.ReadAll(r)
if err != nil {
return nil, err
}
var meta Meta
err = yaml.Unmarshal(data, &meta)
if err != nil {
return nil, err
}
return &meta, nil
}
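// Illustrative sketch (not part of the package): reading a minimal
// metadata.yaml. The YAML content is made up for illustration.
func exampleReadMeta() error {
	const yamlText = `
name: demo
summary: A demo charm.
description: A longer description of the demo charm.
provides:
  website: http
`
	meta, err := ReadMeta(strings.NewReader(yamlText))
	if err != nil {
		return err
	}
	fmt.Println(meta.Name, meta.Provides["website"].Interface) // demo http
	return nil
}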
// UnmarshalYAML implements yaml.Unmarshaler (yaml.v2).
func (meta *Meta) UnmarshalYAML(f func(interface{}) error) error {
raw := make(map[interface{}]interface{})
err := f(&raw)
if err != nil {
return err
}
if err := ensureUnambiguousFormat(raw); err != nil {
return err
}
v, err := charmSchema.Coerce(raw, nil)
if err != nil {
return errors.New("metadata: " + err.Error())
}
m := v.(map[string]interface{})
meta1, err := parseMeta(m)
if err != nil {
return err
}
*meta = *meta1
// Assumes blocks have their own dedicated parser so we need to invoke
// it here and attach the resulting expression tree (if any) to the
// metadata
var assumesBlock = struct {
Assumes *assumes.ExpressionTree `yaml:"assumes"`
}{}
if err := f(&assumesBlock); err != nil {
return err
}
meta.Assumes = assumesBlock.Assumes
return nil
}
func parseMeta(m map[string]interface{}) (*Meta, error) {
var meta Meta
var err error
meta.Name = m["name"].(string)
// Schema decodes as int64, but the int range should be good
// enough for revisions.
meta.Summary = m["summary"].(string)
meta.Description = m["description"].(string)
meta.Provides = parseRelations(m["provides"], RoleProvider)
meta.Requires = parseRelations(m["requires"], RoleRequirer)
meta.Peers = parseRelations(m["peers"], RolePeer)
if meta.ExtraBindings, err = parseMetaExtraBindings(m["extra-bindings"]); err != nil {
return nil, err
}
meta.Categories = parseStringList(m["categories"])
meta.Tags = parseStringList(m["tags"])
if subordinate := m["subordinate"]; subordinate != nil {
meta.Subordinate = subordinate.(bool)
}
meta.Storage = parseStorage(m["storage"])
meta.Devices = parseDevices(m["devices"])
if err != nil {
return nil, err
}
meta.MinJujuVersion, err = parseMinJujuVersion(m["min-juju-version"])
if err != nil {
return nil, err
}
meta.Terms = parseStringList(m["terms"])
meta.Resources, err = parseMetaResources(m["resources"])
if err != nil {
return nil, err
}
// v2 parsing
meta.Containers, err = parseContainers(m["containers"], meta.Resources, meta.Storage)
if err != nil {
return nil, errors.Annotatef(err, "parsing containers")
}
meta.CharmUser, err = parseCharmUser(m["charm-user"])
if err != nil {
return nil, errors.Annotatef(err, "parsing charm-user")
}
return &meta, nil
}
// MarshalYAML implements yaml.Marshaler (yaml.v2).
// It is recommended to call Check() before calling this method,
// otherwise you may get metadata which is neither v1 nor v2 format.
func (m Meta) MarshalYAML() (interface{}, error) {
var minver string
if m.MinJujuVersion != version.Zero {
minver = m.MinJujuVersion.String()
}
return struct {
Name string `yaml:"name"`
Summary string `yaml:"summary"`
Description string `yaml:"description"`
Provides map[string]marshaledRelation `yaml:"provides,omitempty"`
Requires map[string]marshaledRelation `yaml:"requires,omitempty"`
Peers map[string]marshaledRelation `yaml:"peers,omitempty"`
ExtraBindings map[string]interface{} `yaml:"extra-bindings,omitempty"`
Categories []string `yaml:"categories,omitempty"`
Tags []string `yaml:"tags,omitempty"`
Subordinate bool `yaml:"subordinate,omitempty"`
Storage map[string]Storage `yaml:"storage,omitempty"`
Devices map[string]Device `yaml:"devices,omitempty"`
Terms []string `yaml:"terms,omitempty"`
MinJujuVersion string `yaml:"min-juju-version,omitempty"`
Resources map[string]marshaledResourceMeta `yaml:"resources,omitempty"`
Containers map[string]marshaledContainer `yaml:"containers,omitempty"`
Assumes *assumes.ExpressionTree `yaml:"assumes,omitempty"`
}{
Name: m.Name,
Summary: m.Summary,
Description: m.Description,
Provides: marshaledRelations(m.Provides),
Requires: marshaledRelations(m.Requires),
Peers: marshaledRelations(m.Peers),
ExtraBindings: marshaledExtraBindings(m.ExtraBindings),
Categories: m.Categories,
Tags: m.Tags,
Subordinate: m.Subordinate,
Storage: m.Storage,
Devices: m.Devices,
Terms: m.Terms,
MinJujuVersion: minver,
Resources: marshaledResources(m.Resources),
Containers: marshaledContainers(m.Containers),
Assumes: m.Assumes,
}, nil
}
type marshaledResourceMeta struct {
Path string `yaml:"filename"` // TODO(ericsnow) Change to "path"?
Type string `yaml:"type,omitempty"`
Description string `yaml:"description,omitempty"`
}
func marshaledResources(rs map[string]resource.Meta) map[string]marshaledResourceMeta {
rs1 := make(map[string]marshaledResourceMeta, len(rs))
for name, r := range rs {
r1 := marshaledResourceMeta{
Path: r.Path,
Description: r.Description,
}
if r.Type != resource.TypeFile {
r1.Type = r.Type.String()
}
rs1[name] = r1
}
return rs1
}
func marshaledRelations(relations map[string]Relation) map[string]marshaledRelation {
marshaled := make(map[string]marshaledRelation)
for name, relation := range relations {
marshaled[name] = marshaledRelation(relation)
}
return marshaled
}
type marshaledRelation Relation
func (r marshaledRelation) MarshalYAML() (interface{}, error) {
// See calls to ifaceExpander in charmSchema.
var noLimit int
if !r.Optional && r.Limit == noLimit && r.Scope == ScopeGlobal {
// All attributes are default, so use the simple string form of the relation.
return r.Interface, nil
}
mr := struct {
Interface string `yaml:"interface"`
Limit *int `yaml:"limit,omitempty"`
Optional bool `yaml:"optional,omitempty"`
Scope RelationScope `yaml:"scope,omitempty"`
}{
Interface: r.Interface,
Optional: r.Optional,
}
if r.Limit != noLimit {
mr.Limit = &r.Limit
}
if r.Scope != ScopeGlobal {
mr.Scope = r.Scope
}
return mr, nil
}
func marshaledExtraBindings(bindings map[string]ExtraBinding) map[string]interface{} {
marshaled := make(map[string]interface{})
for _, binding := range bindings {
marshaled[binding.Name] = nil
}
return marshaled
}
type marshaledContainer Container
func marshaledContainers(c map[string]Container) map[string]marshaledContainer {
marshaled := make(map[string]marshaledContainer)
for k, v := range c {
marshaled[k] = marshaledContainer(v)
}
return marshaled
}
func (c marshaledContainer) MarshalYAML() (interface{}, error) {
mc := struct {
Resource string `yaml:"resource,omitempty"`
Mounts []Mount `yaml:"mounts,omitempty"`
}{
Resource: c.Resource,
Mounts: c.Mounts,
}
return mc, nil
}
// Format is the metadata format version of the parsed charm.
type Format int
// Formats are the different versions of charm metadata supported.
const (
FormatUnknown Format = iota
FormatV1 Format = iota
FormatV2 Format = iota
)
// Check checks that the metadata is well-formed.
func (m Meta) Check(format Format, reasons ...FormatSelectionReason) error {
switch format {
case FormatV1:
return errors.NotValidf("charm metadata without bases in manifest")
case FormatV2:
err := m.checkV2(reasons)
if err != nil {
return errors.Trace(err)
}
default:
return errors.Errorf("unknown format %v", format)
}
if err := validateMetaExtraBindings(m); err != nil {
return errors.Errorf("charm %q has invalid extra bindings: %v", m.Name, err)
}
// Subordinate charms must have at least one relation that
// has container scope, otherwise they can't relate to the
// principal.
if m.Subordinate {
valid := false
if m.Requires != nil {
for _, relationData := range m.Requires {
if relationData.Scope == ScopeContainer {
valid = true
break
}
}
}
if !valid {
return errors.Errorf("subordinate charm %q lacks \"requires\" relation with container scope", m.Name)
}
}
names := make(map[string]bool)
for name, store := range m.Storage {
if store.Location != "" && store.Type != StorageFilesystem {
return errors.Errorf(`charm %q storage %q: location may not be specified for "type: %s"`, m.Name, name, store.Type)
}
if store.Type == "" {
return errors.Errorf("charm %q storage %q: type must be specified", m.Name, name)
}
if store.CountMin < 0 {
return errors.Errorf("charm %q storage %q: invalid minimum count %d", m.Name, name, store.CountMin)
}
if store.CountMax == 0 || store.CountMax < -1 {
return errors.Errorf("charm %q storage %q: invalid maximum count %d", m.Name, name, store.CountMax)
}
if names[name] {
return errors.Errorf("charm %q storage %q: duplicated storage name", m.Name, name)
}
names[name] = true
}
names = make(map[string]bool)
for name, device := range m.Devices {
if device.Type == "" {
return errors.Errorf("charm %q device %q: type must be specified", m.Name, name)
}
if device.CountMax >= 0 && device.CountMin >= 0 && device.CountMin > device.CountMax {
return errors.Errorf(
"charm %q device %q: maximum count %d can not be smaller than minimum count %d",
m.Name, name, device.CountMax, device.CountMin)
}
if names[name] {
return errors.Errorf("charm %q device %q: duplicated device name", m.Name, name)
}
names[name] = true
}
if err := validateMetaResources(m.Resources); err != nil {
return err
}
for _, term := range m.Terms {
if _, terr := ParseTerm(term); terr != nil {
return errors.Trace(terr)
}
}
return nil
}
func (m Meta) checkV2(reasons []FormatSelectionReason) error {
if len(reasons) == 0 {
return errors.NotValidf("metadata v2 without manifest.yaml")
}
if m.MinJujuVersion != version.Zero {
return errors.NotValidf("min-juju-version in metadata v2")
}
return nil
}
func reservedName(charmName, endpointName string) (reserved bool, reason string) {
if strings.HasPrefix(charmName, "juju-") {
return false, ""
}
if endpointName == "juju" {
return true, `"juju" is a reserved name`
}
if strings.HasPrefix(endpointName, "juju-") {
return true, `the "juju-" prefix is reserved`
}
return false, ""
}
func parseRelations(relations interface{}, role RelationRole) map[string]Relation {
if relations == nil {
return nil
}
result := make(map[string]Relation)
for name, rel := range relations.(map[string]interface{}) {
relMap := rel.(map[string]interface{})
relation := Relation{
Name: name,
Role: role,
Interface: relMap["interface"].(string),
Optional: relMap["optional"].(bool),
}
if scope := relMap["scope"]; scope != nil {
relation.Scope = RelationScope(scope.(string))
}
if relMap["limit"] != nil {
// Schema defaults to int64, but we know
// the int range should be more than enough.
relation.Limit = int(relMap["limit"].(int64))
}
result[name] = relation
}
return result
}
// CombinedRelations returns all defined relations, regardless of their type,
// in a single map.
func (m Meta) CombinedRelations() map[string]Relation {
combined := make(map[string]Relation)
for name, relation := range m.Provides {
combined[name] = relation
}
for name, relation := range m.Requires {
combined[name] = relation
}
for name, relation := range m.Peers {
combined[name] = relation
}
return combined
}
// Schema coercer that expands the interface shorthand notation.
// A consistent format is easier to work with than considering the
// potential difference everywhere.
//
// Supports the following variants:
//
// provides:
// server: riak
// admin: http
// foobar:
// interface: blah
//
// provides:
// server:
// interface: mysql
// limit:
// optional: false
//
// In all input cases, the output is the fully specified interface
// representation as seen in the mysql interface description above.
func ifaceExpander(limit interface{}) schema.Checker {
return ifaceExpC{limit}
}
type ifaceExpC struct {
limit interface{}
}
var (
stringC = schema.String()
mapC = schema.StringMap(schema.Any())
)
func (c ifaceExpC) Coerce(v interface{}, path []string) (newv interface{}, err error) {
s, err := stringC.Coerce(v, path)
if err == nil {
newv = map[string]interface{}{
"interface": s,
"limit": c.limit,
"optional": false,
"scope": string(ScopeGlobal),
}
return
}
v, err = mapC.Coerce(v, path)
if err != nil {
return
}
m := v.(map[string]interface{})
if _, ok := m["limit"]; !ok {
m["limit"] = c.limit
}
return ifaceSchema.Coerce(m, path)
}
var ifaceSchema = schema.FieldMap(
schema.Fields{
"interface": schema.String(),
"limit": schema.OneOf(schema.Const(nil), schema.Int()),
"scope": schema.OneOf(schema.Const(string(ScopeGlobal)), schema.Const(string(ScopeContainer))),
"optional": schema.Bool(),
},
schema.Defaults{
"scope": string(ScopeGlobal),
"optional": false,
},
)
func parseStorage(stores interface{}) map[string]Storage {
if stores == nil {
return nil
}
result := make(map[string]Storage)
for name, store := range stores.(map[string]interface{}) {
storeMap := store.(map[string]interface{})
store := Storage{
Name: name,
Type: StorageType(storeMap["type"].(string)),
Shared: storeMap["shared"].(bool),
ReadOnly: storeMap["read-only"].(bool),
CountMin: 1,
CountMax: 1,
}
if desc, ok := storeMap["description"].(string); ok {
store.Description = desc
}
if multiple, ok := storeMap["multiple"].(map[string]interface{}); ok {
if r, ok := multiple["range"].([2]int); ok {
store.CountMin, store.CountMax = r[0], r[1]
}
}
if minSize, ok := storeMap["minimum-size"].(uint64); ok {
store.MinimumSize = minSize
}
if loc, ok := storeMap["location"].(string); ok {
store.Location = loc
}
if properties, ok := storeMap["properties"].([]interface{}); ok {
for _, p := range properties {
store.Properties = append(store.Properties, p.(string))
}
}
result[name] = store
}
return result
}
func parseDevices(devices interface{}) map[string]Device {
if devices == nil {
return nil
}
result := make(map[string]Device)
for name, device := range devices.(map[string]interface{}) {
deviceMap := device.(map[string]interface{})
device := Device{
Name: name,
Type: DeviceType(deviceMap["type"].(string)),
CountMin: 1,
CountMax: 1,
}
if desc, ok := deviceMap["description"].(string); ok {
device.Description = desc
}
if countmin, ok := deviceMap["countmin"].(int64); ok {
device.CountMin = countmin
}
if countmax, ok := deviceMap["countmax"].(int64); ok {
device.CountMax = countmax
}
result[name] = device
}
return result
}
func parseContainers(input interface{}, resources map[string]resource.Meta, storage map[string]Storage) (map[string]Container, error) {
var err error
if input == nil {
return nil, nil
}
containers := map[string]Container{}
for name, v := range input.(map[string]interface{}) {
containerMap := v.(map[string]interface{})
container := Container{}
if value, ok := containerMap["resource"]; ok {
container.Resource = value.(string)
}
if container.Resource != "" {
if r, ok := resources[container.Resource]; !ok {
return nil, errors.NotFoundf("referenced resource %q", container.Resource)
} else if r.Type != resource.TypeContainerImage {
return nil, errors.Errorf("referenced resource %q is not a %s",
container.Resource,
resource.TypeContainerImage.String())
}
}
container.Mounts, err = parseMounts(containerMap["mounts"], storage)
if err != nil {
return nil, errors.Annotatef(err, "container %q", name)
}
if value, ok := containerMap["uid"]; ok {
uid := int(value.(int64))
container.Uid = &uid
if uid >= 1000 && uid < 10000 {
return nil, errors.Errorf("container %q has invalid uid %d: uid cannot be in reserved range 1000-9999",
name, uid)
}
}
if value, ok := containerMap["gid"]; ok {
gid := int(value.(int64))
container.Gid = &gid
if gid >= 1000 && gid < 10000 {
return nil, errors.Errorf("container %q has invalid gid %d: gid cannot be in reserved range 1000-9999",
name, gid)
}
}
containers[name] = container
}
if len(containers) == 0 {
return nil, nil
}
return containers, nil
}
func parseMounts(input interface{}, storage map[string]Storage) ([]Mount, error) {
if input == nil {
return nil, nil
}
mounts := []Mount(nil)
for _, v := range input.([]interface{}) {
mount := Mount{}
mountMap := v.(map[string]interface{})
if value, ok := mountMap["storage"].(string); ok {
mount.Storage = value
}
if value, ok := mountMap["location"].(string); ok {
mount.Location = value
}
if mount.Storage == "" {
return nil, errors.Errorf("storage must be specified on mount")
}
if mount.Location == "" {
return nil, errors.Errorf("location must be specified on mount")
}
if _, ok := storage[mount.Storage]; !ok {
return nil, errors.NotValidf("storage %q", mount.Storage)
}
mounts = append(mounts, mount)
}
return mounts, nil
}
func parseMinJujuVersion(value any) (version.Number, error) {
if value == nil {
return version.Zero, nil
}
ver, err := version.Parse(value.(string))
if err != nil {
return version.Zero, errors.Annotate(err, "invalid min-juju-version")
}
return ver, nil
}
func parseCharmUser(value any) (RunAs, error) {
if value == nil {
return RunAsDefault, nil
}
v := RunAs(value.(string))
switch v {
case RunAsRoot, RunAsSudoer, RunAsNonRoot:
return v, nil
default:
return RunAsDefault, errors.Errorf("invalid charm-user %q expected one of %s, %s or %s", v,
RunAsRoot, RunAsSudoer, RunAsNonRoot)
}
}
var storageSchema = schema.FieldMap(
schema.Fields{
"type": schema.OneOf(schema.Const(string(StorageBlock)), schema.Const(string(StorageFilesystem))),
"shared": schema.Bool(),
"read-only": schema.Bool(),
"multiple": schema.FieldMap(
schema.Fields{
"range": storageCountC{}, // m, m-n, m+, m-
},
schema.Defaults{},
),
"minimum-size": storageSizeC{},
"location": schema.String(),
"description": schema.String(),
"properties": schema.List(propertiesC{}),
},
schema.Defaults{
"shared": false,
"read-only": false,
"multiple": schema.Omit,
"location": schema.Omit,
"description": schema.Omit,
"properties": schema.Omit,
"minimum-size": schema.Omit,
},
)
var deviceSchema = schema.FieldMap(
schema.Fields{
"description": schema.String(),
"type": schema.String(),
"countmin": deviceCountC{},
"countmax": deviceCountC{},
}, schema.Defaults{
"description": schema.Omit,
"countmin": schema.Omit,
"countmax": schema.Omit,
},
)
type deviceCountC struct{}
func (c deviceCountC) Coerce(v interface{}, path []string) (interface{}, error) {
s, err := schema.Int().Coerce(v, path)
if err != nil {
return 0, err
}
if m, ok := s.(int64); ok {
if m >= 0 {
return m, nil
}
}
return 0, errors.Errorf("invalid device count %d", s)
}
type storageCountC struct{}
var storageCountRE = regexp.MustCompile("^([0-9]+)([-+]|-[0-9]+)$")
func (c storageCountC) Coerce(v interface{}, path []string) (newv interface{}, err error) {
s, err := schema.OneOf(schema.Int(), stringC).Coerce(v, path)
if err != nil {
return nil, err
}
if m, ok := s.(int64); ok {
// We've got a count of the form "m": m represents
// both the minimum and maximum.
if m <= 0 {
return nil, errors.Errorf("%s: invalid count %v", strings.Join(path[1:], ""), m)
}
return [2]int{int(m), int(m)}, nil
}
match := storageCountRE.FindStringSubmatch(s.(string))
if match == nil {
return nil, errors.Errorf("%s: value %q does not match 'm', 'm-n', or 'm+'", strings.Join(path[1:], ""), s)
}
var m, n int
if m, err = strconv.Atoi(match[1]); err != nil {
return nil, err
}
if len(match[2]) == 1 {
// We've got a count of the form "m+" or "m-":
// m represents the minimum, and there is no
// upper bound.
n = -1
} else {
if n, err = strconv.Atoi(match[2][1:]); err != nil {
return nil, err
}
}
return [2]int{m, n}, nil
}
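// Illustrative sketch (not part of the package): the values the count forms
// above coerce to, where -1 in the second slot means there is no upper bound.
//
//    path := []string{"storage", "multiple", "range"}
//    storageCountC{}.Coerce(int64(3), path) // -> [2]int{3, 3}
//    storageCountC{}.Coerce("2-5", path)    // -> [2]int{2, 5}
//    storageCountC{}.Coerce("2+", path)     // -> [2]int{2, -1}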
type storageSizeC struct{}
func (c storageSizeC) Coerce(v interface{}, path []string) (newv interface{}, err error) {
s, err := schema.String().Coerce(v, path)
if err != nil {
return nil, err
}
return utils.ParseSize(s.(string))
}
type propertiesC struct{}
func (c propertiesC) Coerce(v interface{}, path []string) (newv interface{}, err error) {
return schema.OneOf(schema.Const("transient")).Coerce(v, path)
}
var containerSchema = schema.FieldMap(
schema.Fields{
"resource": schema.String(),
"mounts": schema.List(mountSchema),
"uid": schema.Int(),
"gid": schema.Int(),
}, schema.Defaults{
"resource": schema.Omit,
"mounts": schema.Omit,
"uid": schema.Omit,
"gid": schema.Omit,
})
var mountSchema = schema.FieldMap(
schema.Fields{
"storage": schema.String(),
"location": schema.String(),
}, schema.Defaults{
"storage": schema.Omit,
"location": schema.Omit,
})
var charmSchema = schema.FieldMap(
schema.Fields{
"name": schema.String(),
"summary": schema.String(),
"description": schema.String(),
"peers": schema.StringMap(ifaceExpander(nil)),
"provides": schema.StringMap(ifaceExpander(nil)),
"requires": schema.StringMap(ifaceExpander(nil)),
"extra-bindings": extraBindingsSchema,
"revision": schema.Int(), // Obsolete
"format": schema.Int(), // Obsolete
"subordinate": schema.Bool(),
"categories": schema.List(schema.String()),
"tags": schema.List(schema.String()),
"storage": schema.StringMap(storageSchema),
"devices": schema.StringMap(deviceSchema),
"resources": schema.StringMap(resourceSchema),
"terms": schema.List(schema.String()),
"min-juju-version": schema.String(),
"assumes": schema.List(schema.Any()),
"containers": schema.StringMap(containerSchema),
"charm-user": schema.String(),
},
schema.Defaults{
"provides": schema.Omit,
"requires": schema.Omit,
"peers": schema.Omit,
"extra-bindings": schema.Omit,
"revision": schema.Omit,
"format": schema.Omit,
"subordinate": schema.Omit,
"categories": schema.Omit,
"tags": schema.Omit,
"storage": schema.Omit,
"devices": schema.Omit,
"resources": schema.Omit,
"terms": schema.Omit,
"min-juju-version": schema.Omit,
"assumes": schema.Omit,
"containers": schema.Omit,
"charm-user": schema.Omit,
},
)
// ensureUnambiguousFormat returns an error if the raw data contains
// both metadata v1 and v2 contents. However, it is unable to definitively
// determine which format the charm uses, as the metadata does not contain bases.
func ensureUnambiguousFormat(raw map[interface{}]interface{}) error {
format := FormatUnknown
matched := []string(nil)
mismatched := []string(nil)
keys := []string(nil)
for k := range raw {
key, ok := k.(string)
if !ok {
// Non-string keys will be an error handled by the schema lib.
continue
}
keys = append(keys, key)
}
sort.Strings(keys)
for _, key := range keys {
detected := FormatUnknown
switch key {
case "containers", "assumes", "charm-user":
detected = FormatV2
case "series", "deployment", "min-juju-version":
detected = FormatV1
}
if detected == FormatUnknown {
continue
}
if format == FormatUnknown {
format = detected
}
if format == detected {
matched = append(matched, key)
} else {
mismatched = append(mismatched, key)
}
}
if mismatched != nil {
return errors.Errorf("ambiguous metadata: keys %s cannot be used with %s",
`"`+strings.Join(mismatched, `", "`)+`"`,
`"`+strings.Join(matched, `", "`)+`"`)
}
return nil
}
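// Illustrative sketch (hypothetical input): mixing a v2 key ("containers")
// with a v1 key ("series") is reported as ambiguous.
//
//    err := ensureUnambiguousFormat(map[interface{}]interface{}{
//        "name":       "demo",
//        "series":     "jammy",
//        "containers": map[interface{}]interface{}{},
//    })
//    // err: ambiguous metadata: keys "series" cannot be used with "containers"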
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"regexp"
"strings"
"github.com/juju/errors"
"github.com/juju/names/v6"
)
// OfferURL represents the location of an offered application and its
// associated exported endpoints.
type OfferURL struct {
// Source represents where the offer is hosted.
// If empty, the model is another model in the same controller.
Source string // "<controller-name>" or "<jaas>" or ""
// User is the user in whose namespace the offer is made.
// Where a model is specified, the user is the model owner.
User string
// ModelName is the name of the model providing the exported endpoints.
// It is only used for local URLs or for specifying models in the same
// controller.
ModelName string
// ApplicationName is the name of the application providing the exported endpoints.
ApplicationName string
}
// Path returns the path component of the URL.
func (u *OfferURL) Path() string {
var parts []string
if u.User != "" {
parts = append(parts, u.User)
}
if u.ModelName != "" {
parts = append(parts, u.ModelName)
}
path := strings.Join(parts, "/")
path = fmt.Sprintf("%s.%s", path, u.ApplicationName)
if u.Source == "" {
return path
}
return fmt.Sprintf("%s:%s", u.Source, path)
}
func (u *OfferURL) String() string {
return u.Path()
}
// AsLocal returns a copy of the URL with an empty (local) source.
func (u *OfferURL) AsLocal() *OfferURL {
localURL := *u
localURL.Source = ""
return &localURL
}
// HasEndpoint returns whether this offer URL includes an
// endpoint name in the application name.
func (u *OfferURL) HasEndpoint() bool {
return strings.Contains(u.ApplicationName, ":")
}
// modelApplicationRegexp parses urls of the form controller:user/model.application[:relname]
var modelApplicationRegexp = regexp.MustCompile(`(/?((?P<user>[^/]+)/)?(?P<model>[^.]*)(\.(?P<application>[^:]*(:.*)?))?)?`)
// IsValidOfferURL ensures that a URL string is a valid OfferURL.
func IsValidOfferURL(urlStr string) bool {
_, err := ParseOfferURL(urlStr)
return err == nil
}
// ParseOfferURL parses the specified URL string into an OfferURL.
// The URL string is of one of the forms:
//
// <model-name>.<application-name>
// <model-name>.<application-name>:<relation-name>
// <user>/<model-name>.<application-name>
// <user>/<model-name>.<application-name>:<relation-name>
// <controller>:<user>/<model-name>.<application-name>
// <controller>:<user>/<model-name>.<application-name>:<relation-name>
func ParseOfferURL(urlStr string) (*OfferURL, error) {
return parseOfferURL(urlStr)
}
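// Illustrative usage sketch (hypothetical controller, user, and model names):
//
//    u, err := ParseOfferURL("ctrl:admin/prod.mysql")
//    if err != nil {
//        return err
//    }
//    // u.Source == "ctrl", u.User == "admin", u.ModelName == "prod",
//    // u.ApplicationName == "mysql"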
// parseOfferURL parses the specified URL string into an OfferURL.
func parseOfferURL(urlStr string) (*OfferURL, error) {
urlParts, err := parseOfferURLParts(urlStr, false)
if err != nil {
return nil, err
}
url := OfferURL(*urlParts)
return &url, nil
}
// OfferURLParts contains various attributes of a URL.
type OfferURLParts OfferURL
// ParseOfferURLParts parses a partial URL, filling out what parts are supplied.
// This method is used to generate a filter used to query matching offer URLs.
func ParseOfferURLParts(urlStr string) (*OfferURLParts, error) {
return parseOfferURLParts(urlStr, true)
}
var endpointRegexp = regexp.MustCompile(`^[a-zA-Z0-9]+$`)
func maybeParseSource(urlStr string) (source, rest string) {
parts := strings.Split(urlStr, ":")
switch len(parts) {
case 3:
return parts[0], parts[1] + ":" + parts[2]
case 2:
if endpointRegexp.MatchString(parts[1]) {
return "", urlStr
}
return parts[0], parts[1]
}
return "", urlStr
}
func parseOfferURLParts(urlStr string, allowIncomplete bool) (*OfferURLParts, error) {
var result OfferURLParts
source, urlParts := maybeParseSource(urlStr)
valid := !strings.HasPrefix(urlStr, ":")
valid = valid && modelApplicationRegexp.MatchString(urlParts)
if valid {
result.Source = source
result.User = modelApplicationRegexp.ReplaceAllString(urlParts, "$user")
result.ModelName = modelApplicationRegexp.ReplaceAllString(urlParts, "$model")
result.ApplicationName = modelApplicationRegexp.ReplaceAllString(urlParts, "$application")
}
if !valid || strings.Contains(result.ModelName, "/") || strings.Contains(result.ApplicationName, "/") {
// TODO(wallyworld) - update error message when we support multi-controller and JAAS CMR
return nil, errors.Errorf("application offer URL has invalid form, must be [<user/]<model>.<appname>: %q", urlStr)
}
if !allowIncomplete && result.ModelName == "" {
return nil, errors.Errorf("application offer URL is missing model")
}
if !allowIncomplete && result.ApplicationName == "" {
return nil, errors.Errorf("application offer URL is missing application")
}
// Application name part may contain a relation name part, so strip that bit out
// before validating the name.
appName := strings.Split(result.ApplicationName, ":")[0]
// Validate the resulting URL part values.
if result.User != "" && !names.IsValidUser(result.User) {
return nil, errors.NotValidf("user name %q", result.User)
}
if result.ModelName != "" && !names.IsValidModelName(result.ModelName) {
return nil, errors.NotValidf("model name %q", result.ModelName)
}
if appName != "" && !names.IsValidApplication(appName) {
return nil, errors.NotValidf("application name %q", appName)
}
return &result, nil
}
// MakeURL constructs an offer URL from the specified components.
func MakeURL(user, model, application, controller string) string {
base := fmt.Sprintf("%s/%s.%s", user, model, application)
if controller == "" {
return base
}
return fmt.Sprintf("%s:%s", controller, base)
}
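// Illustrative sketch (hypothetical values):
//
//    MakeURL("admin", "prod", "mysql", "")     // -> "admin/prod.mysql"
//    MakeURL("admin", "prod", "mysql", "ctrl") // -> "ctrl:admin/prod.mysql"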
// Copyright 2019 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"encoding/base64"
"fmt"
"math"
"path/filepath"
"reflect"
"strings"
"github.com/juju/errors"
"github.com/mohae/deepcopy"
)
// ExtractBaseAndOverlayParts splits the bundle data into a base and
// overlay-specific bundle so that their union yields bd. To decide whether a
// field is overlay-specific, the implementation uses reflection and
// recursively scans the BundleData fields looking for fields annotated with
// the "overlay-only: true" tag.
//
// To produce the base bundle, the original bundle is filtered and all
// overlay-specific values are set to the zero value for their type. To produce
// the overlay-specific bundle, we once again filter the original bundle but
// this time zero out fields that do not contain any descendant fields that are
// overlay-specific.
//
// To clarify how this method works let's consider a bundle created via the
// yaml blob below:
//
// applications:
// apache2:
// charm: cs:apache2-26
// offers:
// my-offer:
// endpoints:
// - apache-website
// - website-cache
// my-other-offer:
// endpoints:
// - apache-website
// series: bionic
//
// The "offers" and "endpoints" attributes are overlay-specific fields. If we
// were to run this method and then marshal the results back to yaml we would
// get:
//
// The base bundle:
//
// applications:
// apache2:
// charm: cs:apache2-26
// series: bionic
//
// The overlay-specific bundle:
//
// applications:
// apache2:
// offers:
// my-offer:
// endpoints:
// - apache-website
// - website-cache
// my-other-offer:
// endpoints:
// - apache-website
//
// The two bundles returned by this method are copies of the original bundle
// data and can thus be safely manipulated by the caller.
func ExtractBaseAndOverlayParts(bd *BundleData) (base, overlay *BundleData, err error) {
base = cloneBundleData(bd)
_ = visitField(&visitorContext{
structVisitor: clearOverlayFields,
dropNonRequiredMapKeys: false,
}, base)
overlay = cloneBundleData(bd)
_ = visitField(&visitorContext{
structVisitor: clearNonOverlayFields,
dropNonRequiredMapKeys: true,
}, overlay)
return base, overlay, nil
}
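// Minimal usage sketch, assuming bd is a previously unmarshalled *BundleData:
//
//    base, overlay, err := ExtractBaseAndOverlayParts(bd)
//    if err != nil {
//        return err
//    }
//    // base has all overlay-only fields zeroed; overlay keeps only the
//    // overlay-only fields plus the ancestors needed to reach them.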
// cloneBundleData uses the deepcopy package to perform a deep copy of bd.
func cloneBundleData(bd *BundleData) *BundleData {
return deepcopy.Copy(bd).(*BundleData)
}
// VerifyNoOverlayFieldsPresent scans the contents of bd and returns an error
// if the bundle contains any overlay-specific values.
func VerifyNoOverlayFieldsPresent(bd *BundleData) error {
var (
errList []error
pathStack []string
)
ctx := &visitorContext{
structVisitor: func(ctx *visitorContext, val reflect.Value, typ reflect.Type) (foundOverlay bool) {
for i := 0; i < typ.NumField(); i++ {
structField := typ.Field(i)
// Skip non-exportable and empty fields
v := val.Field(i)
if !v.CanInterface() || isZero(v) {
continue
}
if isOverlayField(structField) {
errList = append(
errList,
fmt.Errorf(
"%s.%s can only appear in an overlay section",
strings.Join(pathStack, "."),
yamlName(structField),
),
)
foundOverlay = true
}
pathStack = append(pathStack, yamlName(structField))
if visitField(ctx, v.Interface()) {
foundOverlay = true
}
pathStack = pathStack[:len(pathStack)-1]
}
return foundOverlay
},
indexedElemPreVisitor: func(index interface{}) {
pathStack = append(pathStack, fmt.Sprint(index))
},
indexedElemPostVisitor: func(_ interface{}) {
pathStack = pathStack[:len(pathStack)-1]
},
}
_ = visitField(ctx, bd)
if len(errList) == 0 {
return nil
}
return &VerificationError{errList}
}
func yamlName(structField reflect.StructField) string {
fields := strings.Split(structField.Tag.Get("yaml"), ",")
if len(fields) == 0 || fields[0] == "" {
return strings.ToLower(structField.Name)
}
return fields[0]
}
type visitorContext struct {
structVisitor func(ctx *visitorContext, val reflect.Value, typ reflect.Type) bool
// An optional pre/post visitor for indexable items (slices, maps)
indexedElemPreVisitor func(index interface{})
indexedElemPostVisitor func(index interface{})
dropNonRequiredMapKeys bool
}
// visitField invokes ctx.structVisitor(val) if val is a struct and returns
// the visitor's result. If val is a slice or a map, visitField invokes
// specialized functions that support iterating such types.
func visitField(ctx *visitorContext, val interface{}) bool {
if val == nil {
return false
}
typ := reflect.TypeOf(val)
v := reflect.ValueOf(val)
// De-reference pointers
if v.Kind() == reflect.Ptr {
v = v.Elem()
if v.Kind() == reflect.Invalid {
return false
}
typ = v.Type()
}
switch typ.Kind() {
case reflect.Struct:
return ctx.structVisitor(ctx, v, typ)
case reflect.Map:
return visitFieldsInMap(ctx, v)
case reflect.Slice:
return visitFieldsInSlice(ctx, v)
}
// v is not a struct or something we can iterate to reach a struct
return false
}
// visitFieldsInMap iterates the map specified by val and recursively visits
// each map element. The returned value is the logical OR of the responses
// returned by visiting all map elements.
func visitFieldsInMap(ctx *visitorContext, val reflect.Value) (result bool) {
for _, key := range val.MapKeys() {
v := val.MapIndex(key)
if !v.CanInterface() {
continue
}
if ctx.indexedElemPreVisitor != nil {
ctx.indexedElemPreVisitor(key)
}
visRes := visitField(ctx, v.Interface())
result = visRes || result
// If the map value is a non-scalar value and the visitor
// returned false (don't retain), consult the dropNonRequiredMapKeys
// hint to decide whether we need to delete the key from the map.
//
// This is required when splitting bundles into base/overlay
// bits as empty map values would be encoded as empty objects
// that the overlay merge code would mis-interpret as deletions.
if !visRes && isNonScalar(v) && ctx.dropNonRequiredMapKeys {
val.SetMapIndex(key, reflect.Value{})
}
if ctx.indexedElemPostVisitor != nil {
ctx.indexedElemPostVisitor(key)
}
}
return result
}
// visitFieldsInSlice iterates the slice specified by val and recursively
// visits each element. The returned value is the logical OR of the responses
// returned by visiting all slice elements.
func visitFieldsInSlice(ctx *visitorContext, val reflect.Value) (result bool) {
for i := 0; i < val.Len(); i++ {
v := val.Index(i)
if !v.CanInterface() {
continue
}
if ctx.indexedElemPreVisitor != nil {
ctx.indexedElemPreVisitor(i)
}
result = visitField(ctx, v.Interface()) || result
if ctx.indexedElemPostVisitor != nil {
ctx.indexedElemPostVisitor(i)
}
}
return result
}
// clearOverlayFields is an implementation of structVisitor. It recursively
// visits all fields in the val struct and sets the ones that are tagged as
// overlay-only to the zero value for their particular type.
func clearOverlayFields(ctx *visitorContext, val reflect.Value, typ reflect.Type) (retainAncestors bool) {
for i := 0; i < typ.NumField(); i++ {
structField := typ.Field(i)
// Skip non-exportable and empty fields
v := val.Field(i)
if !v.CanInterface() || isZero(v) {
continue
}
// No need to recurse further down; just erase the field
if isOverlayField(structField) {
v.Set(reflect.Zero(v.Type()))
continue
}
_ = visitField(ctx, v.Interface())
retainAncestors = true
}
return retainAncestors
}
// clearNonOverlayFields is an implementation of structVisitor. It recursively
// visits all fields in the val struct and sets any field that does not contain
// any overlay-only descendants to the zero value for its particular type.
func clearNonOverlayFields(ctx *visitorContext, val reflect.Value, typ reflect.Type) (retainAncestors bool) {
for i := 0; i < typ.NumField(); i++ {
structField := typ.Field(i)
// Skip non-exportable and empty fields
v := val.Field(i)
if !v.CanInterface() || isZero(v) {
continue
}
// If this is an overlay field we need to preserve it and all
// its ancestor fields up to the root. However, we still need
// to visit its descendants in case we need to clear additional
// non-overlay fields further down the tree.
isOverlayField := isOverlayField(structField)
if isOverlayField {
retainAncestors = true
}
target := v.Interface()
if retain := visitField(ctx, target); !isOverlayField && !retain {
v.Set(reflect.Zero(v.Type()))
continue
}
retainAncestors = true
}
return retainAncestors
}
// isOverlayField returns true if a struct field is tagged as overlay-only.
func isOverlayField(structField reflect.StructField) bool {
return structField.Tag.Get("source") == "overlay-only"
}
// isZero reports whether v is the zero value for its type. It panics if the
// argument is invalid. The implementation has been copied from the upstream Go
// repo as it has not made its way to a stable Go release yet.
func isZero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return math.Float64bits(v.Float()) == 0
case reflect.Complex64, reflect.Complex128:
c := v.Complex()
return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if !isZero(v.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
return v.IsNil()
case reflect.String:
return v.Len() == 0
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if !isZero(v.Field(i)) {
return false
}
}
return true
default:
// This should never happen, but acts as a safeguard for later,
// as a default value doesn't make sense here.
panic(fmt.Sprintf("unexpected value of type %s passed to isZero", v.Kind().String()))
}
}
// ReadAndMergeBundleData reads N bundle data sources, composes their contents
// together and returns the result. The first bundle data source is treated as
// a base bundle while subsequent bundle data sources are treated as overlays
// which are sequentially merged onto the base bundle.
//
// Before returning the merged bundle, ReadAndMergeBundleData will also attempt
// to resolve any include directives present in the machine annotations,
// application options and annotations.
//
// When merging an overlay into a base bundle the following rules apply for the
// BundleData fields:
//
// - if an overlay specifies a bundle-level series, it overrides the base bundle
// series.
//
// - overlay-defined relations are appended to the base bundle relations
//
// - overlay-defined machines overwrite the base bundle machines.
//
// - if an overlay defines an application that is not present in the base bundle,
// it will get appended to the application list.
//
// - if an overlay defines an empty application or saas value, it will be removed
// from the base bundle together with any associated relations. For example, to
// remove an application named "mysql" the following overlay snippet can be
// provided:
// applications:
// mysql:
//
// - if an overlay defines an application that is also present in the base bundle
// the two application specs are merged together (see following rules)
//
// ApplicationSpec merge rules:
//
// - if the overlay defines a value for a scalar or slice field, it will overwrite
// the value from the base spec (e.g. trust, series etc).
//
// - if the overlay specifies a nil/empty value for a map field, then the map
// field of the base spec will be cleared.
//
// - if the overlay specifies a non-empty value for a map field, its key/value
// tuples are iterated and:
//
// - if the value is nil/zero and the value is non-scalar, it is deleted from
// the base spec.
//
// - otherwise, the key/value is inserted into the base spec overwriting any
// existing entries.
func ReadAndMergeBundleData(sources ...BundleDataSource) (*BundleData, error) {
var allParts []*BundleDataPart
var partSrcIndex []int
for srcIndex, src := range sources {
if src == nil {
continue
}
for _, part := range src.Parts() {
allParts = append(allParts, part)
partSrcIndex = append(partSrcIndex, srcIndex)
}
}
if len(allParts) == 0 {
return nil, errors.NotValidf("malformed bundle: bundle is empty")
}
// Treat the first part as the base bundle
base := allParts[0]
if err := VerifyNoOverlayFieldsPresent(base.Data); err != nil {
return nil, errors.Trace(err)
}
// Merge parts and resolve include directives
for index, part := range allParts {
// Resolve any re-writing from normalisation that could cause the presence
// field to be out of sync with the actual bundle representation.
resolveOverlayPresenceFields(part)
if index != 0 {
if err := applyOverlay(base, part); err != nil {
return nil, errors.Trace(err)
}
}
// Relative include directives are resolved using the base path
// of the datasource that yielded this part
srcIndex := partSrcIndex[index]
incResolver := sources[srcIndex].ResolveInclude
basePath := sources[srcIndex].BasePath()
for app, appData := range base.Data.Applications {
if appData == nil {
return nil, errors.Errorf("base application %q has no body", app)
}
resolvedCharm, err := resolveRelativeCharmPath(basePath, appData.Charm)
if err != nil {
return nil, errors.Annotatef(err, "resolving relative charm path %q for application %q", appData.Charm, app)
}
appData.Charm = resolvedCharm
for k, v := range appData.Options {
newV, changed, err := resolveIncludes(incResolver, v)
if err != nil {
return nil, errors.Annotatef(err, "processing option %q for application %q", k, app)
}
if changed {
appData.Options[k] = newV
}
}
for k, v := range appData.Annotations {
newV, changed, err := resolveIncludes(incResolver, v)
if err != nil {
return nil, errors.Annotatef(err, "processing annotation %q for application %q", k, app)
}
if changed {
appData.Annotations[k] = newV
}
}
}
for machine, machineData := range base.Data.Machines {
if machineData == nil {
continue
}
for k, v := range machineData.Annotations {
newV, changed, err := resolveIncludes(incResolver, v)
if err != nil {
return nil, errors.Annotatef(err, "processing annotation %q for machine %q", k, machine)
}
if changed {
machineData.Annotations[k] = newV
}
}
}
}
return base.Data, nil
}
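// Minimal usage sketch, assuming baseSource and overlaySource are
// BundleDataSource values obtained elsewhere (e.g. from local bundle files):
//
//    merged, err := ReadAndMergeBundleData(baseSource, overlaySource)
//    if err != nil {
//        return err
//    }
//    // merged is the base bundle with the overlay applied on top of it.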
// resolveOverlayPresenceFields exists because we expose an internal bundle
// representation to the consumers of the library. This makes it very
// difficult to know what was re-written during the normalisation phase
// without telling downstream consumers.
//
// The following attempts to detect when a normalisation has occurred but the
// presence field map has been left out of sync with the new changes.
func resolveOverlayPresenceFields(base *BundleDataPart) {
applications := base.PresenceMap.forField("applications")
if len(applications) == 0 {
return
}
for name, app := range base.Data.Applications {
if !applications.fieldPresent(name) {
continue
}
presence := applications.forField(name)
// If the presence map contains scale but not num_units, and app.Scale_ has
// been reset to zero while app.NumUnits is set, we can assume that a
// normalisation has occurred.
if presence.fieldPresent("scale") && !presence.fieldPresent("num_units") && app.Scale_ == 0 && app.NumUnits > 0 {
presence["num_units"] = presence["scale"]
}
}
}
func applyOverlay(base, overlay *BundleDataPart) error {
if overlay == nil || len(overlay.PresenceMap) == 0 {
return nil
}
if !overlay.PresenceMap.fieldPresent("applications") && len(overlay.Data.Applications) > 0 {
return errors.Errorf("bundle overlay file used deprecated 'services' key, this is not valid for bundle overlay files")
}
// Merge applications
if len(overlay.Data.Applications) != 0 {
if base.Data.Applications == nil {
base.Data.Applications = make(map[string]*ApplicationSpec, len(overlay.Data.Applications))
}
fpm := overlay.PresenceMap.forField("applications")
for srcAppName, srcAppSpec := range overlay.Data.Applications {
// If the overlay map points to an empty object, delete
// it from the base bundle
if isZero(reflect.ValueOf(srcAppSpec)) {
delete(base.Data.Applications, srcAppName)
base.Data.Relations = removeRelations(base.Data.Relations, srcAppName)
continue
}
// If this is a new application just append it; otherwise
// recursively merge the two application specs.
dstAppSpec, defined := base.Data.Applications[srcAppName]
if !defined {
base.Data.Applications[srcAppName] = srcAppSpec
continue
}
mergeStructs(dstAppSpec, srcAppSpec, fpm.forField(srcAppName))
}
}
// Merge SAAS blocks
if len(overlay.Data.Saas) != 0 {
if base.Data.Saas == nil {
base.Data.Saas = make(map[string]*SaasSpec, len(overlay.Data.Saas))
}
fpm := overlay.PresenceMap.forField("saas")
for srcSaasName, srcSaasSpec := range overlay.Data.Saas {
// If the overlay map points to an empty object, delete
// it from the base bundle
if isZero(reflect.ValueOf(srcSaasSpec)) {
delete(base.Data.Saas, srcSaasName)
base.Data.Relations = removeRelations(base.Data.Relations, srcSaasName)
continue
}
// if this is a new saas block just append it; otherwise
// recursively merge the two saas specs.
dstSaasSpec, defined := base.Data.Saas[srcSaasName]
if !defined {
base.Data.Saas[srcSaasName] = srcSaasSpec
continue
}
mergeStructs(dstSaasSpec, srcSaasSpec, fpm.forField(srcSaasName))
}
}
// If a default base is set in the overlay, it overrides the base bundle's.
if b := overlay.Data.DefaultBase; b != "" {
base.Data.DefaultBase = b
}
// Append any additional relations.
base.Data.Relations = append(base.Data.Relations, overlay.Data.Relations...)
// Override machine definitions.
if machines := overlay.Data.Machines; machines != nil {
base.Data.Machines = machines
}
return nil
}
// removeRelations removes any relation defined in data that references
// the application appName.
func removeRelations(data [][]string, appName string) [][]string {
var result [][]string
for _, relation := range data {
// Keep the dud relation in the set; it will be caught by the bundle
// verification code.
if len(relation) == 2 {
left, right := relation[0], relation[1]
if left == appName || strings.HasPrefix(left, appName+":") ||
right == appName || strings.HasPrefix(right, appName+":") {
continue
}
}
result = append(result, relation)
}
return result
}
// mergeStructs iterates the fields of srcStruct and merges them into the
// equivalent fields of dstStruct using the following rules:
//
// - if src defines a value for a scalar or slice field, it will overwrite
// the value from the dst (e.g. trust, series etc).
// - if the src specifies a nil/empty value for a map field, then the map
// field of dst will be cleared.
// - if the src specifies a non-empty value for a map field, its key/value
// tuples are iterated and:
// - if the value is nil/zero and non-scalar, it is deleted from the dst map.
// - otherwise, the key/value is inserted into the dst map overwriting any
// existing entries.
func mergeStructs(dstStruct, srcStruct interface{}, fpm FieldPresenceMap) {
dst := reflect.ValueOf(dstStruct)
src := reflect.ValueOf(srcStruct)
typ := src.Type()
// Dereference pointers
if src.Kind() == reflect.Ptr {
src = src.Elem()
typ = src.Type()
}
if dst.Kind() == reflect.Ptr {
dst = dst.Elem()
}
dstTyp := dst.Type()
// Sanity check
if typ.Kind() != reflect.Struct || typ != dstTyp {
panic(errors.Errorf("BUG: source/destination type mismatch; expected destination to be a %q; got %q", typ.Name(), dstTyp.Name()))
}
for i := 0; i < typ.NumField(); i++ {
// Skip non-exportable fields
structField := typ.Field(i)
srcVal := src.Field(i)
if !srcVal.CanInterface() {
continue
}
fieldName := yamlName(structField)
if !fpm.fieldPresent(fieldName) {
continue
}
switch srcVal.Kind() {
case reflect.Map:
// If a nil/empty map is provided then clear the destination map.
if isZero(srcVal) {
dst.Field(i).Set(reflect.MakeMap(srcVal.Type()))
continue
}
dstMap := dst.Field(i)
if dstMap.IsNil() {
dstMap.Set(reflect.MakeMap(srcVal.Type()))
}
for _, srcKey := range srcVal.MapKeys() {
// If the key points to an empty non-scalar value delete it from the dst map
srcMapVal := srcVal.MapIndex(srcKey)
if isZero(srcMapVal) && isNonScalar(srcMapVal) {
// Setting an empty value effectively deletes the key from the map
dstMap.SetMapIndex(srcKey, reflect.Value{})
continue
}
dstMap.SetMapIndex(srcKey, srcMapVal)
}
case reflect.Slice:
dst.Field(i).Set(srcVal)
default:
dst.Field(i).Set(srcVal)
}
}
}
// isNonScalar returns true if val is a non-scalar value such as a pointer,
// struct, map or slice.
func isNonScalar(val reflect.Value) bool {
kind := val.Kind()
if kind == reflect.Interface {
// Inspect the kind of the dynamic value stored in the interface;
// reflect.TypeOf(val) would report the kind of reflect.Value itself.
kind = val.Elem().Kind()
}
switch kind {
case reflect.Ptr, reflect.Struct,
reflect.Map, reflect.Slice, reflect.Array:
return true
default:
return false
}
}
// resolveIncludes operates on v, which is expected to be a string. It checks
// the value for the presence of an include directive. If such a directive is
// located, resolveIncludes invokes the provided includeResolver and returns
// its output after applying the appropriate encoding for the directive.
func resolveIncludes(includeResolver func(path string) ([]byte, error), v interface{}) (string, bool, error) {
directives := []struct {
directive string
encoder func([]byte) string
}{
{
directive: "include-file://",
encoder: func(d []byte) string {
return string(d)
},
},
{
directive: "include-base64://",
encoder: base64.StdEncoding.EncodeToString,
},
}
val, isString := v.(string)
if !isString {
return "", false, nil
}
for _, dir := range directives {
if !strings.HasPrefix(val, dir.directive) {
continue
}
path := val[len(dir.directive):]
data, err := includeResolver(path)
if err != nil {
return "", false, errors.Annotatef(err, "resolving include %q", path)
}
return dir.encoder(data), true, nil
}
return val, false, nil
}
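// Illustrative sketch (hypothetical resolver): how each directive is handled.
//
//    resolver := func(path string) ([]byte, error) { return []byte("secret"), nil }
//    resolveIncludes(resolver, "include-file://cfg.txt")   // -> "secret", true, nil
//    resolveIncludes(resolver, "include-base64://cfg.txt") // -> "c2VjcmV0", true, nil
//    resolveIncludes(resolver, "plain value")              // -> "plain value", false, nil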
// resolveRelativeCharmPath resolves charmURL into an absolute path relative
// to basePath if charmURL contains a relative path. Otherwise, the function
// returns back the original charmURL.
//
// Note: this function will only resolve paths. It will not check whether the
// referenced charm path actually exists. That is the job of the bundle
// validator.
func resolveRelativeCharmPath(basePath, charmURL string) (string, error) {
// We don't need to do anything for non-relative paths.
if !strings.HasPrefix(charmURL, ".") {
return charmURL, nil
}
return filepath.Abs(filepath.Join(basePath, charmURL))
}
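// Illustrative sketch (hypothetical Unix paths):
//
//    resolveRelativeCharmPath("/bundles/demo", "./mycharm") // -> "/bundles/demo/mycharm", nil
//    resolveRelativeCharmPath("/bundles/demo", "ch:mysql")  // -> "ch:mysql", nil (not relative)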
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package resource
import (
stdhash "hash"
"io"
"github.com/juju/errors"
"github.com/juju/utils/v4/hash"
)
var newHash, validateSum = hash.SHA384()
// Fingerprint represents the unique fingerprint value of a resource's data.
type Fingerprint struct {
hash.Fingerprint
}
// NewFingerprint wraps the provided raw fingerprint bytes.
// This function roundtrips with Fingerprint.Bytes().
func NewFingerprint(raw []byte) (Fingerprint, error) {
fp, err := hash.NewFingerprint(raw, validateSum)
if err != nil {
return Fingerprint{}, errors.Trace(err)
}
return Fingerprint{Fingerprint: fp}, nil
}
// ParseFingerprint wraps the provided raw fingerprint string.
// This function roundtrips with Fingerprint.String().
func ParseFingerprint(raw string) (Fingerprint, error) {
fp, err := hash.ParseHexFingerprint(raw, validateSum)
if err != nil {
return Fingerprint{}, errors.Trace(err)
}
return Fingerprint{Fingerprint: fp}, nil
}
// GenerateFingerprint returns the fingerprint for the provided data.
func GenerateFingerprint(reader io.Reader) (Fingerprint, error) {
fp, err := hash.GenerateFingerprint(reader, newHash)
if err != nil {
return Fingerprint{}, errors.Trace(err)
}
return Fingerprint{fp}, nil
}
// FingerprintHash is a hash that may be used to generate fingerprints.
type FingerprintHash struct {
stdhash.Hash
}
// NewFingerprintHash returns a hash that may be used to create fingerprints.
func NewFingerprintHash() *FingerprintHash {
return &FingerprintHash{
Hash: newHash(),
}
}
// Fingerprint returns the current fingerprint of the hash.
func (fph FingerprintHash) Fingerprint() Fingerprint {
fp := hash.NewValidFingerprint(fph)
return Fingerprint{Fingerprint: fp}
}
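// Minimal usage sketch: the same fingerprint can be produced in one shot with
// GenerateFingerprint or incrementally via FingerprintHash (strings is an
// assumed import in the caller):
//
//    fp, err := GenerateFingerprint(strings.NewReader("resource data"))
//    if err != nil {
//        return err
//    }
//    h := NewFingerprintHash()
//    _, _ = io.Copy(h, strings.NewReader("resource data"))
//    // h.Fingerprint().String() == fp.String()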
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package resource
import (
"fmt"
"strings"
"github.com/juju/errors"
)
// Meta holds the information about a resource, as stored
// in a charm's metadata.
type Meta struct {
// Name identifies the resource.
Name string
// Type identifies the type of resource (e.g. "file").
Type Type
// TODO(ericsnow) Rename Path to Filename?
// Path is the relative path of the file or directory where the
// resource will be stored under the unit's data directory. The path
// is resolved against a subdirectory assigned to the resource. For
// example, given an application named "spam", a resource "eggs", and a
// path "eggs.tgz", the fully resolved storage path for the resource
// would be:
// /var/lib/juju/agent/spam-0/resources/eggs/eggs.tgz
Path string
// Description holds optional user-facing info for the resource.
Description string
}
// Validate checks the resource metadata to ensure the data is valid.
func (meta Meta) Validate() error {
if meta.Name == "" {
return errors.NewNotValid(nil, "resource missing name")
}
var typeUnknown Type
if meta.Type == typeUnknown {
return errors.NewNotValid(nil, "resource missing type")
}
if err := meta.Type.Validate(); err != nil {
msg := fmt.Sprintf("invalid resource type %v: %v", meta.Type, err)
return errors.NewNotValid(nil, msg)
}
if meta.Type == TypeFile && meta.Path == "" {
// TODO(ericsnow) change "filename" to "path"
return errors.NewNotValid(nil, "resource missing filename")
}
if meta.Type == TypeFile {
if strings.Contains(meta.Path, "/") {
msg := fmt.Sprintf(`filename cannot contain "/" (got %q)`, meta.Path)
return errors.NewNotValid(nil, msg)
}
// TODO(ericsnow) Constrain Path to alphanumeric?
}
return nil
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package resource
import (
"github.com/juju/errors"
)
// These are the valid resource origins.
const (
originUnknown Origin = iota
OriginUpload
OriginStore
)
var origins = map[Origin]string{
OriginUpload: "upload",
OriginStore: "store",
}
// Origin identifies where a charm's resource comes from.
type Origin int
// ParseOrigin converts the provided string into an Origin.
// If it is not a known origin then an error is returned.
func ParseOrigin(value string) (Origin, error) {
for o, str := range origins {
if value == str {
return o, nil
}
}
return originUnknown, errors.Errorf("unknown origin %q", value)
}
// String returns the printable representation of the origin.
func (o Origin) String() string {
return origins[o]
}
// Validate ensures that the origin is correct.
func (o Origin) Validate() error {
// Ideally, only the (unavoidable) zero value would be invalid.
// However, typedef'ing int means that the use of int literals
// could result in invalid Type values other than the zero value.
if _, ok := origins[o]; !ok {
return errors.NewNotValid(nil, "unknown origin")
}
return nil
}
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package resource
import (
"fmt"
"github.com/juju/errors"
)
// Resource describes a charm's resource in the charm store.
type Resource struct {
Meta
// Origin identifies where the resource will come from.
Origin Origin
// Revision is the charm store revision of the resource.
Revision int
// Fingerprint is the SHA-384 checksum for the resource blob.
Fingerprint Fingerprint
// Size is the size of the resource, in bytes.
Size int64
}
// Validate checks the resource to ensure its data is valid.
func (res Resource) Validate() error {
if err := res.Meta.Validate(); err != nil {
return errors.Annotate(err, "bad metadata")
}
if err := res.Origin.Validate(); err != nil {
return errors.Annotate(err, "bad origin")
}
if err := res.validateRevision(); err != nil {
return errors.Annotate(err, "bad revision")
}
if res.Type == TypeFile {
if err := res.validateFileInfo(); err != nil {
return errors.Annotate(err, "bad file info")
}
}
return nil
}
func (res Resource) validateRevision() error {
if res.Origin == OriginUpload {
// We do not care about the revision, so we don't check it.
// TODO(ericsnow) Ensure Revision is 0 for OriginUpload?
return nil
}
if res.Revision < 0 && res.isFileAvailable() {
return errors.NewNotValid(nil, fmt.Sprintf("must be non-negative, got %d", res.Revision))
}
return nil
}
func (res Resource) validateFileInfo() error {
if res.Fingerprint.IsZero() {
if res.Size > 0 {
return errors.NewNotValid(nil, "missing fingerprint")
}
} else {
if err := res.Fingerprint.Validate(); err != nil {
return errors.Annotate(err, "bad fingerprint")
}
}
if res.Size < 0 {
return errors.NewNotValid(nil, "negative size")
}
return nil
}
// isFileAvailable determines whether or not the resource info indicates
// that the resource file is available.
func (res Resource) isFileAvailable() bool {
if !res.Fingerprint.IsZero() {
return true
}
if res.Size > 0 {
return true
}
return false
}
// Copyright 2016 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package resource
import (
"sort"
)
// Sort sorts the provided resources.
func Sort(resources []Resource) {
sort.Sort(byName(resources))
}
type byName []Resource
func (sorted byName) Len() int { return len(sorted) }
func (sorted byName) Swap(i, j int) { sorted[i], sorted[j] = sorted[j], sorted[i] }
func (sorted byName) Less(i, j int) bool { return sorted[i].Name < sorted[j].Name }
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package resource
import (
"github.com/juju/errors"
)
// These are the valid resource types (except for unknown).
const (
typeUnknown Type = iota
TypeFile
TypeContainerImage
)
var types = map[Type]string{
TypeFile: "file",
TypeContainerImage: "oci-image",
}
// Type enumerates the recognized resource types.
type Type int
// ParseType converts a string to a Type. If the given value does not
// match a recognized type then an error is returned.
func ParseType(value string) (Type, error) {
for rt, str := range types {
if value == str {
return rt, nil
}
}
return typeUnknown, errors.Errorf("unsupported resource type %q", value)
}
// String returns the printable representation of the type.
func (rt Type) String() string {
return types[rt]
}
// Validate ensures that the type is valid.
func (rt Type) Validate() error {
// Ideally, only the (unavoidable) zero value would be invalid.
// However, typedef'ing int means that the use of int literals
// could result in invalid Type values other than the zero value.
if _, ok := types[rt]; !ok {
return errors.NewNotValid(nil, "unknown resource type")
}
return nil
}
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"fmt"
"github.com/juju/errors"
"github.com/juju/schema"
"github.com/juju/juju/internal/charm/resource"
)
var resourceSchema = schema.FieldMap(
schema.Fields{
"type": schema.String(),
"filename": schema.String(), // TODO(ericsnow) Change to "path"?
"description": schema.String(),
},
schema.Defaults{
"type": resource.TypeFile.String(),
"filename": "",
"description": "",
},
)
func parseMetaResources(data interface{}) (map[string]resource.Meta, error) {
if data == nil {
return nil, nil
}
result := make(map[string]resource.Meta)
for name, val := range data.(map[string]interface{}) {
meta, err := parseResourceMeta(name, val)
if err != nil {
return nil, err
}
result[name] = meta
}
return result, nil
}
func validateMetaResources(resources map[string]resource.Meta) error {
for name, res := range resources {
if res.Name != name {
return fmt.Errorf("mismatch on resource name (%q != %q)", res.Name, name)
}
if err := res.Validate(); err != nil {
return err
}
}
return nil
}
// parseResourceMeta parses the provided data into a Meta, assuming
// that the data has first been checked with resourceSchema.
func parseResourceMeta(name string, data interface{}) (resource.Meta, error) {
meta := resource.Meta{
Name: name,
}
if data == nil {
return meta, nil
}
rMap := data.(map[string]interface{})
if val := rMap["type"]; val != nil {
var err error
meta.Type, err = resource.ParseType(val.(string))
if err != nil {
return meta, errors.Trace(err)
}
}
if val := rMap["filename"]; val != nil {
meta.Path = val.(string)
}
if val := rMap["description"]; val != nil {
meta.Description = val.(string)
}
return meta, nil
}
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"encoding/json"
"fmt"
gourl "net/url"
"regexp"
"strconv"
"strings"
"github.com/juju/errors"
"github.com/juju/juju/core/arch"
)
// Schema represents the different types of valid schemas.
type Schema string
const (
// Local represents a local charm URL, described as a file system path.
Local Schema = "local"
// CharmHub schema represents the charmhub charm repository.
CharmHub Schema = "ch"
)
// Prefix creates a url with the given prefix, useful for typed schemas.
func (s Schema) Prefix(url string) string {
return fmt.Sprintf("%s:%s", s, url)
}
// Matches attempts to compare if a schema string matches the schema.
func (s Schema) Matches(other string) bool {
return string(s) == other
}
func (s Schema) String() string {
return string(s)
}
// Location represents a charm location, which must declare a path component
// and a string representation.
type Location interface {
Path() string
String() string
}
// URL represents a charm or bundle location:
//
// local:oneiric/wordpress
// ch:wordpress
// ch:amd64/jammy/wordpress-30
type URL struct {
Schema string // "ch" or "local".
Name string // "wordpress".
Revision int // -1 if unset, N otherwise.
series string // "precise" or "" if unset; "bundle" if it's a bundle.
Architecture string // "amd64" or "" if unset for charmstore (v1) URLs.
}
var (
validArch = regexp.MustCompile("^[a-z]+([a-z0-9]+)?$")
validName = regexp.MustCompile("^[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*$")
)
// ValidateSchema returns an error if the schema is invalid.
//
// Valid schemas for the URL are:
// - ch: charm hub
// - local: local file
func ValidateSchema(schema string) error {
switch schema {
case CharmHub.String(), Local.String():
return nil
}
return errors.NotValidf("schema %q", schema)
}
// IsValidArchitecture reports whether the architecture is a valid architecture
// in charm or bundle URLs.
func IsValidArchitecture(architecture string) bool {
return validArch.MatchString(architecture) && arch.IsSupportedArch(architecture)
}
// ValidateArchitecture returns an error if the given architecture is invalid.
func ValidateArchitecture(arch string) error {
if IsValidArchitecture(arch) {
return nil
}
return errors.NotValidf("architecture name %q", arch)
}
// IsValidName reports whether name is a valid charm or bundle name.
func IsValidName(name string) bool {
return validName.MatchString(name)
}
// ValidateName returns an error if the given name is invalid.
func ValidateName(name string) error {
if IsValidName(name) {
return nil
}
return errors.NotValidf("name %q", name)
}
// WithRevision returns a URL equivalent to url but with Revision set
// to revision.
func (u *URL) WithRevision(revision int) *URL {
urlCopy := *u
urlCopy.Revision = revision
return &urlCopy
}
// WithArchitecture returns a URL equivalent to url but with Architecture set
// to architecture.
func (u *URL) WithArchitecture(arch string) *URL {
urlCopy := *u
urlCopy.Architecture = arch
return &urlCopy
}
// MustParseURL works like ParseURL, but panics in case of errors.
func MustParseURL(url string) *URL {
u, err := ParseURL(url)
if err != nil {
panic(err)
}
return u
}
// ParseURL parses the provided charm URL string into its respective
// structure.
//
// A missing schema is assumed to be 'ch'.
func ParseURL(url string) (*URL, error) {
u, err := gourl.Parse(url)
if err != nil {
return nil, errors.Errorf("cannot parse charm or bundle URL: %q", url)
}
if u.RawQuery != "" || u.Fragment != "" || u.User != nil {
return nil, errors.Errorf("charm or bundle URL %q has unrecognized parts", url)
}
var curl *URL
switch {
case CharmHub.Matches(u.Scheme):
// Handle talking to the new style of the schema.
curl, err = parseCharmhubURL(u)
case u.Opaque != "":
u.Path = u.Opaque
curl, err = parseLocalURL(u, url)
default:
// Handle the fact that anything without a prefix is now a CharmHub
// charm URL.
curl, err = parseCharmhubURL(u)
}
if err != nil {
return nil, errors.Trace(err)
}
if curl.Schema == "" {
return nil, errors.Errorf("expected schema for charm or bundle URL: %q", url)
}
return curl, nil
}
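// Illustrative usage sketch:
//
//    curl, err := ParseURL("ch:amd64/focal/wordpress-30")
//    if err != nil {
//        return err
//    }
//    // curl.Schema == "ch", curl.Architecture == "amd64",
//    // curl.Name == "wordpress", curl.Revision == 30
//
//    // A bare name defaults to the "ch" schema:
//    // MustParseURL("wordpress").String() == "ch:wordpress"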
func parseLocalURL(url *gourl.URL, originalURL string) (*URL, error) {
if !Local.Matches(url.Scheme) {
return nil, errors.NotValidf("cannot parse URL %q: schema %q", url, url.Scheme)
}
r := URL{Schema: Local.String()}
parts := strings.Split(url.Path[0:], "/")
if len(parts) < 1 || len(parts) > 4 {
return nil, errors.Errorf("charm or bundle URL has invalid form: %q", originalURL)
}
// ~<username>
if strings.HasPrefix(parts[0], "~") {
return nil, errors.Errorf("local charm or bundle URL with user name: %q", originalURL)
}
if len(parts) > 2 {
return nil, errors.Errorf("charm or bundle URL has invalid form: %q", originalURL)
}
// <series>
if len(parts) == 2 {
r.series, parts = parts[0], parts[1:]
}
if len(parts) < 1 {
return nil, errors.Errorf("URL without charm or bundle name: %q", originalURL)
}
// <name>[-<revision>]
r.Name, r.Revision = extractRevision(parts[0])
if err := ValidateName(r.Name); err != nil {
return nil, errors.Annotatef(err, "cannot parse URL %q", url)
}
return &r, nil
}
func (u *URL) path() string {
var parts []string
if u.Architecture != "" {
parts = append(parts, u.Architecture)
}
if u.series != "" {
parts = append(parts, u.series)
}
if u.Revision >= 0 {
parts = append(parts, fmt.Sprintf("%s-%d", u.Name, u.Revision))
} else {
parts = append(parts, u.Name)
}
return strings.Join(parts, "/")
}
// FullPath returns the full path of a URL path including the schema.
func (u *URL) FullPath() string {
return fmt.Sprintf("%s:%s", u.Schema, u.Path())
}
// Path returns the path of the URL without the schema.
func (u *URL) Path() string {
return u.path()
}
// String returns the string representation of the URL.
func (u *URL) String() string {
return u.FullPath()
}
// MarshalJSON will marshal the URL into a slice of bytes in a JSON
// representation.
func (u *URL) MarshalJSON() ([]byte, error) {
if u == nil {
panic("cannot marshal nil *charm.URL")
}
return json.Marshal(u.FullPath())
}
// UnmarshalJSON will unmarshal the URL from a JSON representation.
func (u *URL) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
url, err := ParseURL(s)
if err != nil {
return err
}
*u = *url
return nil
}
// MarshalText implements encoding.TextMarshaler by
// returning u.FullPath()
func (u *URL) MarshalText() ([]byte, error) {
if u == nil {
return nil, nil
}
return []byte(u.FullPath()), nil
}
// UnmarshalText implements encoding.TextUnmarshaler by
// parsing the data with ParseURL.
func (u *URL) UnmarshalText(data []byte) error {
url, err := ParseURL(string(data))
if err != nil {
return err
}
*u = *url
return nil
}
// Quote translates a charm url string into one which can be safely used
// in a file path. ASCII letters, ASCII digits, dot and dash stay the
// same; other characters are translated to their hex representation
// surrounded by underscores.
func Quote(unsafe string) string {
safe := make([]byte, 0, len(unsafe)*4)
for i := 0; i < len(unsafe); i++ {
b := unsafe[i]
switch {
case b >= 'a' && b <= 'z',
b >= 'A' && b <= 'Z',
b >= '0' && b <= '9',
b == '.',
b == '-':
safe = append(safe, b)
default:
safe = append(safe, fmt.Sprintf("_%02x_", b)...)
}
}
return string(safe)
}
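// Illustrative sketch:
//
//    Quote("ch:amd64/wordpress-30") // -> "ch_3a_amd64_2f_wordpress-30"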
// parseCharmhubURL will attempt to parse an identifier URL. The identifier
// URL is split up into at most 3 path segments, covering the following parts,
// some of which are optional and some of which are mandatory.
//
// - architecture (optional)
// - series (backward compatibility)
// - name
// - revision (optional)
//
// Examples are as follows:
//
// - ch:amd64/foo-1
// - ch:amd64/focal/foo-1
// - ch:foo-1
// - ch:foo
// - ch:amd64/focal/foo
func parseCharmhubURL(url *gourl.URL) (*URL, error) {
r := URL{
Schema: CharmHub.String(),
Revision: -1,
}
path := url.Path
if url.Opaque != "" {
path = url.Opaque
}
parts := strings.Split(strings.Trim(path, "/"), "/")
if len(parts) == 0 || len(parts) > 3 {
return nil, errors.Errorf(`charm or bundle URL %q malformed`, url)
}
// ~<username>
if strings.HasPrefix(parts[0], "~") {
return nil, errors.NotValidf("charmhub charm or bundle URL with user name: %q", url)
}
var nameRev string
switch len(parts) {
case 3:
r.Architecture, r.series, nameRev = parts[0], parts[1], parts[2]
if err := ValidateArchitecture(r.Architecture); err != nil {
return nil, errors.Annotatef(err, "in URL %q", url)
}
case 2:
// Since both the architecture and series are optional,
// the first part can be either architecture or series.
// To differentiate between them, we go ahead and try to
// validate the first part as an architecture to decide.
if err := ValidateArchitecture(parts[0]); err == nil {
r.Architecture, nameRev = parts[0], parts[1]
} else {
r.series, nameRev = parts[0], parts[1]
}
default:
nameRev = parts[0]
}
// Mandatory
r.Name, r.Revision = extractRevision(nameRev)
if err := ValidateName(r.Name); err != nil {
return nil, errors.Annotatef(err, "cannot parse name and/or revision in URL %q", url)
}
return &r, nil
}
// EnsureSchema ensures that the schema for a given URL is correct and
// valid. If the URL does not specify a schema, the provided defaultSchema
// is prepended to it.
func EnsureSchema(url string, defaultSchema Schema) (string, error) {
u, err := gourl.Parse(url)
if err != nil {
return "", errors.Errorf("cannot parse charm or bundle URL: %q", url)
}
switch Schema(u.Scheme) {
case CharmHub, Local:
return url, nil
case Schema(""):
// If the schema is empty, we fall back to the default schema.
return defaultSchema.Prefix(url), nil
default:
return "", errors.NotValidf("schema %q", u.Scheme)
}
}
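// Illustrative sketch:
//
//    EnsureSchema("wordpress", CharmHub)       // -> "ch:wordpress", nil
//    EnsureSchema("local:wordpress", CharmHub) // -> "local:wordpress", nil
//    EnsureSchema("cs:wordpress", CharmHub)    // -> error: schema "cs" not valid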
func extractRevision(name string) (string, int) {
revision := -1
for i := len(name) - 1; i > 0; i-- {
c := name[i]
if c >= '0' && c <= '9' {
continue
}
if c == '-' && i != len(name)-1 {
var err error
revision, err = strconv.Atoi(name[i+1:])
if err != nil {
panic(err) // We just checked it was right.
}
name = name[:i]
}
break
}
return name, revision
}
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package charm
import (
"bufio"
"fmt"
"io"
"strings"
"github.com/juju/errors"
)
// readVersion extracts the VCS version from a charm's version file.
func readVersion(r io.Reader) (string, error) {
scanner := bufio.NewScanner(r)
scanner.Scan()
if err := scanner.Err(); err != nil {
return "", errors.Annotate(err, "cannot read version file")
}
// bzr revision info starts with "revision-id: " so strip that.
revLine := strings.TrimPrefix(scanner.Text(), "revision-id: ")
return fmt.Sprintf("%.100s", revLine), nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
// Package charmhub provides a client for communicating with Charmhub. Unlike
// the Charmhub client within juju, this package does not rely on
// wrapping an external package client. Generic client code for this
// package has been copied from "github.com/juju/charmrepo/v7/csclient".
//
// TODO: (hml) 2020-06-17
// Implement:
// - use of macaroons, at that time consider refactoring the local
// charmHub pkg to share macaroonJar.
// - user/password ?
// - allow for use of the channel pieces
package charmhub
import (
"context"
"fmt"
"net/url"
"path"
"strings"
"time"
"github.com/juju/errors"
corelogger "github.com/juju/juju/core/logger"
charmhubpath "github.com/juju/juju/internal/charmhub/path"
"github.com/juju/juju/internal/charmhub/transport"
)
const (
// DefaultServerURL is the default location of the global Charmhub API.
// An alternate location can be configured by changing the URL
// field in the Config struct.
DefaultServerURL = "https://api.charmhub.io"
// RefreshTimeout is the timeout callers should use for Refresh calls.
RefreshTimeout = 10 * time.Second
)
const (
serverVersion = "v2"
serverEntity = "charms"
)
// Config holds configuration for creating a new charm hub client.
// Logger is required; the remaining fields may be left at their zero
// values to use the defaults described below.
type Config struct {
// Logger to use during the API requests. This field is required.
Logger corelogger.Logger
// URL holds the base endpoint URL of the Charmhub API,
// with no trailing slash, not including the version.
// If empty string, use the default Charmhub API server.
URL string
// HTTPClient represents the HTTP client to use for all API
// requests. If nil, use the default HTTP client.
HTTPClient HTTPClient
// FileSystem represents the file system operations for downloading.
// If nil, use the real OS file system.
// This is only required for downloading of charms or bundles.
FileSystem FileSystem
}
// basePath returns the base configuration path for speaking to the server API.
func basePath(configURL string) (charmhubpath.Path, error) {
baseURL := strings.TrimRight(configURL, "/")
rawURL := fmt.Sprintf("%s/%s", baseURL, path.Join(serverVersion, serverEntity))
url, err := url.Parse(rawURL)
if err != nil {
return charmhubpath.Path{}, errors.Trace(err)
}
return charmhubpath.MakePath(url), nil
}
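// Illustrative sketch (assuming charmhubpath.Path.String returns the
// underlying URL):
//
//    p, _ := basePath("https://api.charmhub.io")
//    // p.String() == "https://api.charmhub.io/v2/charms"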
// Client represents the client side of a charm store.
type Client struct {
url string
infoClient *infoClient
findClient *findClient
downloadClient *DownloadClient
refreshClient *refreshClient
resourcesClient *resourcesClient
}
// NewClient creates a new Charmhub client from the supplied configuration.
func NewClient(config Config) (*Client, error) {
logger := config.Logger.Child("client", corelogger.CHARMHUB)
url := config.URL
if url == "" {
url = DefaultServerURL
}
httpClient := config.HTTPClient
if httpClient == nil {
httpClient = DefaultHTTPClient(logger)
}
fs := config.FileSystem
if fs == nil {
fs = fileSystem{}
}
base, err := basePath(url)
if err != nil {
return nil, errors.Trace(err)
}
infoPath, err := base.Join("info")
if err != nil {
return nil, errors.Annotate(err, "constructing info path")
}
findPath, err := base.Join("find")
if err != nil {
return nil, errors.Annotate(err, "constructing find path")
}
refreshPath, err := base.Join("refresh")
if err != nil {
return nil, errors.Annotate(err, "constructing refresh path")
}
resourcesPath, err := base.Join("resources")
if err != nil {
return nil, errors.Annotate(err, "constructing resources path")
}
logger.Tracef(context.TODO(), "NewClient to %q", url)
apiRequester := newAPIRequester(httpClient, logger)
apiRequestLogger := newAPIRequesterLogger(apiRequester, logger)
restClient := newHTTPRESTClient(apiRequestLogger)
return &Client{
url: base.String(),
infoClient: newInfoClient(infoPath, restClient, logger),
findClient: newFindClient(findPath, restClient, logger),
refreshClient: newRefreshClient(refreshPath, restClient, logger),
resourcesClient: newResourcesClient(resourcesPath, restClient, logger),
// download client doesn't require a path here, as the download could
// be from any server in theory. That information is found from the
// refresh response.
downloadClient: NewDownloadClient(httpClient, fs, logger),
}, nil
}
// URL returns the underlying store URL.
func (c *Client) URL() string {
return c.url
}
// Info returns charm info for the provided charm name from the CharmHub API.
func (c *Client) Info(ctx context.Context, name string, options ...InfoOption) (transport.InfoResponse, error) {
return c.infoClient.Info(ctx, name, options...)
}
// Find searches the CharmHub API for charms matching the given name.
func (c *Client) Find(ctx context.Context, name string, options ...FindOption) ([]transport.FindResponse, error) {
return c.findClient.Find(ctx, name, options...)
}
// Refresh makes refresh API calls with the actions defined in the given
// RefreshConfig.
func (c *Client) Refresh(ctx context.Context, config RefreshConfig) ([]transport.RefreshResponse, error) {
return c.refreshClient.Refresh(ctx, config)
}
// RefreshWithRequestMetrics makes refresh API calls, specifically using the
// refresh action while also providing metrics. Intended for use in the charm
// revision updater facade only; otherwise use Refresh.
func (c *Client) RefreshWithRequestMetrics(ctx context.Context, config RefreshConfig, metrics Metrics) ([]transport.RefreshResponse, error) {
return c.refreshClient.RefreshWithRequestMetrics(ctx, config, metrics)
}
// RefreshWithMetricsOnly makes a refresh API call with no action, whose
// purpose is to send metrics data for models without current units, e.g.
// the controller model.
func (c *Client) RefreshWithMetricsOnly(ctx context.Context, metrics Metrics) error {
return c.refreshClient.RefreshWithMetricsOnly(ctx, metrics)
}
// Download downloads the archive at the given resource URL directly.
func (c *Client) Download(ctx context.Context, resourceURL *url.URL, archivePath string, options ...DownloadOption) (*Digest, error) {
return c.downloadClient.Download(ctx, resourceURL, archivePath, options...)
}
// ListResourceRevisions returns resource revisions for the provided charm and resource.
func (c *Client) ListResourceRevisions(ctx context.Context, charm, resource string) ([]transport.ResourceRevision, error) {
return c.resourcesClient.ListResourceRevisions(ctx, charm, resource)
}
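// Illustrative usage sketch: constructing a Client with the Config, NewClient
// and Client.Info APIs above. It assumes the caller supplies a
// corelogger.Logger implementation; the charm name is an example value only.
func exampleClientInfo(ctx context.Context, logger corelogger.Logger) (transport.InfoResponse, error) {
	// An empty URL, nil HTTPClient and nil FileSystem fall back to the
	// defaults chosen in NewClient.
	client, err := NewClient(Config{Logger: logger})
	if err != nil {
		return transport.InfoResponse{}, errors.Trace(err)
	}
	return client.Info(ctx, "ubuntu")
}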
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"io"
"net/http"
"net/url"
"os"
"runtime/pprof"
"github.com/juju/errors"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
)
// FileSystem defines a file system for modifying files on a user's system.
type FileSystem interface {
// Create creates or truncates the named file. If the file already exists,
// it is truncated.
Create(string) (*os.File, error)
}
type fileSystem struct{}
// Create creates or truncates the named file. If the file already exists,
// it is truncated.
func (fileSystem) Create(name string) (*os.File, error) {
return os.Create(name)
}
// DefaultFileSystem returns the default file system.
func DefaultFileSystem() FileSystem {
return fileSystem{}
}
// DownloadOption to be passed to Download to customize the resulting request.
type DownloadOption func(*downloadOptions)
type downloadOptions struct {
progressBar ProgressBar
}
// WithProgressBar sets the progress bar on the option.
func WithProgressBar(pb ProgressBar) DownloadOption {
return func(options *downloadOptions) {
options.progressBar = pb
}
}
// Digest represents a digest of a file.
type Digest struct {
SHA256 string
SHA384 string
Size int64
}
// Create a downloadOptions instance with default values.
func newDownloadOptions() *downloadOptions {
return &downloadOptions{}
}
// DownloadClient represents a client for downloading charm resources directly.
type DownloadClient struct {
httpClient HTTPClient
fileSystem FileSystem
logger corelogger.Logger
}
// NewDownloadClient creates a DownloadClient for downloading charm archives
// directly.
func NewDownloadClient(httpClient HTTPClient, fileSystem FileSystem, logger corelogger.Logger) *DownloadClient {
return &DownloadClient{
httpClient: httpClient,
fileSystem: fileSystem,
logger: logger,
}
}
// downloadKey represents a key for accessing the context value.
type downloadKey string
const (
// DownloadNameKey defines a name of a download, so the progress bar can
// show it.
DownloadNameKey downloadKey = "download-name-key"
)
// ProgressBar defines a progress bar type for giving feedback to the user about
// the state of the download.
type ProgressBar interface {
io.Writer
// Start progress with max "total" steps.
Start(label string, total float64)
// Finished marks the progress display as complete.
Finished()
}
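// Illustrative sketch: attaching a display name to the context via
// DownloadNameKey so a supplied ProgressBar can label the download; the name
// used here is an example value only.
func exampleDownloadContext(ctx context.Context) context.Context {
	return context.WithValue(ctx, DownloadNameKey, "postgresql")
}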
// Download returns the raw charm zip file, which is retrieved from the given
// URL.
// It is expected that the archive path doesn't already exist; if it does, the
// call will error out. The caller is expected to handle the clean up of the
// archivePath.
// TODO (stickupkid): We should either create and remove, or take a file and
// let the caller remove. The fact that the operations are asymmetrical can lead
// to unexpected behaviour; namely leaking of files.
func (c *DownloadClient) Download(ctx context.Context, resourceURL *url.URL, archivePath string, options ...DownloadOption) (digest *Digest, err error) {
ctx, span := trace.Start(ctx, trace.NameFromFunc(), trace.WithAttributes(
trace.StringAttr("charmhub.request", "download"),
trace.StringAttr("charmhub.url", resourceURL.String()),
))
defer func() {
span.RecordError(err)
span.End()
}()
pprof.Do(ctx, pprof.Labels(trace.OTELTraceID, span.Scope().TraceID()), func(ctx context.Context) {
digest, err = c.download(ctx, resourceURL, archivePath, options...)
})
return
}
func (c *DownloadClient) download(ctx context.Context, url *url.URL, archivePath string, options ...DownloadOption) (*Digest, error) {
opts := newDownloadOptions()
for _, option := range options {
option(opts)
}
f, err := c.fileSystem.Create(archivePath)
if err != nil {
return nil, errors.Trace(err)
}
defer func() {
_ = f.Close()
}()
r, err := c.downloadFromURL(ctx, url)
if err != nil {
return nil, errors.Annotatef(err, "cannot retrieve %q", url)
}
defer func() {
_ = r.Body.Close()
}()
progressBar := io.Discard
if opts.progressBar != nil {
// Progress bar has this nifty feature where you can supply a name. In
// this case we can supply one to help with UI feedback.
var name string
if n := ctx.Value(DownloadNameKey); n != nil {
if s, ok := n.(string); ok && s != "" {
name = s
}
}
downloadSize := float64(r.ContentLength)
opts.progressBar.Start(name, downloadSize)
defer opts.progressBar.Finished()
progressBar = opts.progressBar
}
hasher256 := sha256.New()
hasher384 := sha512.New384()
size, err := io.Copy(f, io.TeeReader(r.Body, io.MultiWriter(hasher256, hasher384, progressBar)))
if err != nil {
return nil, errors.Trace(err)
} else if size != r.ContentLength {
return nil, errors.Errorf("downloaded size %d does not match expected size %d", size, r.ContentLength)
}
return &Digest{
SHA256: hex.EncodeToString(hasher256.Sum(nil)),
SHA384: hex.EncodeToString(hasher384.Sum(nil)),
Size: size,
}, nil
}
func (c *DownloadClient) downloadFromURL(ctx context.Context, resourceURL *url.URL) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", resourceURL.String(), nil)
if err != nil {
return nil, errors.Annotatef(err, "cannot make new request")
}
c.logger.Tracef(context.TODO(), "download from URL %s", resourceURL.String())
resp, err = c.httpClient.Do(req)
if err != nil {
return nil, errors.Annotate(err, "cannot get archive")
}
// If we get anything but a 200 status code, we don't know how to correctly
// handle that scenario. Return early and deal with the failure later on.
if resp.StatusCode == http.StatusOK {
return resp, nil
}
c.logger.Errorf(context.TODO(), "download failed from %s: response code: %s", resourceURL.String(), resp.Status)
// Ensure we drain the response body so this connection can be reused. As
// the body carries no useful error message, all we can do is act on the
// status code.
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, errors.NotFoundf("archive")
}
// Server error, nothing we can do other than inform the user that the
// archive was unavailable.
return nil, errors.Errorf("unable to locate archive (store API responded with status: %s)", resp.Status)
}
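// Illustrative usage sketch: downloading an archive with a previously
// constructed *DownloadClient and a resource URL taken from a refresh
// response; the URL string and archive path are example values only.
func exampleDownload(ctx context.Context, client *DownloadClient, rawURL string) (*Digest, error) {
	resourceURL, err := url.Parse(rawURL)
	if err != nil {
		return nil, errors.Trace(err)
	}
	// The returned digest carries the SHA256, SHA384 and size of the archive.
	return client.Download(ctx, resourceURL, "/tmp/example.charm")
}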
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"github.com/juju/errors"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/internal/charmhub/transport"
)
// handleBasicAPIErrors handles some of the basic API error messages.
func handleBasicAPIErrors(list transport.APIErrors, logger corelogger.Logger) error {
if len(list) == 0 {
return nil
}
masked := true
defer func() {
// Only log out the error if we're masking the original error, that
// way you can at least find the issue in `debug-log`.
// We do this because the original error message can be huge and
// verbose, like a java stack trace!
if masked {
logger.Errorf(context.TODO(), "charmhub API error %s:%s", list[0].Code, list[0].Message)
}
}()
switch list[0].Code {
case transport.ErrorCodeNotFound:
return errors.NotFoundf("charm or bundle")
case transport.ErrorCodeNameNotFound:
return errors.NotFoundf("charm or bundle name")
case transport.ErrorCodeResourceNotFound:
return errors.NotFoundf("charm resource")
case transport.ErrorCodeAPIError:
return errors.Errorf("unexpected api error attempting to query charm or bundle from the charmhub store")
case transport.ErrorCodeBadArgument:
return errors.BadRequestf("query argument")
}
// We haven't handled the errors, so just return them.
masked = false
return list
}
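// Illustrative sketch: callers typically inspect the translated error with
// the juju/errors helpers rather than the raw transport.APIErrors.
func exampleHandleErrors(list transport.APIErrors, logger corelogger.Logger) bool {
	err := handleBasicAPIErrors(list, logger)
	return errors.Is(err, errors.NotFound)
}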
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"fmt"
)
func appendFilterList(value string, filters []string) []string {
retVals := make([]string, len(filters))
for i, v := range filters {
retVals[i] = fmt.Sprintf("%s.%s", value, v)
}
return retVals
}
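// Illustrative sketch of what appendFilterList produces: each filter name is
// prefixed with the given value and a dot.
func exampleAppendFilterList() []string {
	// Returns {"default-release.channel.name", "default-release.channel.risk"}.
	return appendFilterList("default-release", []string{"channel.name", "channel.risk"})
}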
var defaultChannelFilter = []string{
"channel.name",
"channel.base.architecture",
"channel.base.name",
"channel.base.channel",
"channel.released-at",
"channel.risk",
"channel.track",
}
var defaultResultFilter = []string{
"result.categories.featured",
"result.categories.name",
"result.contains-charms.name",
"result.contains-charms.package-id",
"result.contains-charms.store-url",
"result.description",
"result.license",
"result.publisher.display-name",
"result.store-url",
"result.summary",
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"net/http"
"runtime/pprof"
"strings"
"github.com/juju/errors"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
"github.com/juju/juju/internal/charmhub/path"
"github.com/juju/juju/internal/charmhub/transport"
)
// FindOption to be passed to Find to customize the resulting request.
type FindOption func(*findOptions)
type findOptions struct {
category *string
channel *string
charmType *string
platforms *string
publisher *string
relationRequires *string
relationProvides *string
}
// WithFindCategory sets the category on the option.
func WithFindCategory(category string) FindOption {
return func(findOptions *findOptions) {
findOptions.category = &category
}
}
// WithFindChannel sets the channel on the option.
func WithFindChannel(channel string) FindOption {
return func(findOptions *findOptions) {
findOptions.channel = &channel
}
}
// WithFindType sets the charmType on the option.
func WithFindType(charmType string) FindOption {
return func(findOptions *findOptions) {
findOptions.charmType = &charmType
}
}
// WithFindPlatforms sets the charmPlatforms on the option.
func WithFindPlatforms(platforms string) FindOption {
return func(findOptions *findOptions) {
findOptions.platforms = &platforms
}
}
// WithFindPublisher sets the publisher on the option.
func WithFindPublisher(publisher string) FindOption {
return func(findOptions *findOptions) {
findOptions.publisher = &publisher
}
}
// WithFindRelationRequires sets the relationRequires on the option.
func WithFindRelationRequires(relationRequires string) FindOption {
return func(findOptions *findOptions) {
findOptions.relationRequires = &relationRequires
}
}
// WithFindRelationProvides sets the relationProvides on the option.
func WithFindRelationProvides(relationProvides string) FindOption {
return func(findOptions *findOptions) {
findOptions.relationProvides = &relationProvides
}
}
// Create a findOptions instance with default values.
func newFindOptions() *findOptions {
return &findOptions{}
}
// findClient defines a client for querying information about a given charm or
// bundle for a given CharmHub store.
type findClient struct {
path path.Path
client RESTClient
logger corelogger.Logger
}
// newFindClient creates a findClient for querying charm or bundle information.
func newFindClient(path path.Path, client RESTClient, logger corelogger.Logger) *findClient {
return &findClient{
path: path,
client: client,
logger: logger,
}
}
// Find searches Charm Hub and provides results matching a string.
func (c *findClient) Find(ctx context.Context, query string, options ...FindOption) (result []transport.FindResponse, err error) {
ctx, span := trace.Start(ctx, trace.NameFromFunc(), trace.WithAttributes(
trace.StringAttr("charmhub.query", query),
trace.StringAttr("charmhub.request", "find"),
))
defer func() {
span.RecordError(err)
span.End()
}()
pprof.Do(ctx, pprof.Labels(trace.OTELTraceID, span.Scope().TraceID()), func(ctx context.Context) {
result, err = c.find(ctx, query, options...)
})
return
}
func (c *findClient) find(ctx context.Context, query string, options ...FindOption) ([]transport.FindResponse, error) {
opts := newFindOptions()
for _, option := range options {
option(opts)
}
c.logger.Tracef(context.TODO(), "Find(%s)", query)
path, err := c.path.Query("q", query)
if err != nil {
return nil, errors.Trace(err)
}
path, err = path.Query("fields", defaultFindFilter())
if err != nil {
return nil, errors.Trace(err)
}
if err := walkFindOptions(opts, func(name, value string) error {
path, err = path.Query(name, value)
return errors.Trace(err)
}); err != nil {
return nil, errors.Trace(err)
}
var resp transport.FindResponses
restResp, err := c.client.Get(ctx, path, &resp)
if err != nil {
return nil, errors.Trace(err)
}
if restResp.StatusCode == http.StatusNotFound {
return nil, errors.NotFoundf(query)
}
if err := handleBasicAPIErrors(resp.ErrorList, c.logger); err != nil {
return nil, errors.Trace(err)
}
return resp.Results, nil
}
func walkFindOptions(opts *findOptions, fn func(string, string) error) error {
// We could use reflect here, but it might be easier to just list out what
// we want to walk over.
// See: https://gist.github.com/SimonRichardson/7c9243d71551cad4af7661128add93b5
if opts.category != nil {
if err := fn("category", *opts.category); err != nil {
return errors.Trace(err)
}
}
if opts.channel != nil {
if err := fn("channel", *opts.channel); err != nil {
return errors.Trace(err)
}
}
if opts.charmType != nil {
if err := fn("type", *opts.charmType); err != nil {
return errors.Trace(err)
}
}
if opts.platforms != nil {
if err := fn("platforms", *opts.platforms); err != nil {
return errors.Trace(err)
}
}
if opts.publisher != nil {
if err := fn("publisher", *opts.publisher); err != nil {
return errors.Trace(err)
}
}
if opts.relationRequires != nil {
if err := fn("relation-requires", *opts.relationRequires); err != nil {
return errors.Trace(err)
}
}
if opts.relationProvides != nil {
if err := fn("relation-provides", *opts.relationProvides); err != nil {
return errors.Trace(err)
}
}
return nil
}
// defaultFindFilter returns a filter string to retrieve all data
// necessary to fill the transport.FindResponse. Without it, we'd
// receive the Name, ID and Type.
func defaultFindFilter() string {
filter := defaultFindResultFilter
filter = append(filter, appendFilterList("default-release", defaultRevisionFilter)...)
return strings.Join(filter, ",")
}
var defaultFindResultFilter = []string{
"result.publisher.display-name",
"result.summary",
"result.store-url",
}
var defaultRevisionFilter = []string{
"revision.bases.architecture",
"revision.bases.name",
"revision.bases.channel",
"revision.version",
}
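// Illustrative usage sketch, assuming an existing *Client; the query, type
// and publisher are example values only.
func exampleFind(ctx context.Context, client *Client) ([]transport.FindResponse, error) {
	return client.Find(ctx, "postgresql",
		WithFindType("charm"),
		WithFindPublisher("Canonical"),
	)
}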
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httputil"
"net/url"
"sort"
"time"
"github.com/juju/clock"
"github.com/juju/errors"
"github.com/juju/retry"
"gopkg.in/httprequest.v1"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/version"
"github.com/juju/juju/internal/charmhub/path"
jujuhttp "github.com/juju/juju/internal/http"
)
const (
jsonContentType = "application/json"
userAgentKey = "User-Agent"
userAgentValue = version.UserAgentVersion
// defaultRetryAttempts defines the number of attempts that a default
// HTTPClient will retry before giving up.
// Retries are only performed on certain status codes, nothing in the 200 to
// 400 range and a select few from the 500 range (deemed retryable):
//
// - http.StatusBadGateway
// - http.StatusGatewayTimeout
// - http.StatusServiceUnavailable
// - http.StatusTooManyRequests
//
// See: juju/http package.
defaultRetryAttempts = 3
// defaultRetryDelay holds the amount of time to wait after a failed
// attempt before retrying.
defaultRetryDelay = time.Second * 10
// defaultRetryMaxDelay holds the maximum amount of time to wait before giving
// up on a request. This value includes any delay requested by the server via
// the Retry-After header.
defaultRetryMaxDelay = time.Minute * 10
)
// HTTPClient defines a type for making the actual request. It may be an
// *http.Client.
type HTTPClient interface {
// Do performs the *http.Request and returns an *http.Response or an error.
Do(*http.Request) (*http.Response, error)
}
// DefaultHTTPClient creates a new HTTPClient with the default configuration.
func DefaultHTTPClient(logger corelogger.Logger) *jujuhttp.Client {
recorder := loggingRequestRecorder{
logger: logger.Child("transport.request-recorder", corelogger.METRICS),
}
return requestHTTPClient(recorder, defaultRetryPolicy())(logger)
}
// defaultRetryPolicy returns a retry policy with sane defaults for most
// requests.
func defaultRetryPolicy() jujuhttp.RetryPolicy {
return jujuhttp.RetryPolicy{
Attempts: defaultRetryAttempts,
Delay: defaultRetryDelay,
MaxDelay: defaultRetryMaxDelay,
}
}
type loggingRequestRecorder struct {
logger corelogger.Logger
}
// Record records an outgoing request which produced an http.Response.
func (r loggingRequestRecorder) Record(method string, url *url.URL, res *http.Response, rtt time.Duration) {
if r.logger.IsLevelEnabled(corelogger.TRACE) {
r.logger.Tracef(context.TODO(), "request (method: %q, host: %q, path: %q, status: %q, duration: %s)", method, url.Host, url.Path, res.Status, rtt)
}
}
// RecordError records an outgoing request which returned an error.
func (r loggingRequestRecorder) RecordError(method string, url *url.URL, err error) {
if r.logger.IsLevelEnabled(corelogger.TRACE) {
r.logger.Tracef(context.TODO(), "request error (method: %q, host: %q, path: %q, err: %s)", method, url.Host, url.Path, err)
}
}
// requestHTTPClient returns a function that creates a new HTTPClient that
// records the requests.
func requestHTTPClient(recorder jujuhttp.RequestRecorder, policy jujuhttp.RetryPolicy) func(corelogger.Logger) *jujuhttp.Client {
return func(logger corelogger.Logger) *jujuhttp.Client {
return jujuhttp.NewClient(
jujuhttp.WithRequestRecorder(recorder),
jujuhttp.WithRequestRetrier(policy),
jujuhttp.WithLogger(logger.Child("transport", corelogger.CHARMHUB, corelogger.HTTP)),
)
}
}
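// Illustrative sketch: building an HTTP client with a custom retry policy
// instead of DefaultHTTPClient. The attempt and delay figures are example
// values only.
func exampleCustomHTTPClient(logger corelogger.Logger) *jujuhttp.Client {
	policy := jujuhttp.RetryPolicy{
		Attempts: 5,
		Delay:    2 * time.Second,
		MaxDelay: time.Minute,
	}
	recorder := loggingRequestRecorder{
		logger: logger.Child("transport.request-recorder", corelogger.METRICS),
	}
	return requestHTTPClient(recorder, policy)(logger)
}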
// apiRequester creates a wrapper around the HTTPClient to allow for better
// error handling.
type apiRequester struct {
httpClient HTTPClient
logger corelogger.Logger
retryDelay time.Duration
}
// newAPIRequester creates a new apiRequester for making requests to a server.
func newAPIRequester(httpClient HTTPClient, logger corelogger.Logger) *apiRequester {
return &apiRequester{
httpClient: httpClient,
logger: logger,
retryDelay: 3 * time.Second,
}
}
// Do performs the *http.Request and returns a *http.Response or an error.
//
// Handle empty response (io.EOF) errors specially and retry. The reason for
// this is we get these errors from Charmhub fairly regularly (they're not
// valid HTTP responses as there are no headers; they're empty responses).
func (t *apiRequester) Do(req *http.Request) (*http.Response, error) {
// To retry requests with a body, we need to read the entire body in
// up-front, otherwise it'll be empty on retries.
var body []byte
if req.Body != nil {
var err error
body, err = io.ReadAll(req.Body)
if err != nil {
return nil, errors.Annotate(err, "reading request body")
}
err = req.Body.Close()
if err != nil {
return nil, errors.Annotate(err, "closing request body")
}
}
// Try a fixed number of attempts with a doubling delay in between.
var resp *http.Response
err := retry.Call(retry.CallArgs{
Func: func() error {
if body != nil {
req.Body = io.NopCloser(bytes.NewReader(body))
}
var err error
resp, err = t.doOnce(req)
return err
},
IsFatalError: func(err error) bool {
return !errors.Is(err, io.EOF)
},
NotifyFunc: func(lastError error, attempt int) {
t.logger.Errorf(context.TODO(), "Charmhub API error (attempt %d): %v", attempt, lastError)
},
Attempts: 2,
Delay: t.retryDelay,
Clock: clock.WallClock,
Stop: req.Context().Done(),
})
return resp, err
}
func (t *apiRequester) doOnce(req *http.Request) (*http.Response, error) {
resp, err := t.httpClient.Do(req)
if err != nil {
return nil, errors.Trace(err)
}
if resp.StatusCode >= http.StatusOK && resp.StatusCode <= http.StatusNoContent {
return resp, nil
}
var potentialInvalidURL bool
if resp.StatusCode == http.StatusNotFound {
potentialInvalidURL = true
} else if resp.StatusCode >= http.StatusInternalServerError && resp.StatusCode <= http.StatusNetworkAuthenticationRequired {
defer func() {
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
}()
return nil, errors.Errorf(`server error %q`, req.URL.String())
}
// We expect that we always have a valid content-type from the server, once
// we've checked that we don't get a 5xx error. Given that we send an Accept
// header of application/json, that is the only content-type we ever expect to
// see; anything else is treated as incorrectly formatted.
if contentType := resp.Header.Get("Content-Type"); contentType != jsonContentType {
defer func() {
_, _ = io.Copy(io.Discard, resp.Body)
_ = resp.Body.Close()
}()
if potentialInvalidURL {
return nil, errors.Errorf(`unexpected charm-hub url %q when parsing headers`, req.URL.String())
}
return nil, errors.Errorf(`unexpected content-type from server %q`, contentType)
}
return resp, nil
}
// apiRequestLogger creates a wrapper around the HTTP client to allow for better
// logging.
type apiRequestLogger struct {
httpClient HTTPClient
logger corelogger.Logger
}
// newAPIRequesterLogger creates a new HTTPClient that logs every request
// and response when trace logging is enabled.
func newAPIRequesterLogger(httpClient HTTPClient, logger corelogger.Logger) *apiRequestLogger {
return &apiRequestLogger{
httpClient: httpClient,
logger: logger,
}
}
// Do performs the request and logs the request and response if tracing is enabled.
func (t *apiRequestLogger) Do(req *http.Request) (*http.Response, error) {
if t.logger.IsLevelEnabled(corelogger.TRACE) {
if data, err := httputil.DumpRequest(req, true); err == nil {
t.logger.Tracef(context.TODO(), "%s request %s", req.Method, data)
} else {
t.logger.Tracef(context.TODO(), "%s request DumpRequest error %s", req.Method, err.Error())
}
}
resp, err := t.httpClient.Do(req)
if err != nil {
return nil, errors.Trace(err)
}
if t.logger.IsLevelEnabled(corelogger.TRACE) {
if data, err := httputil.DumpResponse(resp, true); err == nil {
t.logger.Tracef(context.TODO(), "%s response %s", req.Method, data)
} else {
t.logger.Tracef(context.TODO(), "%s response DumpResponse error %s", req.Method, err.Error())
}
}
return resp, err
}
// restResponse abstracts away the underlying response from the implementation.
type restResponse struct {
StatusCode int
}
// RESTClient defines a type for making requests to a server.
type RESTClient interface {
// Get performs GET requests to a given Path.
Get(context.Context, path.Path, interface{}) (restResponse, error)
// Post performs POST requests to a given Path.
Post(context.Context, path.Path, http.Header, interface{}, interface{}) (restResponse, error)
}
// httpRESTClient represents a RESTClient that expects to interact with an
// HTTPClient.
type httpRESTClient struct {
httpClient HTTPClient
}
// newHTTPRESTClient creates a new httpRESTClient.
func newHTTPRESTClient(httpClient HTTPClient) *httpRESTClient {
return &httpRESTClient{
httpClient: httpClient,
}
}
// Get makes a GET request to the given path in the CharmHub (not
// including the host name or version prefix but including a leading /),
// parsing the result as JSON into the given result value, which should
// be a pointer to the expected data, but may be nil if no result is
// desired.
func (c *httpRESTClient) Get(ctx context.Context, path path.Path, result interface{}) (restResponse, error) {
req, err := http.NewRequestWithContext(ctx, "GET", path.String(), nil)
if err != nil {
return restResponse{}, errors.Annotate(err, "can not make new request")
}
// Compose the request headers.
req.Header = make(http.Header)
req.Header.Set("Accept", jsonContentType)
req.Header.Set("Content-Type", jsonContentType)
req.Header.Set(userAgentKey, userAgentValue)
resp, err := c.httpClient.Do(req)
if err != nil {
return restResponse{}, errors.Trace(err)
}
defer func() { _ = resp.Body.Close() }()
// Parse the response.
if err := httprequest.UnmarshalJSONResponse(resp, result); err != nil {
return restResponse{}, errors.Annotate(err, "charm hub client get")
}
return restResponse{
StatusCode: resp.StatusCode,
}, nil
}
// Post makes a POST request to the given path in the CharmHub (not
// including the host name or version prefix but including a leading /),
// parsing the result as JSON into the given result value, which should
// be a pointer to the expected data, but may be nil if no result is
// desired.
func (c *httpRESTClient) Post(ctx context.Context, path path.Path, headers http.Header, body, result interface{}) (restResponse, error) {
buffer := new(bytes.Buffer)
if err := json.NewEncoder(buffer).Encode(body); err != nil {
return restResponse{}, errors.Trace(err)
}
req, err := http.NewRequestWithContext(ctx, "POST", path.String(), buffer)
if err != nil {
return restResponse{}, errors.Annotate(err, "can not make new request")
}
// Compose the request headers.
req.Header = make(http.Header)
req.Header.Set("Accept", jsonContentType)
req.Header.Set("Content-Type", jsonContentType)
req.Header.Set(userAgentKey, userAgentValue)
// Add any headers specific to this request (in sorted order).
keys := make([]string, 0, len(headers))
for k := range headers {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
for _, v := range headers[k] {
req.Header.Add(k, v)
}
}
resp, err := c.httpClient.Do(req)
if err != nil {
return restResponse{}, errors.Trace(err)
}
defer func() { _ = resp.Body.Close() }()
// Parse the response.
if err := httprequest.UnmarshalJSONResponse(resp, result); err != nil {
return restResponse{}, errors.Annotate(err, "charm hub client post")
}
return restResponse{
StatusCode: resp.StatusCode,
}, nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"net/http"
"runtime/pprof"
"strings"
"github.com/juju/errors"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
"github.com/juju/juju/internal/charmhub/path"
"github.com/juju/juju/internal/charmhub/transport"
)
// InfoOption to be passed to Info to customize the resulting request.
type InfoOption func(*infoOptions)
type infoOptions struct {
channel *string
}
// WithInfoChannel sets the channel on the option.
func WithInfoChannel(ch string) InfoOption {
return func(infoOptions *infoOptions) {
infoOptions.channel = &ch
}
}
// Create an infoOptions instance with default values.
func newInfoOptions() *infoOptions {
return &infoOptions{}
}
// infoClient defines a client for info requests.
type infoClient struct {
path path.Path
client RESTClient
logger corelogger.Logger
}
// newInfoClient creates an infoClient for making info requests.
func newInfoClient(path path.Path, client RESTClient, logger corelogger.Logger) *infoClient {
return &infoClient{
path: path,
client: client,
logger: logger,
}
}
// Info requests the information of a given charm. If that charm doesn't exist
// an error stating that fact will be returned.
func (c *infoClient) Info(ctx context.Context, name string, options ...InfoOption) (resp transport.InfoResponse, err error) {
ctx, span := trace.Start(ctx, trace.NameFromFunc(), trace.WithAttributes(
trace.StringAttr("charmhub.name", name),
trace.StringAttr("charmhub.request", "info"),
))
defer func() {
span.RecordError(err)
span.End()
}()
pprof.Do(ctx, pprof.Labels(trace.OTELTraceID, span.Scope().TraceID()), func(ctx context.Context) {
resp, err = c.info(ctx, name, options...)
})
return
}
func (c *infoClient) info(ctx context.Context, name string, options ...InfoOption) (transport.InfoResponse, error) {
opts := newInfoOptions()
for _, option := range options {
option(opts)
}
isTraceEnabled := c.logger.IsLevelEnabled(corelogger.TRACE)
if isTraceEnabled {
c.logger.Tracef(context.TODO(), "Info(%s)", name)
}
var resp transport.InfoResponse
path, err := c.path.Join(name)
if err != nil {
return resp, errors.Trace(err)
}
path, err = path.Query("fields", defaultInfoFilter())
if err != nil {
return resp, errors.Trace(err)
}
if opts.channel != nil {
path, err = path.Query("channel", *opts.channel)
if err != nil {
return resp, errors.Trace(err)
}
}
restResp, err := c.client.Get(ctx, path, &resp)
if err != nil {
return resp, errors.Trace(err)
}
if restResp.StatusCode == http.StatusNotFound {
return resp, errors.NotFoundf(name)
}
if err := handleBasicAPIErrors(resp.ErrorList, c.logger); err != nil {
return resp, errors.Trace(err)
}
switch resp.Type {
case transport.CharmType, transport.BundleType:
default:
return resp, errors.Errorf("unexpected response type %q, expected charm or bundle", resp.Type)
}
if isTraceEnabled {
c.logger.Tracef(context.TODO(), "Info() unmarshalled: %+v", resp)
}
return resp, nil
}
// defaultInfoFilter returns a filter string to retrieve all data
// necessary to fill the transport.InfoResponse. Without it, we'd
// receive the Name, ID and Type.
func defaultInfoFilter() string {
filter := defaultResultFilter
filter = append(filter, "default-release.revision.download.size")
filter = append(filter, appendFilterList("default-release", infoDefaultRevisionFilter)...)
filter = append(filter, appendFilterList("default-release", defaultChannelFilter)...)
filter = append(filter, "channel-map.revision.download.size")
filter = append(filter, appendFilterList("channel-map", infoChannelMapRevisionFilter)...)
filter = append(filter, appendFilterList("channel-map", defaultChannelFilter)...)
return strings.Join(filter, ",")
}
var infoDefaultRevisionFilter = []string{
"revision.config-yaml",
"revision.metadata-yaml",
"revision.bundle-yaml",
"revision.bases.architecture",
"revision.bases.name",
"revision.bases.channel",
"revision.revision",
"revision.version",
}
var infoChannelMapRevisionFilter = []string{
"revision.created-at",
"revision.bases.architecture",
"revision.bases.name",
"revision.bases.channel",
"revision.revision",
"revision.version",
}
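// Illustrative usage sketch, assuming an existing *Client; the charm name and
// channel are example values only.
func exampleInfo(ctx context.Context, client *Client) (transport.InfoResponse, error) {
	return client.Info(ctx, "ubuntu", WithInfoChannel("latest/stable"))
}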
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package path
import (
"net/url"
"path"
"strings"
"github.com/juju/errors"
)
// Path defines an absolute path for making requests to the server.
type Path struct {
base *url.URL
}
// MakePath creates a URL for queries to a server.
func MakePath(base *url.URL) Path {
return Path{
base: base,
}
}
// Join appends path names onto the base URL and ensures that the constructed
// URL is valid.
// Example:
// - http://baseurl/name0/name1/
func (u Path) Join(names ...string) (Path, error) {
baseURL := u.String()
if !strings.HasSuffix(baseURL, "/") {
baseURL += "/"
}
namedPath := path.Join(names...)
path, err := url.Parse(baseURL + namedPath)
if err != nil {
return Path{}, errors.Trace(err)
}
return MakePath(path), nil
}
// Query adds additional query parameters to the Path.
// Example:
// - http://baseurl/name0/name1?q=value
func (u Path) Query(key string, value string) (Path, error) {
// If the value is empty, there is nothing to change, so return the
// original path.
if strings.TrimSpace(value) == "" {
return u, nil
}
baseQuery, err := url.ParseQuery(u.base.RawQuery)
if err != nil {
return Path{}, errors.Trace(err)
}
baseQuery.Add(key, value)
newURL, err := url.Parse(u.base.String())
if err != nil {
return Path{}, errors.Trace(err)
}
newURL.RawQuery = baseQuery.Encode()
return MakePath(newURL), nil
}
// String returns a stringified version of the Path.
// Under the hood this calls the url.URL#String method.
func (u Path) String() string {
return u.base.String()
}
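// Illustrative sketch of how Join and Query compose a request path; the base
// URL and query values are example values only.
func examplePath() (Path, error) {
	base, err := url.Parse("https://api.charmhub.io/v2/charms")
	if err != nil {
		return Path{}, errors.Trace(err)
	}
	p := MakePath(base)
	// .../v2/charms/info/ubuntu
	p, err = p.Join("info", "ubuntu")
	if err != nil {
		return Path{}, errors.Trace(err)
	}
	// .../v2/charms/info/ubuntu?channel=latest%2Fstable
	return p.Query("channel", "latest/stable")
}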
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"crypto/sha512"
"encoding/base64"
"fmt"
"net/http"
"sort"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/names/v6"
"github.com/kr/pretty"
"golang.org/x/crypto/pbkdf2"
corebase "github.com/juju/juju/core/base"
charmmetrics "github.com/juju/juju/core/charm/metrics"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
"github.com/juju/juju/core/version"
"github.com/juju/juju/internal/charmhub/path"
"github.com/juju/juju/internal/charmhub/transport"
internallogger "github.com/juju/juju/internal/logger"
"github.com/juju/juju/internal/uuid"
)
// Metrics is a map of metrics data to be sent to the charmhub.
type Metrics map[charmmetrics.MetricKey]map[charmmetrics.MetricValueKey]string
// action represents the type of refresh that is performed.
type action string
const (
// installAction defines an install action.
installAction action = "install"
// downloadAction defines a download action.
downloadAction action = "download"
// refreshAction defines a refresh action.
refreshAction action = "refresh"
)
var (
// A set of fields that are always requested when performing refresh calls
requiredRefreshFields = set.NewStrings(
"download", "id", "license", "name", "publisher", "resources",
"revision", "summary", "type", "version", "bases", "config-yaml",
"metadata-yaml",
).SortedValues()
)
const (
// notAvailable is used as a placeholder for Name and Channel in a refresh
// base request, if the Name and Channel are not known.
notAvailable = "NA"
)
// RefreshBase defines a base for selecting a specific charm. It continues
// to exist to allow incoming bases to be converted to bases used inside
// this package.
type RefreshBase struct {
Architecture string
Name string
Channel string
}
func (p RefreshBase) String() string {
path := p.Architecture
if p.Channel != "" {
if p.Name != "" {
path = fmt.Sprintf("%s/%s", path, p.Name)
}
path = fmt.Sprintf("%s/%s", path, p.Channel)
}
return path
}
// refreshClient defines a client for refresh requests.
type refreshClient struct {
path path.Path
client RESTClient
logger corelogger.Logger
}
// newRefreshClient creates a refreshClient for making refresh requests.
func newRefreshClient(path path.Path, client RESTClient, logger corelogger.Logger) *refreshClient {
return &refreshClient{
path: path,
client: client,
logger: logger,
}
}
// Refresh is used to refresh installed charms to a more suitable revision.
func (c *refreshClient) Refresh(ctx context.Context, config RefreshConfig) ([]transport.RefreshResponse, error) {
if c.logger.IsLevelEnabled(corelogger.TRACE) {
c.logger.Tracef(context.TODO(), "Refresh(%s)", pretty.Sprint(config))
}
req, err := config.Build()
if err != nil {
return nil, errors.Trace(err)
}
return c.refresh(ctx, config.Ensure, req)
}
// RefreshWithRequestMetrics gets refreshed charm data and provides metrics
// at the same time. Used as part of the charm revision updater facade.
func (c *refreshClient) RefreshWithRequestMetrics(ctx context.Context, config RefreshConfig, metrics Metrics) ([]transport.RefreshResponse, error) {
if c.logger.IsLevelEnabled(corelogger.TRACE) {
c.logger.Tracef(context.TODO(), "RefreshWithRequestMetrics(%s, %+v)", pretty.Sprint(config), metrics)
}
req, err := config.Build()
if err != nil {
return nil, errors.Trace(err)
}
m, err := contextMetrics(metrics)
if err != nil {
return nil, errors.Trace(err)
}
req.Metrics = m
return c.refresh(ctx, config.Ensure, req)
}
// RefreshWithMetricsOnly provides metrics without context or actions. Used
// as part of the charm revision updater facade.
func (c *refreshClient) RefreshWithMetricsOnly(ctx context.Context, metrics Metrics) error {
c.logger.Tracef(context.TODO(), "RefreshWithMetricsOnly(%+v)", metrics)
m, err := contextMetrics(metrics)
if err != nil {
return errors.Trace(err)
}
req := transport.RefreshRequest{
Context: []transport.RefreshRequestContext{},
Actions: []transport.RefreshRequestAction{},
Metrics: m,
}
// No need to ensure data which is not expected.
ensure := func(responses []transport.RefreshResponse) error { return nil }
_, err = c.refresh(ctx, ensure, req)
return err
}
func contextMetrics(metrics Metrics) (transport.RequestMetrics, error) {
m := make(transport.RequestMetrics)
for k, v := range metrics {
// verify top level "model" and "controller" keys
if k != charmmetrics.Controller && k != charmmetrics.Model {
return nil, errors.Trace(errors.NotValidf("highlevel metrics label %q", k))
}
ctxM := make(map[string]string, len(v))
for k2, v2 := range v {
ctxM[k2.String()] = v2
}
m[k.String()] = ctxM
}
return m, nil
}
func (c *refreshClient) refresh(ctx context.Context, ensure func(responses []transport.RefreshResponse) error, req transport.RefreshRequest) (_ []transport.RefreshResponse, err error) {
ctx, span := trace.Start(ctx, trace.NameFromFunc(), trace.WithAttributes(
trace.StringAttr("charmhub.request", "refresh"),
trace.StringAttr("charmhub.names", traceNames(req)),
trace.StringAttr("charmhub.idents", traceIdents(req)),
))
defer func() {
span.RecordError(err)
span.End()
}()
httpHeaders := make(http.Header)
var resp transport.RefreshResponses
restResp, err := c.client.Post(ctx, c.path, httpHeaders, req, &resp)
if err != nil {
return nil, errors.Trace(err)
}
if restResp.StatusCode == http.StatusNotFound {
return nil, logAndReturnError(errors.NotFoundf("refresh"))
}
if err := handleBasicAPIErrors(resp.ErrorList, c.logger); err != nil {
return nil, errors.Trace(err)
}
// Ensure that all the results contain the correct instance keys.
if err := ensure(resp.Results); err != nil {
return nil, errors.Trace(err)
}
// Exit early.
if len(resp.Results) <= 1 {
return resp.Results, nil
}
// The results are not guaranteed to be in the same order as the request
// actions, so reorder them to match before returning.
indexes := make(map[string]int, len(req.Actions))
for i, action := range req.Actions {
indexes[action.InstanceKey] = i
}
results := make([]transport.RefreshResponse, len(resp.Results))
for _, result := range resp.Results {
results[indexes[result.InstanceKey]] = result
}
if c.logger.IsLevelEnabled(corelogger.TRACE) {
c.logger.Tracef(context.TODO(), "Refresh() unmarshalled: %s", pretty.Sprint(results))
}
return results, nil
}
// RefreshOne creates a request config for requesting only one charm.
func RefreshOne(key, id string, revision int, channel string, base RefreshBase) (RefreshConfig, error) {
if id == "" {
return nil, logAndReturnError(errors.NotValidf("empty id"))
}
if key == "" {
// This is for compatibility reasons. With older clients, the
// key created in GetCharmURLOrigin may be lost in transit to and from
// the client. Since a key is required, ensure we have one.
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
key = uuid.String()
}
if err := validateBase(base); err != nil {
return nil, logAndReturnError(err)
}
return refreshOne{
instanceKey: key,
ID: id,
Revision: revision,
Channel: channel,
Base: base,
fields: requiredRefreshFields,
}, nil
}
// CreateInstanceKey creates an InstanceKey which is unique and stable
// from Refresh action to Refresh action. Required for KPI collection
// on the charmhub side, see LP:1944582. Rather than saving it in
// state, use the model uuid + the app name, which are unique. Modeled
// after the applicationDoc DocID and globalKey in state.
func CreateInstanceKey(app names.ApplicationTag, model names.ModelTag) string {
h := pbkdf2.Key([]byte(app.Id()), []byte(model.Id()), 8192, 32, sha512.New)
return base64.RawURLEncoding.EncodeToString(h)
}
// InstallOneFromRevision creates a request config using the revision and not
// the channel for requesting only one charm.
func InstallOneFromRevision(name string, revision int) (RefreshConfig, error) {
if name == "" {
return nil, logAndReturnError(errors.NotValidf("empty name"))
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOneByRevision{
action: installAction,
instanceKey: uuid.String(),
Name: name,
Revision: &revision,
fields: requiredRefreshFields,
}, nil
}
// AddResource adds resource revision data to a executeOne config.
// Used for install by revision.
func AddResource(config RefreshConfig, name string, revision int) (RefreshConfig, bool) {
c, ok := config.(executeOneByRevision)
if !ok {
return config, false
}
if len(c.resourceRevisions) == 0 {
c.resourceRevisions = make([]transport.RefreshResourceRevision, 0)
}
c.resourceRevisions = append(c.resourceRevisions, transport.RefreshResourceRevision{
Name: name,
Revision: revision,
})
return c, true
}
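// Illustrative sketch: building an install request pinned to a revision and
// then attaching a resource revision. The charm name, resource name and
// revisions are example values only.
func exampleInstallByRevision() (RefreshConfig, error) {
	config, err := InstallOneFromRevision("postgresql", 100)
	if err != nil {
		return nil, errors.Trace(err)
	}
	if withResource, ok := AddResource(config, "backup-data", 3); ok {
		config = withResource
	}
	return config, nil
}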
// AddConfigMetrics adds metrics to a refreshOne config. All values are
// applied at once; subsequent calls replace all values.
func AddConfigMetrics(config RefreshConfig, metrics map[charmmetrics.MetricValueKey]string) (RefreshConfig, error) {
c, ok := config.(refreshOne)
if !ok {
return config, nil // error?
}
if len(metrics) < 1 {
return c, nil
}
c.metrics = make(transport.ContextMetrics)
for k, v := range metrics {
c.metrics[k.String()] = v
}
return c, nil
}
// InstallOneFromChannel creates a request config using the channel and not the
// revision for requesting only one charm.
func InstallOneFromChannel(name string, channel string, base RefreshBase) (RefreshConfig, error) {
if name == "" {
return nil, logAndReturnError(errors.NotValidf("empty name"))
}
if err := validateBase(base); err != nil {
return nil, logAndReturnError(err)
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOne{
action: installAction,
instanceKey: uuid.String(),
Name: name,
Channel: &channel,
Base: base,
fields: requiredRefreshFields,
}, nil
}
// DownloadOneFromRevision creates a request config using the revision and not
// the channel for requesting only one charm.
func DownloadOneFromRevision(id string, revision int) (RefreshConfig, error) {
if id == "" {
return nil, logAndReturnError(errors.NotValidf("empty id"))
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOneByRevision{
action: downloadAction,
instanceKey: uuid.String(),
ID: id,
Revision: &revision,
fields: requiredRefreshFields,
}, nil
}
// DownloadOneFromRevisionByName creates a request config using the revision and not
// the channel for requesting only one charm.
func DownloadOneFromRevisionByName(name string, revision int) (RefreshConfig, error) {
if name == "" {
return nil, logAndReturnError(errors.NotValidf("empty name"))
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOneByRevision{
action: downloadAction,
instanceKey: uuid.String(),
Name: name,
Revision: &revision,
fields: requiredRefreshFields,
}, nil
}
// DownloadOneFromChannel creates a request config using the channel and not the
// revision for requesting only one charm.
func DownloadOneFromChannel(id string, channel string, base RefreshBase) (RefreshConfig, error) {
if id == "" {
return nil, logAndReturnError(errors.NotValidf("empty id"))
}
if err := validateBase(base); err != nil {
return nil, logAndReturnError(err)
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOne{
action: downloadAction,
instanceKey: uuid.String(),
ID: id,
Channel: &channel,
Base: base,
fields: requiredRefreshFields,
}, nil
}
// DownloadOneFromChannelByName creates a request config using the channel and not the
// revision for requesting only one charm.
func DownloadOneFromChannelByName(name string, channel string, base RefreshBase) (RefreshConfig, error) {
if name == "" {
return nil, logAndReturnError(errors.NotValidf("empty name"))
}
if err := validateBase(base); err != nil {
return nil, logAndReturnError(err)
}
uuid, err := uuid.NewUUID()
if err != nil {
return nil, logAndReturnError(err)
}
return executeOne{
action: downloadAction,
instanceKey: uuid.String(),
Name: name,
Channel: &channel,
Base: base,
fields: requiredRefreshFields,
}, nil
}
// constructRefreshBase creates a refresh request base that allows for
// partial base queries.
func constructRefreshBase(base RefreshBase) (transport.Base, error) {
if base.Architecture == "" {
return transport.Base{}, logAndReturnError(errors.NotValidf("refresh arch"))
}
name := base.Name
if name == "" {
name = notAvailable
}
var channel string
switch base.Channel {
case "":
channel = notAvailable
case "kubernetes":
// Kubernetes is not a valid channel for a base.
// Instead use the latest LTS version of ubuntu.
b := version.DefaultSupportedLTSBase()
name = b.OS
// Use the track to ensure no risk sneaks in
channel = b.Channel.Track
default:
var err error
channel, err = sanitiseChannel(base.Channel)
if err != nil {
return transport.Base{}, logAndReturnError(errors.Trace(err))
}
}
return transport.Base{
Architecture: base.Architecture,
Name: name,
Channel: channel,
}, nil
}
// sanitiseChannel returns a channel, sanitised for charmhub.
//
// Sometimes the channels we receive include a risk, which charmhub
// cannot understand, so any risk is dropped.
func sanitiseChannel(channel string) (string, error) {
if channel == "" {
return channel, nil
}
ch, err := corebase.ParseChannel(channel)
if err != nil {
return "", errors.Trace(err)
}
return ch.Track, nil
}
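// Illustrative sketch: sanitiseChannel is expected to drop the risk
// component, so a channel such as "20.04/stable" reduces to its track
// "20.04"; the input is an example value only.
func exampleSanitiseChannel() (string, error) {
	return sanitiseChannel("20.04/stable")
}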
// validateBase ensures that we do not pass "all" as part of a base.
// This function helps to find programming-related failures.
func validateBase(rp RefreshBase) error {
var msg []string
if rp.Architecture == "all" {
msg = append(msg, fmt.Sprintf("Architecture %q", rp.Architecture))
}
if rp.Name == "all" {
msg = append(msg, fmt.Sprintf("Name %q", rp.Name))
}
if rp.Channel == "all" {
msg = append(msg, fmt.Sprintf("Channel %q", rp.Channel))
}
if len(msg) > 0 {
return errors.Trace(errors.NotValidf(strings.Join(msg, ", ")))
}
return nil
}
type instanceKey interface {
InstanceKey() string
}
// ExtractConfigInstanceKey is used to get the instance key from a refresh
// config.
func ExtractConfigInstanceKey(cfg RefreshConfig) string {
key, ok := cfg.(instanceKey)
if ok {
return key.InstanceKey()
}
return ""
}
// Ideally we'd avoid the package-level logger and use the Client's one, but
// the functions that create a RefreshConfig like RefreshOne don't take
// loggers. This logging can sometimes be quite useful to avoid error sources
// getting lost across the wire, so leave as is for now.
var logger = internallogger.GetLogger("juju.charmhub", corelogger.CHARMHUB)
func logAndReturnError(err error) error {
err = errors.Trace(err)
logger.Errorf(context.TODO(), err.Error())
return err
}
func traceNames(req transport.RefreshRequest) string {
names := make(map[string]struct{})
for _, action := range req.Actions {
if action.Name == nil {
continue
}
names[*action.Name] = struct{}{}
}
return mapToString(names)
}
func traceIdents(req transport.RefreshRequest) string {
idents := make(map[string]struct{})
for _, action := range req.Actions {
if action.ID == nil {
continue
}
idents[*action.ID] = struct{}{}
}
for _, context := range req.Context {
idents[context.ID] = struct{}{}
}
return mapToString(idents)
}
func mapToString(m map[string]struct{}) string {
var res []string
for k := range m {
res = append(res, k)
}
sort.Strings(res)
return strings.Join(res, ",")
}
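// Illustrative usage sketch, assuming an existing *Client and a charm already
// deployed from Charmhub; the ID, revision, channel and base are example
// values only.
func exampleRefreshOne(ctx context.Context, client *Client, app names.ApplicationTag, model names.ModelTag) ([]transport.RefreshResponse, error) {
	key := CreateInstanceKey(app, model)
	config, err := RefreshOne(key, "exampleCharmID", 42, "latest/stable", RefreshBase{
		Architecture: "amd64",
		Name:         "ubuntu",
		Channel:      "22.04",
	})
	if err != nil {
		return nil, errors.Trace(err)
	}
	return client.Refresh(ctx, config)
}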
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"fmt"
"strings"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/juju/internal/charmhub/transport"
)
// RefreshConfig defines a type for building refresh requests.
type RefreshConfig interface {
// Build a refresh request for sending to the API.
Build() (transport.RefreshRequest, error)
// Ensure that the request back contains the information we requested.
Ensure([]transport.RefreshResponse) error
// String describes the underlying refresh config.
String() string
}
// refreshOne holds the config for making refresh calls to the CharmHub API.
type refreshOne struct {
ID string
Revision int
Channel string
Base RefreshBase
// instanceKey is a private unique key that we construct for CharmHub API
// asynchronous calls.
instanceKey string
metrics transport.ContextMetrics
fields []string
}
// InstanceKey returns the underlying instance key.
func (c refreshOne) InstanceKey() string {
return c.instanceKey
}
func (c refreshOne) String() string {
return fmt.Sprintf("Refresh one (instanceKey: %s): using ID %s revision %+v, with channel %s and base %v",
c.instanceKey, c.ID, c.Revision, c.Channel, c.Base.String())
}
// Build a refresh request that can be passed to the API.
func (c refreshOne) Build() (transport.RefreshRequest, error) {
base, err := constructRefreshBase(c.Base)
if err != nil {
return transport.RefreshRequest{}, errors.Trace(err)
}
return transport.RefreshRequest{
Context: []transport.RefreshRequestContext{{
InstanceKey: c.instanceKey,
ID: c.ID,
Revision: c.Revision,
Base: base,
TrackingChannel: c.Channel,
Metrics: c.metrics,
// TODO (stickupkid): We need to model the refreshed date. It's
// currently optional, but will be required at some point. This
// is the installed date of the charm on the system.
}},
Actions: []transport.RefreshRequestAction{{
Action: string(refreshAction),
InstanceKey: c.instanceKey,
ID: &c.ID,
}},
Fields: c.fields,
}, nil
}
// Ensure that the request back contains the information we requested.
func (c refreshOne) Ensure(responses []transport.RefreshResponse) error {
for _, resp := range responses {
if resp.InstanceKey == c.instanceKey {
return nil
}
}
return errors.NotValidf("refresh action key")
}
type executeOne struct {
ID string
Name string
Revision *int
Channel *string
Base RefreshBase
// instanceKey is a private unique key that we construct for CharmHub API
// asynchronous calls.
action action
instanceKey string
fields []string
}
// InstanceKey returns the underlying instance key.
func (c executeOne) InstanceKey() string {
return c.instanceKey
}
// Build a refresh request that can be passed to the API.
func (c executeOne) Build() (transport.RefreshRequest, error) {
base, err := constructRefreshBase(c.Base)
if err != nil {
return transport.RefreshRequest{}, errors.Trace(err)
}
var id *string
if c.ID != "" {
id = &c.ID
}
var name *string
if c.Name != "" {
name = &c.Name
}
req := transport.RefreshRequest{
// Context is required here, even if it looks optional.
Context: []transport.RefreshRequestContext{},
Actions: []transport.RefreshRequestAction{{
Action: string(c.action),
InstanceKey: c.instanceKey,
ID: id,
Name: name,
Revision: c.Revision,
Channel: c.Channel,
Base: &base,
}},
Fields: c.fields,
}
return req, nil
}
// Ensure that the request back contains the information we requested.
func (c executeOne) Ensure(responses []transport.RefreshResponse) error {
for _, resp := range responses {
if resp.InstanceKey == c.instanceKey {
return nil
}
}
return errors.NotValidf("%v action key", string(c.action))
}
func (c executeOne) String() string {
var channel string
if c.Channel != nil {
channel = *c.Channel
}
var using string
if c.ID != "" {
using = fmt.Sprintf("ID %s", c.ID)
} else {
using = fmt.Sprintf("Name %s", c.Name)
}
var revision string
if c.Revision != nil {
revision = fmt.Sprintf(" with revision: %+v", c.Revision)
}
return fmt.Sprintf("Execute One (action: %s, instanceKey: %s): using %s%s channel %v and base %s",
c.action, c.instanceKey, using, revision, channel, c.Base)
}
type executeOneByRevision struct {
Name string
Revision *int
// ID is only used for download by revision
ID string
resourceRevisions []transport.RefreshResourceRevision
// instanceKey is a private unique key that we construct for CharmHub API
// asynchronous calls.
instanceKey string
action action
fields []string
}
// InstanceKey returns the underlying instance key.
func (c executeOneByRevision) InstanceKey() string {
return c.instanceKey
}
// Build a refresh request for sending to the API.
func (c executeOneByRevision) Build() (transport.RefreshRequest, error) {
var name, id *string
if c.Name != "" {
name = &c.Name
}
if c.ID != "" {
id = &c.ID
}
req := transport.RefreshRequest{
// Context is required here, even if it looks optional.
Context: []transport.RefreshRequestContext{},
Actions: []transport.RefreshRequestAction{{
Action: string(c.action),
InstanceKey: c.instanceKey,
Name: name,
ID: id,
Revision: c.Revision,
ResourceRevisions: c.resourceRevisions,
}},
Fields: []string{"bases", "download", "id", "revision", "version", "resources", "type"},
}
if len(c.fields) != 0 {
fieldSet := set.NewStrings(req.Fields...)
for _, field := range c.fields {
fieldSet.Add(field)
}
req.Fields = fieldSet.SortedValues()
}
return req, nil
}
// Ensure that the request back contains the information we requested.
func (c executeOneByRevision) Ensure(responses []transport.RefreshResponse) error {
for _, resp := range responses {
if resp.InstanceKey == c.instanceKey {
return nil
}
}
return errors.NotValidf("%v action key", string(c.action))
}
// String describes the underlying refresh config.
func (c executeOneByRevision) String() string {
var revision string
if c.Revision != nil {
revision = fmt.Sprintf(" with revision: %+v", c.Revision)
}
return fmt.Sprintf("Install One (action: %s, instanceKey: %s): using Name %s %s",
c.action, c.instanceKey, c.Name, revision)
}
type refreshMany struct {
Configs []RefreshConfig
}
// RefreshMany will compose many refresh configs.
func RefreshMany(configs ...RefreshConfig) RefreshConfig {
return refreshMany{
Configs: configs,
}
}
// Build a refresh request that can be passed to the API.
func (c refreshMany) Build() (transport.RefreshRequest, error) {
if len(c.Configs) == 0 {
return transport.RefreshRequest{}, errors.NotFoundf("configs")
}
// Not all configs built here have a context, start out with an empty
// slice, so we do not call Refresh with a nil context.
// See executeOne.Build().
result := transport.RefreshRequest{
Context: []transport.RefreshRequestContext{},
}
for _, config := range c.Configs {
req, err := config.Build()
if err != nil {
return transport.RefreshRequest{}, errors.Trace(err)
}
result.Context = append(result.Context, req.Context...)
result.Actions = append(result.Actions, req.Actions...)
result.Fields = append(result.Fields, req.Fields...)
}
// Ensure that the required field list contains no duplicates
if len(result.Fields) != 0 {
result.Fields = set.NewStrings(result.Fields...).SortedValues()
}
return result, nil
}
// Ensure that the request back contains the information we requested.
func (c refreshMany) Ensure(responses []transport.RefreshResponse) error {
for _, config := range c.Configs {
if err := config.Ensure(responses); err != nil {
return errors.Annotatef(err, "missing response")
}
}
return nil
}
func (c refreshMany) String() string {
plans := make([]string, len(c.Configs))
for i, config := range c.Configs {
plans[i] = config.String()
}
return strings.Join(plans, "\n")
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package charmhub
import (
"context"
"net/http"
"github.com/juju/errors"
"github.com/kr/pretty"
corelogger "github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
"github.com/juju/juju/internal/charmhub/path"
"github.com/juju/juju/internal/charmhub/transport"
)
// resourcesClient defines a client for resources requests.
type resourcesClient struct {
path path.Path
client RESTClient
logger corelogger.Logger
}
// newResourcesClient creates a resourcesClient for making resource requests.
func newResourcesClient(path path.Path, client RESTClient, logger corelogger.Logger) *resourcesClient {
return &resourcesClient{
path: path,
client: client,
logger: logger,
}
}
// ListResourceRevisions returns a slice of resource revisions for the provided
// resource of the given charm.
func (c *resourcesClient) ListResourceRevisions(ctx context.Context, charm, resource string) (_ []transport.ResourceRevision, err error) {
ctx, span := trace.Start(ctx, trace.NameFromFunc(), trace.WithAttributes(
trace.StringAttr("charmhub.charm", charm),
trace.StringAttr("charmhub.resource", resource),
trace.StringAttr("charmhub.request", "list-resource-revisions"),
))
defer func() {
span.RecordError(err)
span.End()
}()
isTraceEnabled := c.logger.IsLevelEnabled(corelogger.TRACE)
if isTraceEnabled {
c.logger.Tracef(context.TODO(), "ListResourceRevisions(%s, %s)", charm, resource)
}
var resp transport.ResourcesResponse
path, err := c.path.Join(charm, resource, "revisions")
if err != nil {
return nil, errors.Trace(err)
}
restResp, err := c.client.Get(ctx, path, &resp)
if err != nil {
return nil, errors.Trace(err)
}
if restResp.StatusCode == http.StatusNotFound {
return nil, errors.NotFoundf("%q for %q", charm, resource)
}
if isTraceEnabled {
c.logger.Tracef(context.TODO(), "ListResourceRevisions(%s, %s) unmarshalled: %s", charm, resource, pretty.Sprint(resp.Revisions))
}
return resp.Revisions, nil
}
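// listRevisionsExample is an illustrative sketch, not part of the original
// source: it wires a resourcesClient from its dependencies and lists the
// revisions of a resource; the charm name "wordpress" and resource name
// "config" are hypothetical.
func listRevisionsExample(ctx context.Context, p path.Path, rest RESTClient, logger corelogger.Logger) ([]transport.ResourceRevision, error) {
	client := newResourcesClient(p, rest, logger)
	return client.ListResourceRevisions(ctx, "wordpress", "config")
}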
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package transport
// The following contains all the common DTOs for gathering information from
// a given store.
// Type represents the type of payload that is expected from the API.
type Type string
// Matches attempts to match a string to a given source.
func (t Type) Matches(o string) bool {
return string(t) == o
}
func (t Type) String() string {
return string(t)
}
const (
// CharmType represents the charm payload.
CharmType Type = "charm"
// BundleType represents the bundle payload.
BundleType Type = "bundle"
)
// Channel defines a unique permutation that corresponds to the track, risk
// and base. There can be multiple channels of the same track and risk, but
// with different bases.
type Channel struct {
Name string `json:"name"`
Base Base `json:"base"`
ReleasedAt string `json:"released-at"`
Risk string `json:"risk"`
Track string `json:"track"`
}
// Base is a typed tuple for identifying charms or bundles with a matching
// architecture, os and channel.
type Base struct {
Architecture string `json:"architecture"`
Name string `json:"name"`
Channel string `json:"channel"`
}
// Download represents the download structure from CharmHub.
// Elements returned by the API but not used by juju are: "hash-sha3-384"
// and "hash-sha-512".
type Download struct {
HashSHA256 string `json:"hash-sha-256"`
HashSHA384 string `json:"hash-sha-384"`
Size int `json:"size"`
URL string `json:"url"`
}
// Entity holds the information about the charm or bundle, including what it
// contains and who publishes it.
type Entity struct {
Categories []Category `json:"categories"`
Charms []Charm `json:"contains-charms"`
Description string `json:"description"`
License string `json:"license"`
Publisher map[string]string `json:"publisher"`
Summary string `json:"summary"`
UsedBy []string `json:"used-by"`
StoreURL string `json:"store-url"`
}
// Category defines the category of a given charm or bundle. Akin to a tag.
type Category struct {
Featured bool `json:"featured"`
Name string `json:"name"`
}
// Charm is used to identify charms within a bundle.
type Charm struct {
Name string `json:"name"`
PackageID string `json:"package-id"`
StoreURL string `json:"store-url"`
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package transport
import (
"strings"
)
// APIError represents the error from the CharmHub API.
type APIError struct {
Code APIErrorCode `json:"code"`
Message string `json:"message"`
Extra APIErrorExtra `json:"extra"`
}
func (a APIError) Error() string {
return a.Message
}
// APIErrors represents a slice of APIError's
type APIErrors []APIError
func (a APIErrors) Error() string {
if len(a) > 0 {
var combined []string
for _, e := range a {
if err := e.Error(); err != "" {
combined = append(combined, err)
}
}
return strings.Join(combined, "\n")
}
return ""
}
// APIErrorExtra defines additional payloads attached to a given error. Think
// of this object as a series of suggestions to apply to the failed API
// request, in the hope that a new request will be successful.
type APIErrorExtra struct {
Releases []Release `json:"releases"`
DefaultBases []Base `json:"default-bases"`
}
// Release defines a set of suggested releases that might also work for the
// given request.
type Release struct {
Base Base `json:"base"`
Channel string `json:"channel"`
}
// APIErrorCode classifies the error code we get back from the API. This is
// not an exhaustive list of codes.
type APIErrorCode string
const (
ErrorCodeAccessByDownstreamStoreNotAllowed APIErrorCode = "access-by-downstream-store-not-allowed"
ErrorCodeAccessByRevisionNotAllowed APIErrorCode = "access-by-revision-not-allowed"
ErrorCodeAPIError APIErrorCode = "api-error"
ErrorCodeBadArgument APIErrorCode = "bad-argument"
ErrorCodeCharmResourceNotFound APIErrorCode = "charm-resource-not-found"
ErrorCodeChannelNotFound APIErrorCode = "channel-not-found"
ErrorCodeDeviceAuthorizationNeedsRefresh APIErrorCode = "device-authorization-needs-refresh"
ErrorCodeDeviceServiceDisallowed APIErrorCode = "device-service-disallowed"
ErrorCodeDuplicatedKey APIErrorCode = "duplicated-key"
ErrorCodeDuplicateFetchAssertionsKey APIErrorCode = "duplicate-fetch-assertions-key"
ErrorCodeEndpointDisabled APIErrorCode = "endpoint-disabled"
ErrorCodeIDNotFound APIErrorCode = "id-not-found"
ErrorCodeInconsistentData APIErrorCode = "inconsistent-data"
ErrorCodeInstanceKeyNotUnique APIErrorCode = "instance-key-not-unique"
ErrorCodeInvalidChannel APIErrorCode = "invalid-channel"
ErrorCodeInvalidCharmBase APIErrorCode = "invalid-charm-base"
ErrorCodeInvalidCharmResource APIErrorCode = "invalid-charm-resource"
ErrorCodeInvalidCohortKey APIErrorCode = "invalid-cohort-key"
ErrorCodeInvalidGrade APIErrorCode = "invalid-grade"
ErrorCodeInvalidMetric APIErrorCode = "invalid-metric"
ErrorCodeInvalidUnboundEmptySearch APIErrorCode = "invalid-unbound-empty-search"
ErrorCodeMacaroonPermissionRequired APIErrorCode = "macaroon-permission-required"
ErrorCodeMissingCharmBase APIErrorCode = "missing-charm-base"
ErrorCodeMissingContext APIErrorCode = "missing-context"
ErrorCodeMissingFetchAssertionsKey APIErrorCode = "missing-fetch-assertions-key"
ErrorCodeMissingHeader APIErrorCode = "missing-header"
ErrorCodeMissingInstanceKey APIErrorCode = "missing-instance-key"
ErrorCodeMissingKey APIErrorCode = "missing-key"
ErrorCodeNameNotFound APIErrorCode = "name-not-found"
ErrorCodeNotFound APIErrorCode = "not-found"
ErrorCodePaymentRequired APIErrorCode = "payment-required"
ErrorCodeRateLimitExceeded APIErrorCode = "rate-limit-exceeded"
ErrorCodeRefreshBundleNotSupported APIErrorCode = "refresh-bundle-not-supported"
ErrorCodeRemoteServiceUnavailable APIErrorCode = "remote-service-unavailable"
ErrorCodeResourceNotFound APIErrorCode = "resource-not-found"
ErrorCodeRevisionConflict APIErrorCode = "revision-conflict"
ErrorCodeRevisionNotFound APIErrorCode = "revision-not-found"
ErrorCodeServiceMisconfigured APIErrorCode = "service-misconfigured"
ErrorCodeStoreAuthorizationNeedsRefresh APIErrorCode = "store-authorization-needs-refresh"
ErrorCodeStoreDisallowed APIErrorCode = "store-disallowed"
ErrorCodeUnexpectedData APIErrorCode = "unexpected-data"
ErrorCodeUnknownGrade APIErrorCode = "unknown-grade"
ErrorCodeUserAuthenticationError APIErrorCode = "user-authentication-error"
ErrorCodeUserAuthorizationNeedsRefresh APIErrorCode = "user-authorization-needs-refresh"
// TODO 2021-04-08 hml
// Remove once Charmhub API returns ErrorCodeInvalidCharmBase
ErrorCodeInvalidCharmPlatform APIErrorCode = "invalid-charm-platform"
ErrorCodeMissingCharmPlatform APIErrorCode = "missing-charm-platform"
)
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package configschema
import (
"fmt"
"reflect"
"strings"
"github.com/juju/schema"
"gopkg.in/errgo.v1"
)
// What to do about reading content from paths?
// Could just have a load of client-side special cases.
// Fields holds a map from attribute name to
// information about that attribute.
type Fields map[string]Attr
type Attr struct {
// Description holds a human-readable description
// of the attribute.
Description string `json:"description"`
// Type holds the type of the attribute value.
Type FieldType `json:"type"`
// Group holds the group that the attribute belongs to.
// All attributes within a Fields that have the same Group
// attribute are considered to be part of the same group.
Group Group `json:"group"`
// Immutable specifies whether the attribute cannot
// be changed once set.
Immutable bool
// Mandatory specifies whether the attribute
// must be provided.
Mandatory bool `json:"mandatory,omitempty"`
// Secret specifies whether the attribute should be
// considered secret.
Secret bool `json:"is-secret,omitempty"`
// EnvVar holds the environment variable
// that will be used to obtain the default value
// if it isn't specified.
EnvVar string `json:"env-var,omitempty"`
// EnvVars holds additional environment
// variables to be used if the value in EnvVar is
// not available, from highest to lowest priority.
EnvVars []string `json:"env-vars,omitempty"`
// Example holds an example value for the attribute
// that can be used to produce a plausible-looking
// entry for the attribute without necessarily using
// it as a default value.
//
// TODO if the example holds some special values, use
// it as a template to generate initial random values
// (for example for admin-password) ?
Example interface{} `json:"example,omitempty"`
// Values holds the set of all possible values of the attribute.
Values []interface{} `json:"values,omitempty"`
// Documentation holds the longform documentation for this option.
// This may include markdown.
Documentation string
}
// Checker returns a checker that can be used to coerce values into the
// type of the attribute. Specifically, string is always supported for
// any checker type.
func (attr Attr) Checker() (schema.Checker, error) {
checker := checkers[attr.Type]
if checker == nil {
return nil, fmt.Errorf("invalid type %q", attr.Type)
}
if len(attr.Values) == 0 {
return checker, nil
}
return oneOfValues(checker, attr.Values)
}
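// checkerExample is an illustrative sketch, not part of the original source:
// an attribute with a restricted value set yields a checker that accepts only
// those values; the attribute definition and coerced value are assumptions.
func checkerExample() (interface{}, error) {
	attr := Attr{
		Type:   Tstring,
		Values: []interface{}{"dev", "prod"},
	}
	checker, err := attr.Checker()
	if err != nil {
		return nil, err
	}
	// "prod" is accepted; a value such as "staging" would be rejected
	// because it is not in Values.
	return checker.Coerce("prod", nil)
}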
// Group describes the grouping of attributes.
type Group string
// The following constants are the initially defined group values.
const (
// JujuGroup groups attributes defined by Juju that may
// not be specified by a user.
JujuGroup Group = "juju"
// EnvironGroup groups attributes that are defined across all
// possible Juju environments.
EnvironGroup Group = "environ"
// AccountGroup groups attributes that define a user account
// used by a provider.
AccountGroup Group = "account"
// ProviderGroup groups attributes defined by the provider
// that are not account credentials. This is also the default
// group.
ProviderGroup Group = ""
)
// FieldType describes the type of an attribute value.
type FieldType string
// The following constants are the possible type values.
// The "canonical Go type" is the type that the will be
// the result of a successful Coerce call.
const (
// Tstring represents a string type. Its canonical Go type is string.
Tstring FieldType = "string"
// Tbool represents a boolean type. Its canonical Go type is bool.
Tbool FieldType = "bool"
// Tint represents an integer type. Its canonical Go type is int.
Tint FieldType = "int"
// Tattrs represents an attribute map. Its canonical Go type is
// map[string]string.
Tattrs FieldType = "attrs"
// Tlist represents a list of strings. Its canonical Go type is []string.
Tlist FieldType = "list"
)
var checkers = map[FieldType]schema.Checker{
Tstring: schema.String(),
Tbool: schema.Bool(),
Tint: schema.ForceInt(),
Tattrs: attrsChecker{},
Tlist: schema.List(schema.String()),
}
// Alternative possibilities to ValidationSchema to bear in mind for
// the future:
// func (s Fields) Checker() schema.Checker
// func (s Fields) Validate(value map[string]interface{}) (v map[string] interface{}, extra []string, err error)
// ValidationSchema returns values suitable for passing to
// schema.FieldMap to create a schema.Checker that will validate the given fields.
// It will return an error if the fields are invalid.
//
// The Defaults return value will contain entries for all non-mandatory
// attributes set to schema.Omit. It is the responsibility of the
// client to set any actual default values as required.
func (s Fields) ValidationSchema() (schema.Fields, schema.Defaults, error) {
fields := make(schema.Fields)
defaults := make(schema.Defaults)
for name, attr := range s {
path := []string{name}
checker, err := attr.Checker()
if err != nil {
return nil, nil, errgo.Notef(err, "%s", mkPath(path))
}
if !attr.Mandatory {
defaults[name] = schema.Omit
}
fields[name] = checker
}
return fields, defaults, nil
}
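// validationExample is an illustrative sketch, not part of the original
// source: the fields and defaults returned by ValidationSchema are fed into
// schema.FieldMap to validate a hypothetical attribute map.
func validationExample() (interface{}, error) {
	fields := Fields{
		"name":  {Type: Tstring, Mandatory: true},
		"debug": {Type: Tbool},
	}
	sf, defaults, err := fields.ValidationSchema()
	if err != nil {
		return nil, err
	}
	checker := schema.FieldMap(sf, defaults)
	// "debug" is not mandatory, so it may be omitted from the input.
	return checker.Coerce(map[string]interface{}{"name": "example"}, nil)
}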
// oneOfValues returns a checker that coerces its value
// using the supplied checker, then checks that the
// resulting value is equal to one of the given values.
func oneOfValues(checker schema.Checker, values []interface{}) (schema.Checker, error) {
cvalues := make([]interface{}, len(values))
for i, v := range values {
cv, err := checker.Coerce(v, nil)
if err != nil {
return nil, fmt.Errorf("invalid enumerated value: %v", err)
}
cvalues[i] = cv
}
return oneOfValuesChecker{
vals: cvalues,
checker: checker,
}, nil
}
type oneOfValuesChecker struct {
vals []interface{}
checker schema.Checker
}
// Coerce implements schema.Checker.Coerce.
func (c oneOfValuesChecker) Coerce(v interface{}, path []string) (interface{}, error) {
v, err := c.checker.Coerce(v, path)
if err != nil {
return v, err
}
for _, allow := range c.vals {
if allow == v {
return v, nil
}
}
return nil, fmt.Errorf("%sexpected one of %v, got %#v", pathPrefix(path), c.vals, v)
}
type attrsChecker struct{}
var (
attrMapChecker = schema.Map(schema.String(), schema.String())
attrSliceChecker = schema.List(schema.String())
)
func (c attrsChecker) Coerce(v interface{}, path []string) (interface{}, error) {
// TODO consider allowing only the map variant.
switch reflect.TypeOf(v).Kind() {
case reflect.String:
s, err := schema.String().Coerce(v, path)
if err != nil {
return nil, errgo.Mask(err)
}
result, err := parseKeyValues(strings.Fields(s.(string)), true)
if err != nil {
return nil, fmt.Errorf("%s%v", pathPrefix(path), err)
}
return result, nil
case reflect.Slice:
slice0, err := attrSliceChecker.Coerce(v, path)
if err != nil {
return nil, errgo.Mask(err)
}
slice := slice0.([]interface{})
fields := make([]string, len(slice))
for i, f := range slice {
fields[i] = f.(string)
}
result, err := parseKeyValues(fields, true)
if err != nil {
return nil, fmt.Errorf("%s%v", pathPrefix(path), err)
}
return result, nil
case reflect.Map:
imap0, err := attrMapChecker.Coerce(v, path)
if err != nil {
return nil, errgo.Mask(err)
}
imap := imap0.(map[interface{}]interface{})
result := make(map[string]string)
for k, v := range imap {
result[k.(string)] = v.(string)
}
return result, nil
default:
return nil, errgo.Newf("%sunexpected type for value, got %T(%v)", pathPrefix(path), v, v)
}
}
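// attrsExample is an illustrative sketch, not part of the original source:
// it shows the three input forms attrsChecker accepts, all of which coerce to
// a map[string]string; the key=value pairs are assumptions.
func attrsExample() error {
	c := attrsChecker{}
	// A whitespace-separated string of key=value pairs.
	if _, err := c.Coerce("a=b c=d", nil); err != nil {
		return err
	}
	// A slice of key=value strings.
	if _, err := c.Coerce([]interface{}{"a=b", "c=d"}, nil); err != nil {
		return err
	}
	// A map of strings to strings.
	_, err := c.Coerce(map[interface{}]interface{}{"a": "b"}, nil)
	return err
}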
// pathPrefix returns an error message prefix holding
// the concatenation of the path elements. If path
// starts with a ".", the dot is omitted.
func pathPrefix(path []string) string {
if p := mkPath(path); p != "" {
return p + ": "
}
return ""
}
// mkPath returns a string holding
// the concatenation of the path elements.
// If path starts with a ".", the dot is omitted.
func mkPath(path []string) string {
if len(path) == 0 {
return ""
}
if path[0] == "." {
return strings.Join(path[1:], "")
}
return strings.Join(path, "")
}
// ExampleYAML returns the fields formatted as a YAML
// example, with non-mandatory fields commented out,
// like the providers do currently.
func (s Fields) ExampleYAML() []byte {
panic("unimplemented")
}
// parseKeyValues parses the supplied string slice into a map mapping
// keys to values. Duplicate keys cause an error to be returned.
func parseKeyValues(src []string, allowEmptyValues bool) (map[string]string, error) {
results := map[string]string{}
for _, kv := range src {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 {
return nil, errgo.Newf(`expected "key=value", got %q`, kv)
}
key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
if len(key) == 0 || (!allowEmptyValues && len(value) == 0) {
return nil, errgo.Newf(`expected "key=value", got "%s=%s"`, key, value)
}
if _, exists := results[key]; exists {
return nil, errgo.Newf("key %q specified more than once", key)
}
results[key] = value
}
return results, nil
}
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package configschema
import (
"bytes"
"fmt"
"go/doc"
"io"
"reflect"
"sort"
"strings"
"unicode"
"gopkg.in/yaml.v2"
)
// SampleYAML writes YAML output to w, indented by indent spaces, holding the
// attributes in attrs with descriptions found in the given fields. An entry
// for any attribute in fields that is not in attrs will be generated but
// commented out.
func SampleYAML(w io.Writer, indent int, attrs map[string]interface{}, fields Fields) error {
indentStr := strings.Repeat(" ", indent)
orderedFields := make(fieldsByGroup, 0, len(fields))
for name, f := range fields {
orderedFields = append(orderedFields, attrWithName{
name: name,
Attr: f,
})
}
sort.Sort(orderedFields)
for i, f := range orderedFields {
if i > 0 {
_, err := w.Write(nl)
if err != nil {
return err
}
}
writeSampleDescription(w, f.Attr, indentStr+"# ")
val, ok := attrs[f.name]
if ok {
fmt.Fprintf(w, "%s:", f.name)
err := indentVal(w, val, indentStr)
if err != nil {
return err
}
} else {
if f.Example != nil {
val = f.Example
} else {
val = sampleValue(f.Type)
}
fmt.Fprintf(w, "# %s:", f.name)
err := indentVal(w, val, indentStr+"# ")
if err != nil {
return err
}
}
}
return nil
}
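// sampleYAMLExample is an illustrative sketch, not part of the original
// source: it renders a commented-out sample entry for a single hypothetical
// field into a buffer.
func sampleYAMLExample() (string, error) {
	fields := Fields{
		"admin-secret": {
			Description: "The administrator password for the environment",
			Type:        Tstring,
			Secret:      true,
		},
	}
	var buf bytes.Buffer
	if err := SampleYAML(&buf, 0, nil, fields); err != nil {
		return "", err
	}
	return buf.String(), nil
}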
const textWidth = 80
var (
space = []byte(" ")
nl = []byte("\n")
)
// writeSampleDescription writes the given attribute to w
// prefixed by the given indentation string.
func writeSampleDescription(w io.Writer, f Attr, indent string) {
previousText := false
// section marks the start of a new section of the comment;
// sections are separated with empty lines.
section := func() {
if previousText {
fmt.Fprintf(w, "%s\n", strings.TrimRightFunc(indent, unicode.IsSpace))
}
previousText = true
}
descr := strings.TrimSpace(f.Description)
if descr != "" {
section()
doc.ToText(w, descr, indent, " ", textWidth-len(indent))
}
vars := make([]string, 0, len(f.EnvVars)+1)
if f.EnvVar != "" {
vars = append(vars, "$"+f.EnvVar)
}
for _, v := range f.EnvVars {
vars = append(vars, "$"+v)
}
if len(vars) > 0 {
section()
fmt.Fprintf(w, "%sDefault value taken from %s.\n", indent, wordyList(vars))
}
attrText := ""
switch {
case f.Secret && f.Immutable:
attrText = "immutable and considered secret"
case f.Secret:
attrText = "considered secret"
case f.Immutable:
attrText = "immutable"
}
if attrText != "" {
section()
fmt.Fprintf(w, "%sThis attribute is %s.\n", indent, attrText)
}
section()
}
// wordyList formats the given slice in the form "x, y or z".
func wordyList(words []string) string {
if len(words) == 0 {
return ""
}
if len(words) == 1 {
return words[0]
}
return strings.Join(words[0:len(words)-1], ", ") + " or " + words[len(words)-1]
}
var groupPriority = map[Group]int{
ProviderGroup: 3,
AccountGroup: 2,
EnvironGroup: 1,
}
type attrWithName struct {
name string
Attr
}
type fieldsByGroup []attrWithName
func (f fieldsByGroup) Len() int {
return len(f)
}
func (f fieldsByGroup) Swap(i0, i1 int) {
f[i0], f[i1] = f[i1], f[i0]
}
func (f fieldsByGroup) Less(i0, i1 int) bool {
f0, f1 := &f[i0], &f[i1]
pri0, pri1 := groupPriority[f0.Group], groupPriority[f1.Group]
if pri0 != pri1 {
return pri0 > pri1
}
return f0.name < f1.name
}
// indentVal writes the given value x to w as YAML, prefixing
// the second and subsequent lines with the given indent.
func indentVal(w io.Writer, x interface{}, indentStr string) error {
data, err := yaml.Marshal(x)
if err != nil {
panic(fmt.Errorf("cannot marshal YAML: %v", err))
}
if len(data) == 0 {
panic("YAML cannot marshal to empty string")
}
indent := []byte(indentStr + " ")
if canUseSameLine(x) {
_, err := w.Write(space)
if err != nil {
return err
}
} else {
_, err = w.Write(nl)
if err != nil {
return err
}
_, err = w.Write(indent)
if err != nil {
return err
}
}
data = bytes.TrimSuffix(data, nl)
lines := bytes.Split(data, nl)
for i, line := range lines {
if i > 0 {
_, err = w.Write(indent)
if err != nil {
return err
}
}
_, err = w.Write(line)
if err != nil {
return err
}
_, err = w.Write(nl)
if err != nil {
return err
}
}
return nil
}
func canUseSameLine(x interface{}) bool {
if x == nil {
return true
}
v := reflect.ValueOf(x)
switch v.Kind() {
case reflect.Map:
return v.Len() == 0
case reflect.Slice:
return v.Len() == 0
}
return true
}
func sampleValue(t FieldType) interface{} {
switch t {
case Tstring:
return ""
case Tbool:
return false
case Tint:
return 0
case Tattrs:
return map[string]string{
"example": "value",
}
case Tlist:
return []string{"example"}
default:
panic(fmt.Errorf("unknown schema type %q", t))
}
}
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package errors
// ConstError is a type for representing static const errors that are best
// composed as strings.
//
// They're great for package level errors where a package needs to indicate
// a certain type of problem to the caller. Const errors are immutable and
// always comparable.
type ConstError string
// Error returns the constant error string encapsulated by [ConstError].
//
// Error also implements the [error] interface.
func (e ConstError) Error() string {
return string(e)
}
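// The following is an illustrative sketch, not part of the original source: a
// package declares its sentinel error as a ConstError constant and callers
// match it, even through wrapping, with Is. The error text is an assumption.
const errExampleNotReady = ConstError("example: resource not ready")

func constErrorExample(err error) bool {
	return Is(err, errExampleNotReady)
}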
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package errors
// annotated is a wrapping error type that allows an already established error
// to be annotated with another error so that the new annotated error now
// satisfies both the original error and the annotation error with respect to
// Is() and As().
//
// annotated only implements stderrors.Unwrap() []error returning both errors
// involved in the annotation. This means that annotated errors are not
// injecting the new annotation into the error chain and calls to errors.Unwrap
// will never return the annotation error.
type annotated struct {
error
annotation error
}
// Error provides a way to enrich an already existing Go error and inject new
// information into the error chain. Error and its operations do not mutate
// the encapsulated error value.
type Error interface {
// error is the error being wrapped.
error
// Add will introduce a new error into the error chain so that subsequent
// calls to As() and Is() will be satisfied for this additional error. The
// error being added here will not appear in the error output from Error().
// Unwrap() does not unwrap errors that have been added.
Add(err error) Error
// Unwrap returns the underlying error being enriched by this interface.
Unwrap() error
}
// link is an implementation of [Error] and represents a transparent wrapper
// around the top most error in a chain of errors. It provides a way to wrap an
// existing error and offer functions to further enrich the error chain with
// new information.
//
// link wants to be completely transparent to the error chain. All std errors
// introspection on link works on the underlying error being wrapped.
type link struct {
// error is the underlying error being wrapped by link. We embed error here
// so that link itself satisfies the error interface.
error
}
// Add will introduce a new error into the error chain so that the resultant
// error satisfies [Is] and [As]. Implements [Error.Add].
func (l link) Add(err error) Error {
if err == nil {
return l
}
return link{annotated{l.error, err}}
}
// Unwrap implements std errors Unwrap interface by returning the underlying
// error being wrapped by link.
func (l link) Unwrap() error {
return l.error
}
// Unwrap implements std errors Unwrap interface by returning both the
// underlying error and that of the annotation error in a slice.
func (a annotated) Unwrap() []error {
return []error{a.error, a.annotation}
}
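// addExample is an illustrative sketch, not part of the original source: the
// returned error still reports "opening vault" from Error(), but it also
// satisfies Is(err, classification) for the supplied classification error.
func addExample(classification error) Error {
	return New("opening vault").Add(classification)
}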
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package errors
import (
stderrors "errors"
"fmt"
)
// As finds the first error in err's tree that matches target, and if one is
// found, sets target to that error value and returns true. Otherwise, it
// returns false.
//
// The tree consists of err itself, followed by the errors obtained by
// repeatedly calling its Unwrap() error or Unwrap() []error method. When err
// wraps multiple errors, As examines err followed by a depth-first traversal
// of its children.
//
// An error matches target if the error's concrete value is assignable to the
// value pointed to by target, or if the error has a method As(interface{}) bool
// such that As(target) returns true. In the latter case, the As method is
// responsible for setting target.
//
// An error type might provide an As method so it can be treated as if it were a
// different error type.
//
// As panics if target is not a non-nil pointer to either a type that implements
// error, or to any interface type.
//
// As is a proxy for [pkg/errors.As] and does not alter the semantics offered by
// this function.
func As(err error, target any) bool {
return stderrors.As(err, target)
}
// AsType finds the first error in err's chain that is assignable to type T, and
// if a match is found, returns that error value and true. Otherwise, it returns
// T's zero value and false. If an error in the chain implements As(any) bool,
// its As method will be called when its concrete type is not T.
//
// AsType is equivalent to errors.As, but uses a type parameter and returns
// the target, to avoid having to define a variable before the call. For
// example, callers can replace this:
//
// var pathError *fs.PathError
// if errors.As(err, &pathError) {
// fmt.Println("Failed at path:", pathError.Path)
// }
//
// With:
//
// if pathError, ok := errors.AsType[*fs.PathError](err); ok {
// fmt.Println("Failed at path:", pathError.Path)
// }
func AsType[T error](err error) (T, bool) {
var zero T
as := As(err, &zero)
return zero, as
}
// Errorf implements a straight through proxy for [pkg/fmt.Errorf]. The one
// change this function signature makes is that a type of [Error] is returned so
// that the resultant error can be further annotated.
func Errorf(format string, a ...any) Error {
return link{
newFrameTracer(fmt.Errorf(format, a...), 1),
}
}
// HasType is a function wrapper around AsType, dropping the return value T
// from AsType() and making a function that can be used like:
//
// return HasType[*MyError](err)
//
// Or
//
// if HasType[*MyError](err) {}
func HasType[T error](err error) bool {
_, rval := AsType[T](err)
return rval
}
// Is reports whether any error in err's tree matches target.
//
// The tree consists of err itself, followed by the errors obtained by repeatedly
// calling its Unwrap() error or Unwrap() []error method. When err wraps multiple
// errors, Is examines err followed by a depth-first traversal of its children.
//
// An error is considered to match a target if it is equal to that target or if
// it implements a method Is(error) bool such that Is(target) returns true.
//
// An error type might provide an Is method so it can be treated as equivalent
// to an existing error. For example, if MyError defines
//
// func (m MyError) Is(target error) bool { return target == fs.ErrExist }
//
// then Is(MyError{}, fs.ErrExist) returns true. See [syscall.Errno.Is] for
// an example in the standard library. An Is method should only shallowly
// compare err and the target and not call [Unwrap] on either.
//
// Is is a proxy for [pkg/errors.Is] and does not alter the semantics offered by
// this function.
func Is(err, target error) bool {
return stderrors.Is(err, target)
}
// IsOneOf reports whether any error in err's tree matches one of the target
// errors. This check works on a first match effort in that the first target
// error discovered reports back true with no further errors.
//
// If targets is empty then this func will always return false.
//
// IsOneOf is the same as writing Is(err, type1) || Is(err, type2) || Is(err, type3)
func IsOneOf(err error, targets ...error) bool {
for _, target := range targets {
if stderrors.Is(err, target) {
return true
}
}
return false
}
// Join returns an error that wraps the given errors.
// Any nil error values are discarded.
// Join returns nil if every value in errs is nil.
// The error formats as the concatenation of the strings obtained
// by calling the Error method of each element of errs, with a newline
// between each string.
//
// A non-nil error returned by Join implements the Unwrap() []error method.
//
// Join is a proxy for [pkg/errors.Join] with the difference being that the
// resultant error is of type [Error]
func Join(errs ...error) Error {
if err := stderrors.Join(errs...); err != nil {
return link{err}
}
return nil
}
// New returns an error that formats as the given text. Each call to New returns
// a distinct error value even if the text is identical.
//
// New is a proxy for [pkg/errors.New]. All errors returned from New are traced.
func New(text string) Error {
return link{
newFrameTracer(stderrors.New(text), 1),
}
}
// Unwrap returns the result of calling the Unwrap method on err, if err's type
// contains an Unwrap method returning error. Otherwise, Unwrap returns nil.
//
// Unwrap only calls a method of the form "Unwrap() error". In particular Unwrap
// does not unwrap errors returned by [Join]
func Unwrap(err error) error {
return stderrors.Unwrap(err)
}
// Copyright 2024 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package errors
import (
stderrors "errors"
"fmt"
"runtime"
"strings"
jujuerrors "github.com/juju/errors"
)
// Traced represents an error that has had its location recorded for where the
// error was raised. This is useful for recording error stacks for errors that
// have not been annotated with contextual information as they flow through a
// program's stack.
type Traced interface {
error
// Location returns the path-qualified function name where the error was
// created and the line number
Location() (function string, line int)
}
// frameTracer is an implementation of [Traced] that records the program counter
// of where an error was traced. The work of resolving the trace to a location
// is deferred till a call to [frameTracer.Location] is made.
type frameTracer struct {
// error is the wrapped error being recorded.
error
// pc is the program counter for the call site of the traced error.
pc uintptr
}
// Capture is responsible for recording the location where this function was
// called from in the error supplied. This allows errors that are being passed
// up through a stack to have extra information attached to them at call sites.
//
// Captured errors should only be used in scenarios where adding extra context
// to an error is not necessary or can't be done.
//
// [ErrorStack] can be used to gather all of the capture sites about an error.
func Capture(err error) Traced {
if err == nil {
return nil
}
return newFrameTracer(err, 1)
}
// ErrorStack recursively unwinds an error chain by repeatedly calling
// [stderrors.Unwrap] until no new errors are returned. A new line is output
// to the resultant string for each error in the chain. If an error in the chain
// has been traced, the error's location information will also be output with
// the error message.
func ErrorStack(err error) string {
chain := []string{}
for err != nil {
switch x := err.(type) {
case link:
case Traced:
file, line := x.Location()
chain = append(chain, fmt.Sprintf(
"%s:%d: %s", file, line, err.Error()),
)
// Special case to handle errors that have been traced with juju/errors.
// This can be deleted in good time once we have removed all uses of
// this.
case jujuerrors.Locationer:
file, line := x.Location()
chain = append(chain, fmt.Sprintf(
"%s:%d: %s", file, line, err.Error()),
)
default:
chain = append(chain, err.Error())
}
err = stderrors.Unwrap(err)
}
return strings.Join(chain, "\n")
}
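// errorStackExample is an illustrative sketch, not part of the original
// source: an error is traced at a call site with Capture and the resulting
// chain, including location lines, is rendered as a single string.
func errorStackExample(err error) string {
	traced := Capture(err)
	if traced == nil {
		return ""
	}
	return ErrorStack(traced)
}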
// Location returns the path-qualified function name where the error was created
// and the line number.
func (f frameTracer) Location() (string, int) {
frame, _ := runtime.CallersFrames([]uintptr{f.pc}).Next()
return frame.Function, frame.Line
}
// newFrameTracer is responsible for constructing a new [frameTracer] error
// recording the program counter at call site skip.
func newFrameTracer(err error, skip int) frameTracer {
pcs := make([]uintptr, 1)
n := runtime.Callers(skip+2, pcs)
pc := uintptr(0)
if n != 0 {
pc = pcs[0]
}
return frameTracer{
error: err,
pc: pc,
}
}
// Unwrap returns the underlying error that was wrapped by the frameTracer.
func (f frameTracer) Unwrap() error {
return f.error
}
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package featureflag
import (
"fmt"
"os"
"strings"
"sync"
"github.com/juju/collections/set"
)
var (
flaglock sync.Mutex // serialises access to flags
flags = set.NewStrings()
)
// SetFlagsFromEnvironment populates the global set from the environment.
// White space between flags is ignored, and the flags are lower cased. Under
// normal circumstances this method is only ever called from the init
// function.
//
// NOTE: since SetFlagsFromEnvironment should only ever be called during
// program startup (or tests), and it is serialized by the runtime, we don't
// use any mutex when setting the flag set. Should this change in the future,
// a mutex should be used.
func SetFlagsFromEnvironment(envVarNames ...string) {
rawValues := make([]string, len(envVarNames))
for i, envVarName := range envVarNames {
rawValues[i] = os.Getenv(envVarName)
}
setFlags(rawValues...)
}
// setFlags populates the global set using string(s) passed to it containing the
// flags.
func setFlags(rawValues ...string) {
flaglock.Lock()
defer flaglock.Unlock()
flags = set.NewStrings()
for _, values := range rawValues {
values = strings.ToLower(values)
for _, flag := range strings.Split(values, ",") {
if flag = strings.TrimSpace(flag); flag != "" {
flags.Add(flag)
}
}
}
}
// Enabled is used to determine if a particular feature flag is enabled for
// the process.
func Enabled(flag string) bool {
flaglock.Lock()
defer flaglock.Unlock()
flag = strings.TrimSpace(strings.ToLower(flag))
if flag == "" {
// The empty feature is always enabled.
return true
}
return flags.Contains(flag)
}
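// featureFlagExample is an illustrative sketch, not part of the original
// source: flags are normally populated once at start-up from an environment
// variable (the variable name used here is an assumption) and then queried.
func featureFlagExample() bool {
	_ = os.Setenv("EXAMPLE_FEATURE_FLAGS", "developer-mode, better-logging")
	SetFlagsFromEnvironment("EXAMPLE_FEATURE_FLAGS")
	return Enabled("developer-mode")
}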
// All returns all the current feature flags.
func All() []string {
flaglock.Lock()
defer flaglock.Unlock()
return flags.Values()
}
// AsEnvironmentValue returns a single string suitable to be assigned to an
// environment variable that will be parsed into the same set of values currently
// set.
func AsEnvironmentValue() string {
flaglock.Lock()
defer flaglock.Unlock()
return strings.Join(flags.SortedValues(), ",")
}
// String provides a nice human readable string for the feature flags that
// are set.
func String() string {
flaglock.Lock()
defer flaglock.Unlock()
var quoted []string
for _, flag := range flags.SortedValues() {
quoted = append(quoted, fmt.Sprintf("%q", flag))
}
return strings.Join(quoted, ", ")
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package http
import (
"context"
"crypto/tls"
"crypto/x509"
"net/http"
"net/http/httptrace"
"net/http/httputil"
"time"
"github.com/juju/clock"
"github.com/juju/errors"
corehttp "github.com/juju/juju/core/http"
"github.com/juju/juju/core/logger"
internallogger "github.com/juju/juju/internal/logger"
)
// NOTE: Once we refactor the juju tests enough that they do not use
// a RoundTripper on the DefaultTransport, NewClient can always return
// a Client with a locally constructed Transport via NewHTTPTLSTransport
// and init() will no longer be needed.
//
// https://bugs.launchpad.net/juju/+bug/1888888
//
// TODO (stickupkid) This is terrible, I'm not kidding! This isn't yours to
// touch!
func init() {
defaultTransport := http.DefaultTransport.(*http.Transport)
// Call the DialContextMiddleware for the DefaultTransport to
// facilitate testing use of allowOutgoingAccess.
defaultTransport = DialContextMiddleware(NewLocalDialBreaker(true))(defaultTransport)
// Call our own proxy function with the DefaultTransport.
http.DefaultTransport = ProxyMiddleware(defaultTransport)
}
// Option to be passed into the transport construction to customize the
// default transport.
type Option func(*options)
type options struct {
caCertificates []string
cookieJar http.CookieJar
disableKeepAlives bool
skipHostnameVerification bool
tlsHandshakeTimeout time.Duration
middlewares []TransportMiddleware
httpClient *http.Client
logger logger.Logger
requestRecorder RequestRecorder
retryPolicy *RetryPolicy
}
// WithCACertificates specifies Certificate Authority certificates to be used
// to validate certificates of cloud infrastructure components.
// The contents are PEM-encoded x.509 certs.
func WithCACertificates(value ...string) Option {
return func(opt *options) {
opt.caCertificates = value
}
}
// WithCookieJar is used to insert relevant cookies into every
// outbound Request and is updated with the cookie values
// of every inbound Response. The Jar is consulted for every
// redirect that the Client follows.
//
// If Jar is nil, cookies are only sent if they are explicitly
// set on the Request.
func WithCookieJar(value http.CookieJar) Option {
return func(opt *options) {
opt.cookieJar = value
}
}
// WithDisableKeepAlives will disable HTTP keep alives, not TCP keep alives.
// When HTTP keep alives are disabled, the connection to the server is used
// for only a single HTTP request, slowing down subsequent requests and
// creating a lot of garbage for the collector.
func WithDisableKeepAlives(value bool) Option {
return func(opt *options) {
opt.disableKeepAlives = value
}
}
// WithSkipHostnameVerification will skip hostname verification on the TLS/SSL
// certificates.
func WithSkipHostnameVerification(value bool) Option {
return func(opt *options) {
opt.skipHostnameVerification = value
}
}
// WithTLSHandshakeTimeout will modify how long a TLS handshake should take.
// Setting the value to zero will mean that no timeout will occur.
func WithTLSHandshakeTimeout(value time.Duration) Option {
return func(opt *options) {
opt.tlsHandshakeTimeout = value
}
}
// WithTransportMiddlewares allows the wrapping or modification of the existing
// transport for a given client.
// In an ideal world, all transports should be cloned to prevent the
// modification of an existing client transport.
func WithTransportMiddlewares(middlewares ...TransportMiddleware) Option {
return func(opt *options) {
opt.middlewares = middlewares
}
}
// WithHTTPClient specifies the http.Client to use.
func WithHTTPClient(value *http.Client) Option {
return func(opt *options) {
opt.httpClient = value
}
}
// WithLogger defines a logger to use with the client.
//
// It is recommended that you create a child logger to allow disabling of the
// trace logging to prevent log flooding.
func WithLogger(value logger.Logger) Option {
return func(opt *options) {
opt.logger = value
}
}
// WithRequestRecorder specifies a RequestRecorder used for recording outgoing
// http requests regardless of whether they succeeded or failed.
func WithRequestRecorder(value RequestRecorder) Option {
return func(opt *options) {
opt.requestRecorder = value
}
}
// WithRequestRetrier specifies a request retrying policy.
func WithRequestRetrier(value RetryPolicy) Option {
return func(opt *options) {
opt.retryPolicy = &value
}
}
// newOptions creates an options instance with default values.
func newOptions() *options {
// In this case, use a default http.Client.
// Ideally we should always use the NewHTTPTLSTransport,
// however test suites such as JujuConnSuite and some facade
// tests rely on settings to the http.DefaultTransport for
// tests to run with different protocol scheme such as "test"
// and some replace the RoundTripper to answer test scenarios.
//
// https://bugs.launchpad.net/juju/+bug/1888888
defaultCopy := *http.DefaultClient
return &options{
tlsHandshakeTimeout: 20 * time.Second,
skipHostnameVerification: false,
middlewares: []TransportMiddleware{
DialContextMiddleware(NewLocalDialBreaker(true)),
FileProtocolMiddleware,
ProxyMiddleware,
},
httpClient: &defaultCopy,
logger: internallogger.GetLogger("http"),
}
}
// Client represents an http client.
type Client struct {
corehttp.HTTPClient
logger logger.Logger
}
// NewClient returns a new juju http client defined
// by the given config.
func NewClient(options ...Option) *Client {
opts := newOptions()
for _, option := range options {
option(opts)
}
client := opts.httpClient
transport := NewHTTPTLSTransport(TransportConfig{
DisableKeepAlives: opts.disableKeepAlives,
TLSHandshakeTimeout: opts.tlsHandshakeTimeout,
Middlewares: opts.middlewares,
})
switch {
case len(opts.caCertificates) > 0:
transport = transportWithCerts(transport, opts.caCertificates, opts.skipHostnameVerification)
case opts.skipHostnameVerification:
transport = transportWithSkipVerify(transport, opts.skipHostnameVerification)
}
if opts.requestRecorder != nil {
client.Transport = roundTripRecorder{
requestRecorder: opts.requestRecorder,
wrappedRoundTripper: transport,
}
} else {
client.Transport = transport
}
// Ensure we add the retry middleware after request recorder if there is
// one, to ensure that we get all the logging at the right level.
if opts.retryPolicy != nil {
client.Transport = makeRetryMiddleware(
client.Transport,
*opts.retryPolicy,
clock.WallClock,
opts.logger,
)
}
if opts.cookieJar != nil {
client.Jar = opts.cookieJar
}
return &Client{
HTTPClient: client,
logger: opts.logger,
}
}
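// newClientExample is an illustrative sketch, not part of the original
// source: it builds a client that trusts an additional CA certificate and
// issues a GET against a hypothetical endpoint.
func newClientExample(ctx context.Context, caCertPEM string) (*http.Response, error) {
	client := NewClient(
		WithCACertificates(caCertPEM),
		WithSkipHostnameVerification(false),
	)
	return client.Get(ctx, "https://example.invalid/api/status")
}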
func transportWithSkipVerify(defaultTransport *http.Transport, skipHostnameVerify bool) *http.Transport {
transport := defaultTransport
// We know that the DefaultHTTPTransport doesn't create a tls.Config,
// so we can safely do that here.
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: skipHostnameVerify,
}
// Since we're creating a new tls.Config, HTTP/2 would otherwise be disabled,
// so force the client to attempt HTTP/2 requests.
transport.ForceAttemptHTTP2 = true
return transport
}
func transportWithCerts(defaultTransport *http.Transport, caCerts []string, skipHostnameVerify bool) *http.Transport {
pool := x509.NewCertPool()
for _, cert := range caCerts {
pool.AppendCertsFromPEM([]byte(cert))
}
tlsConfig := SecureTLSConfig()
tlsConfig.RootCAs = pool
tlsConfig.InsecureSkipVerify = skipHostnameVerify
transport := defaultTransport
transport.TLSClientConfig = tlsConfig
// Since we're creating a new tls.Config, HTTP/2 would otherwise be disabled,
// so force the client to attempt HTTP/2 requests.
transport.ForceAttemptHTTP2 = true
return transport
}
// Client returns the underlying http.Client. Used in testing
// only.
func (c *Client) Client() *http.Client {
return c.HTTPClient.(*http.Client)
}
// Get issues a GET to the specified URL. It mimics the net/http Get,
// but allows for enhanced debugging.
//
// When err is nil, resp always contains a non-nil resp.Body.
// Caller should close resp.Body when done reading from it.
func (c *Client) Get(ctx context.Context, path string) (resp *http.Response, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, errors.Trace(err)
}
if err := c.traceRequest(req, path); err != nil {
// No need to fail, but let user know we're
// not tracing the client GET.
err = errors.Annotatef(err, "setup of http client tracing failed")
c.logger.Tracef(context.TODO(), "%s", err)
}
return c.Do(req)
}
// traceRequest enables debugging on the http request if the
// log level for this package is set to Trace. Otherwise it
// returns with no change to the request.
func (c *Client) traceRequest(req *http.Request, url string) error {
if !c.logger.IsLevelEnabled(logger.TRACE) {
return nil
}
dump, err := httputil.DumpRequestOut(req, true)
if err != nil {
return errors.Trace(err)
}
c.logger.Tracef(context.TODO(), "request for %q: %q", url, dump)
trace := &httptrace.ClientTrace{
DNSStart: func(info httptrace.DNSStartInfo) {
c.logger.Tracef(context.TODO(), "%s DNS Start: %q", url, info.Host)
},
DNSDone: func(dnsInfo httptrace.DNSDoneInfo) {
c.logger.Tracef(context.TODO(), "%s DNS Info: %+v\n", url, dnsInfo)
},
ConnectDone: func(network, addr string, err error) {
c.logger.Tracef(context.TODO(), "%s Connection Done: network %q, addr %q, err %q", url, network, addr, err)
},
GetConn: func(hostPort string) {
c.logger.Tracef(context.TODO(), "%s Get Conn: %q", url, hostPort)
},
GotConn: func(connInfo httptrace.GotConnInfo) {
c.logger.Tracef(context.TODO(), "%s Got Conn: %+v", url, connInfo)
},
TLSHandshakeStart: func() {
c.logger.Tracef(context.TODO(), "%s TLS Handshake Start", url)
},
TLSHandshakeDone: func(st tls.ConnectionState, err error) {
c.logger.Tracef(context.TODO(), "%s TLS Handshake Done: complete %t, verified chains %d, server name %q",
url,
st.HandshakeComplete,
len(st.VerifiedChains),
st.ServerName)
},
}
*req = *req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
return nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package http
import (
"encoding/base64"
"fmt"
"net/http"
"strings"
)
// BasicAuthHeader creates a header that contains just the "Authorization"
// entry. The implementation was originally taken from net/http but it is
// needed outside of the http request object in order to use this with
// our websockets. See 2 (end of page 4) http://www.ietf.org/rfc/rfc2617.txt
// "To receive authorization, the client sends the userid and password,
// separated by a single colon (":") character, within a base64 encoded string
// in the credentials."
func BasicAuthHeader(username, password string) http.Header {
auth := username + ":" + password
encoded := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
return http.Header{
"Authorization": {encoded},
}
}
// ParseBasicAuthHeader attempts to find an Authorization header in the supplied
// http.Header and if found parses it as a Basic header. See 2 (end of page 4)
// http://www.ietf.org/rfc/rfc2617.txt "To receive authorization, the client
// sends the userid and password, separated by a single colon (":") character,
// within a base64 encoded string in the credentials."
func ParseBasicAuthHeader(h http.Header) (userid, password string, err error) {
parts := strings.Fields(h.Get("Authorization"))
if len(parts) != 2 || parts[0] != "Basic" {
return "", "", fmt.Errorf("invalid or missing HTTP auth header")
}
// Challenge is a base64-encoded "tag:pass" string.
// See RFC 2617, Section 2.
challenge, err := base64.StdEncoding.DecodeString(parts[1])
if err != nil {
return "", "", fmt.Errorf("invalid HTTP auth encoding")
}
tokens := strings.SplitN(string(challenge), ":", 2)
if len(tokens) != 2 {
return "", "", fmt.Errorf("invalid HTTP auth contents")
}
return tokens[0], tokens[1], nil
}
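// basicAuthExample is an illustrative sketch, not part of the original
// source: a header produced by BasicAuthHeader round-trips through
// ParseBasicAuthHeader; the credentials are assumptions.
func basicAuthExample() (string, string, error) {
	h := BasicAuthHeader("jujuuser", "sekrit")
	return ParseBasicAuthHeader(h)
}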
// Copyright 2021 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package http
import (
"context"
"net"
"net/http"
"net/url"
"strconv"
"time"
"github.com/juju/clock"
"github.com/juju/errors"
"github.com/juju/retry"
"golang.org/x/net/http/httpproxy"
"github.com/juju/juju/core/logger"
internallogger "github.com/juju/juju/internal/logger"
)
// FileProtocolMiddleware registers support for file:// URLs on the given transport.
func FileProtocolMiddleware(transport *http.Transport) *http.Transport {
transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
return transport
}
// DialBreaker replicates a highly specialized CircuitBreaker pattern, which
// takes into account the current address.
type DialBreaker interface {
// Allowed checks to see if a given address is allowed.
Allowed(string) bool
// Trip will cause the DialBreaker to change the breaker state.
Trip()
}
func isLocalAddr(addr string) bool {
host, _, err := net.SplitHostPort(addr)
if err != nil {
return false
}
return host == "localhost" || net.ParseIP(host).IsLoopback()
}
// DialContextMiddleware patches the default HTTP transport so
// that it fails when an attempt is made to dial a non-local
// host.
func DialContextMiddleware(breaker DialBreaker) TransportMiddleware {
return func(transport *http.Transport) *http.Transport {
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
if !breaker.Allowed(addr) {
return nil, errors.Errorf("access to address %q not allowed", addr)
}
return dialer.DialContext(ctx, network, addr)
}
return transport
}
}
// LocalDialBreaker defines a DialBreaker that when tripped only allows local
// dials, anything else is prevented.
type LocalDialBreaker struct {
allowOutgoingAccess bool
}
// NewLocalDialBreaker creates a new LocalDialBreaker with a default value.
func NewLocalDialBreaker(allowOutgoingAccess bool) *LocalDialBreaker {
return &LocalDialBreaker{
allowOutgoingAccess: allowOutgoingAccess,
}
}
// Allowed checks to see if a dial to the given address is allowed to happen.
func (b *LocalDialBreaker) Allowed(addr string) bool {
if b.allowOutgoingAccess {
return true
}
// If we're not allowing outgoing access, then only local addresses are
// allowed to be dialed. Check for local only addresses.
return isLocalAddr(addr)
}
// Trip inverts the local state of the DialBreaker.
func (b *LocalDialBreaker) Trip() {
b.allowOutgoingAccess = !b.allowOutgoingAccess
}
// ProxyMiddleware adds a Proxy to the given transport. This implementation
// uses the http.ProxyFromEnvironment.
func ProxyMiddleware(transport *http.Transport) *http.Transport {
transport.Proxy = getProxy
return transport
}
var midLogger = internallogger.GetLogger("juju.http.middleware", "http")
func getProxy(req *http.Request) (*url.URL, error) {
// Get fresh proxy config for each request. Go will cache the proxy
// settings for a process; this is a problem for long running programs,
// and caused changes in proxy settings via model-config not to
// be used.
cfg := httpproxy.FromEnvironment()
midLogger.Tracef(context.TODO(), "proxy config http(%s), https(%s), no-proxy(%s)",
cfg.HTTPProxy, cfg.HTTPSProxy, cfg.NoProxy)
return cfg.ProxyFunc()(req.URL)
}
// ForceAttemptHTTP2Middleware forces an HTTP/2 connection if a non-zero
// Dial, DialTLS, or DialContext func or TLSClientConfig is provided to the
// Transport. Using any of these would otherwise disable HTTP/2, so we force
// the client to attempt it for requests.
func ForceAttemptHTTP2Middleware(transport *http.Transport) *http.Transport {
transport.ForceAttemptHTTP2 = true
return transport
}
// RequestRecorder is implemented by types that can record information about
// successful and unsuccessful http requests.
type RequestRecorder interface {
// Record an outgoing request which produced an http.Response.
Record(method string, url *url.URL, res *http.Response, rtt time.Duration)
// RecordError records an outgoing request which returned an error.
RecordError(method string, url *url.URL, err error)
}
// RoundTripper allows us to generate mocks for the http.RoundTripper because
// we're already in a http package.
type RoundTripper = http.RoundTripper
type roundTripRecorder struct {
requestRecorder RequestRecorder
wrappedRoundTripper http.RoundTripper
}
// RoundTrip implements http.RoundTripper. It delegates the request to the
// wrapped RoundTripper and invokes the appropriate RequestRecorder methods
// depending on the outcome.
func (lr roundTripRecorder) RoundTrip(req *http.Request) (*http.Response, error) {
start := time.Now()
res, err := lr.wrappedRoundTripper.RoundTrip(req)
rtt := time.Since(start)
if err != nil {
lr.requestRecorder.RecordError(req.Method, req.URL, err)
} else {
lr.requestRecorder.Record(req.Method, req.URL, res, rtt)
}
return res, err
}
// retryMiddleware allows retrying of certain retryable http errors.
// This only handles very specific status codes, ones that are deemed retryable:
//
// - 429 Too Many Requests
// - 502 Bad Gateway
// - 503 Service Unavailable
// - 504 Gateway Timeout
type retryMiddleware struct {
policy RetryPolicy
wrappedRoundTripper http.RoundTripper
clock clock.Clock
logger logger.Logger
}
// RetryPolicy defines how many attempts to make, the delay between attempts,
// and the maximum delay a retried request will tolerate.
type RetryPolicy struct {
Delay time.Duration
MaxDelay time.Duration
Attempts int
}
// Validate validates the RetryPolicy for any issues.
func (p RetryPolicy) Validate() error {
if p.Attempts < 1 {
return errors.Errorf("expected at least one attempt")
}
if p.MaxDelay < 1 {
return errors.Errorf("expected max delay to be a valid time")
}
return nil
}
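// retryClientExample is an illustrative sketch, not part of the original
// source: a client that retries retryable status codes up to three times,
// capping any Retry-After or backoff delay at thirty seconds.
func retryClientExample() *Client {
	return NewClient(WithRequestRetrier(RetryPolicy{
		Delay:    time.Second,
		MaxDelay: 30 * time.Second,
		Attempts: 3,
	}))
}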
// makeRetryMiddleware creates a retry transport.
func makeRetryMiddleware(transport http.RoundTripper, policy RetryPolicy, clock clock.Clock, logger logger.Logger) http.RoundTripper {
return retryMiddleware{
policy: policy,
wrappedRoundTripper: transport,
clock: clock,
logger: logger,
}
}
type retryableErr struct{}
func (retryableErr) Error() string {
return "retryable error"
}
// RoundTrip defines a strategy for handling retries based on the status code.
func (m retryMiddleware) RoundTrip(req *http.Request) (*http.Response, error) {
var (
res *http.Response
backOffErr error
)
err := retry.Call(retry.CallArgs{
Clock: m.clock,
Func: func() error {
if err := req.Context().Err(); err != nil {
return err
}
if backOffErr != nil {
return backOffErr
}
var retryable bool
var err error
res, retryable, err = m.roundTrip(req)
if err != nil {
return err
}
if retryable {
return retryableErr{}
}
return nil
},
IsFatalError: func(err error) bool {
// Work out if it's not a retryable error.
_, ok := errors.Cause(err).(retryableErr)
return !ok
},
Attempts: m.policy.Attempts,
Delay: m.policy.Delay,
BackoffFunc: func(delay time.Duration, attempts int) time.Duration {
var duration time.Duration
duration, backOffErr = m.defaultBackoff(res, delay)
return duration
},
})
return res, err
}
func (m retryMiddleware) roundTrip(req *http.Request) (*http.Response, bool, error) {
res, err := m.wrappedRoundTripper.RoundTrip(req)
if err != nil {
return nil, false, err
}
switch res.StatusCode {
case http.StatusBadGateway, http.StatusGatewayTimeout:
// The request should be retryable.
fallthrough
case http.StatusServiceUnavailable, http.StatusTooManyRequests:
// The request should be retryable, but additionally should contain
// a potential Retry-After header.
return res, true, nil
default:
// Don't handle any of the following status codes.
return res, false, nil
}
}
// defaultBackoff attempts to work out a good backoff strategy based on the
// backoff policy or the status code from the response.
//
// RFC7231 states that the retry-after header can look like the following:
//
// - Retry-After: <http-date>
// - Retry-After: <delay-seconds>
func (m retryMiddleware) defaultBackoff(resp *http.Response, backoff time.Duration) (time.Duration, error) {
if header := resp.Header.Get("Retry-After"); header != "" {
// Attempt to parse the header from the request.
//
// Check for delay in seconds first, before checking for a http-date
seconds, err := strconv.ParseInt(header, 10, 64)
if err == nil {
return m.clampBackoff(time.Second * time.Duration(seconds))
}
// Check for http-date.
date, err := time.Parse(time.RFC1123, header)
if err == nil {
// The backoff lasts until the given date, so compute date minus now.
return m.clampBackoff(date.Sub(m.clock.Now()))
}
url := ""
if resp.Request != nil {
url = resp.Request.URL.String()
}
m.logger.Errorf(context.TODO(), "unable to parse Retry-After header %s from %s", header, url)
}
return m.clampBackoff(backoff)
}
func (m retryMiddleware) clampBackoff(duration time.Duration) (time.Duration, error) {
if m.policy.MaxDelay > 0 && duration > m.policy.MaxDelay {
future := m.clock.Now().Add(duration)
return duration, errors.Errorf("API request retry is not accepting further requests until %s", future.Format(time.RFC3339))
}
return duration, nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package http
import (
"crypto/tls"
"net/http"
"time"
)
// TransportMiddleware represents a way to add an adapter to the existing transport.
type TransportMiddleware func(*http.Transport) *http.Transport
// TransportConfig holds the configurable values for setting up a http
// transport.
type TransportConfig struct {
TLSConfig *tls.Config
DisableKeepAlives bool
TLSHandshakeTimeout time.Duration
Middlewares []TransportMiddleware
}
// NewHTTPTLSTransport returns a new http.Transport constructed with the TLS config
// and the necessary parameters for Juju.
func NewHTTPTLSTransport(config TransportConfig) *http.Transport {
transport := &http.Transport{
TLSClientConfig: config.TLSConfig,
DisableKeepAlives: config.DisableKeepAlives,
TLSHandshakeTimeout: config.TLSHandshakeTimeout,
}
for _, middlewareFn := range config.Middlewares {
transport = middlewareFn(transport)
}
return transport
}
// DefaultHTTPTransport creates a default transport with proxy middleware
// enabled.
func DefaultHTTPTransport() *http.Transport {
return NewHTTPTLSTransport(TransportConfig{
TLSHandshakeTimeout: 20 * time.Second,
Middlewares: []TransportMiddleware{
ProxyMiddleware,
},
})
}
// knownGoodCipherSuites contains the list of secure cipher suites to use
// with tls.Config. This list matches those that Go 1.6 implements from
// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations.
//
// https://tools.ietf.org/html/rfc7525#section-4.2 excludes RSA exchange completely
// so we could be more strict if all our clients will support
// TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256/384. Unfortunately Go's crypto library
// is limited and doesn't support DHE-RSA-AES256-GCM-SHA384 and
// DHE-RSA-AES256-SHA256, which are part of the recommended set.
//
// Unfortunately we can't drop the RSA algorithms because our servers aren't
// generating ECDHE keys.
var knownGoodCipherSuites = []uint16{
// These are technically useless for Juju, since we use an RSA certificate,
// but they also don't hurt anything, and supporting an ECDSA certificate
// could be useful in the future.
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
// Windows doesn't support GCM currently, so we need these for RSA support.
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
// We need this so that we have at least one suite in common
// with the default gnutls installed for precise and trusty.
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
}
// SecureTLSConfig returns a tls.Config that conforms to Juju's security
// standards, so as to avoid known security vulnerabilities in certain
// configurations.
//
// Currently it excludes RC4 implementations from the available ciphersuites,
// requires ciphersuites that provide forward secrecy, and sets the minimum TLS
// version to 1.2.
func SecureTLSConfig() *tls.Config {
return &tls.Config{
CipherSuites: knownGoodCipherSuites,
MinVersion: tls.VersionTLS12,
}
}
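// exampleSecureTransport is a minimal sketch (not part of the package API)
// showing how SecureTLSConfig and TransportConfig are intended to compose:
// the secure TLS settings are applied to a transport along with the proxy
// middleware. The handshake timeout is illustrative only.
func exampleSecureTransport() *http.Transport {
return NewHTTPTLSTransport(TransportConfig{
TLSConfig: SecureTLSConfig(),
TLSHandshakeTimeout: 20 * time.Second,
Middlewares: []TransportMiddleware{
ProxyMiddleware,
},
})
}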
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"github.com/juju/loggo/v2"
"github.com/juju/juju/core/logger"
)
// GetLogger returns a logger with the given name and tags.
// Currently this is backed by loggo.
func GetLogger(name string, tags ...string) logger.Logger {
return WrapLoggo(loggo.GetLoggerWithTags(name, tags...).WithCallDepth(3))
}
// LoggerContext returns a logger context whose root logger is set to the
// given level. Currently this is backed by loggo.
func LoggerContext(level logger.Level) logger.LoggerContext {
return WrapLoggoContext(loggo.NewContext(loggo.Level(level)))
}
// DefaultContext returns a logger context backed by the default loggo context.
func DefaultContext() logger.LoggerContext {
return WrapLoggoContext(loggo.DefaultContext())
}
// ConfigureLoggers configures loggers on the default context according to the
// given string specification, which specifies a set of modules and their
// associated logging levels. Loggers are colon- or semicolon-separated; each
// module is specified as <modulename>=<level>. White space outside of module
// names and levels is ignored. The root module is specified with the name
// "<root>".
//
// An example specification:
//
// `<root>=ERROR; foo.bar=WARNING`
func ConfigureLoggers(config string) error {
return loggo.ConfigureLoggers(config)
}
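// exampleConfigureAndGet is a minimal sketch (not part of the package API)
// showing the intended flow: configure module levels on the default context,
// then obtain a named logger. The module name and levels are illustrative only.
func exampleConfigureAndGet() (logger.Logger, error) {
if err := ConfigureLoggers("<root>=WARNING; juju.example=DEBUG"); err != nil {
return nil, err
}
return GetLogger("juju.example"), nil
}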
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"context"
"github.com/juju/loggo/v2"
"github.com/juju/juju/core/logger"
"github.com/juju/juju/core/trace"
)
// loggoLogger wraps a loggo.Logger to satisfy the logger.Logger interface.
type loggoLogger struct {
logger loggo.Logger
}
// WrapLoggo wraps a loggo.Logger as a logger.Logger.
func WrapLoggo(logger loggo.Logger) logger.Logger {
return loggoLogger{logger: logger}
}
// Criticalf logs a message at the critical level.
func (c loggoLogger) Criticalf(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Criticalf(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.CRITICAL, msg, labels, args...)
}
// Errorf logs a message at the error level.
func (c loggoLogger) Errorf(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Errorf(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.ERROR, msg, labels, args...)
}
// Warningf logs a message at the warning level.
func (c loggoLogger) Warningf(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Warningf(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.WARNING, msg, labels, args...)
}
// Infof logs a message at the info level.
func (c loggoLogger) Infof(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Infof(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.INFO, msg, labels, args...)
}
// Debugf logs a message at the debug level.
func (c loggoLogger) Debugf(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Debugf(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.DEBUG, msg, labels, args...)
}
// Tracef logs a message at the trace level.
func (c loggoLogger) Tracef(ctx context.Context, msg string, args ...any) {
labels, ok := c.labelsFromContext(ctx)
if !ok {
c.logger.Tracef(msg, args...)
return
}
c.logger.LogWithLabelsf(loggo.TRACE, msg, labels, args...)
}
// Logf logs a message at the given level. The labels are merged with the
// labels from the context, if any. The provided arguments are assembled
// together into a string with fmt.Sprintf.
func (c loggoLogger) Logf(ctx context.Context, level logger.Level, labels logger.Labels, msg string, args ...any) {
ctxLabels, ok := c.labelsFromContext(ctx)
if !ok {
ctxLabels = labels
} else {
for k, v := range labels {
ctxLabels[k] = v
}
}
c.logger.LogWithLabelsf(loggo.Level(level), msg, ctxLabels, args...)
}
// IsLevelEnabled returns true if the given level is enabled for the logger.
func (c loggoLogger) IsLevelEnabled(level logger.Level) bool {
return c.logger.IsLevelEnabled(loggo.Level(level))
}
// Child returns a new logger with the given name.
func (c loggoLogger) Child(name string, labels ...string) logger.Logger {
return loggoLogger{
logger: c.logger.ChildWithTags(name, labels...),
}
}
// GetChildByName returns a child logger with the given name.
func (c loggoLogger) GetChildByName(name string) logger.Logger {
return loggoLogger{
logger: c.logger.Root().Child(name),
}
}
func (c loggoLogger) labelsFromContext(ctx context.Context) (map[string]string, bool) {
traceID, ok := trace.TraceIDFromContext(ctx)
if !ok {
return nil, false
}
return map[string]string{
"traceid": traceID,
}, true
}
type loggoLoggerContext struct {
context *loggo.Context
}
// WrapLoggoContext wraps a loggo.Context as a logger.LoggerContext.
func WrapLoggoContext(context *loggo.Context) logger.LoggerContext {
return loggoLoggerContext{
context: context,
}
}
// GetLogger returns a logger with the given name and labels.
func (c loggoLoggerContext) GetLogger(name string, labels ...string) logger.Logger {
return WrapLoggo(c.context.GetLogger(name, labels...).WithCallDepth(3))
}
// ResetLoggerLevels iterates through the known logging modules and sets the
// levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
func (c loggoLoggerContext) ResetLoggerLevels() {
c.context.ResetLoggerLevels()
}
// ConfigureLoggers configures loggers according to the given string
// specification, which specifies a set of modules and their associated
// logging levels. Loggers are colon- or semicolon-separated; each
// module is specified as <modulename>=<level>. White space outside of
// module names and levels is ignored. The root module is specified
// with the name "<root>".
//
// An example specification:
//
// <root>=ERROR; foo.bar=WARNING
//
// Label matching can be applied to the loggers by providing a set of labels
// to the function. If a logger has a label that matches the provided labels,
// then the logger will be configured with the provided level. If the logger
// does not have a label that matches the provided labels, then the logger
// will not be configured. No labels will configure all loggers in the
// specification.
func (c loggoLoggerContext) ConfigureLoggers(specification string) error {
return c.context.ConfigureLoggers(specification)
}
// Config returns the current configuration of the Loggers. Loggers
// with UNSPECIFIED level will not be included.
func (c loggoLoggerContext) Config() logger.Config {
coerced := make(logger.Config)
for k, v := range c.context.Config() {
coerced[k] = logger.Level(v)
}
return coerced
}
// AddWriter adds a writer to the list to be called for each logging call.
// The name cannot be empty, and the writer cannot be nil. If an existing
// writer exists with the specified name, an error is returned.
//
// Note: we're relying on loggo.Writer here, until we do model level logging.
// Deprecated: This will be removed in the future and is only here whilst
// we cut things across.
func (c loggoLoggerContext) AddWriter(name string, writer loggo.Writer) error {
return c.context.AddWriter(name, writer)
}
// Copyright 2024 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package logger
import (
"context"
"github.com/juju/juju/core/logger"
)
// noopLogger is a logger.Logger that does nothing.
type noopLogger struct {
}
// Noop is a logger.Logger that doesn't do anything.
func Noop() logger.Logger {
return noopLogger{}
}
// Criticalf logs a message at the critical level.
func (c noopLogger) Criticalf(ctx context.Context, msg string, args ...any) {
}
// Errorf logs a message at the error level.
func (c noopLogger) Errorf(ctx context.Context, msg string, args ...any) {
}
// Warningf logs a message at the warning level.
func (c noopLogger) Warningf(ctx context.Context, msg string, args ...any) {
}
// Infof logs a message at the info level.
func (c noopLogger) Infof(ctx context.Context, msg string, args ...any) {
}
// Debugf logs a message at the debug level.
func (c noopLogger) Debugf(ctx context.Context, msg string, args ...any) {
}
// Tracef logs a message at the trace level.
func (c noopLogger) Tracef(ctx context.Context, msg string, args ...any) {
}
// Logf logs a message at the given level with the given labels. The provided
// arguments are assembled together into a string with fmt.Sprintf.
func (c noopLogger) Logf(ctx context.Context, level logger.Level, labels logger.Labels, msg string, args ...any) {
}
// IsLevelEnabled returns true if the given level is enabled for the logger.
func (c noopLogger) IsLevelEnabled(level logger.Level) bool {
return false
}
// Child returns a new logger with the given name.
func (c noopLogger) Child(name string, tags ...string) logger.Logger {
return c
}
// GetChildByName returns a child logger with the given name.
func (c noopLogger) GetChildByName(name string) logger.Logger {
return c
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"crypto"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"strings"
"sync"
"time"
"github.com/juju/errors"
)
const (
DefaultLeafGroup = "controller"
ControllerIPLeafGroup = "controllerip"
)
// Authority represents a secure means of issuing groups of common interest
// certificates that share a certificate authority. An Authority should only
// be shared between trusted parties and should be considered thread safe.
type Authority interface {
// Leaf Authority implements the Leaf interface
Leaf
// LeafForGroup returns the leaf associated with the given group. Returns
// error if no leaf exists for the given group.
LeafForGroup(string) (Leaf, error)
// LeafGroupFromPemCertKey loads an already existing certificate key pair as
// a new leaf at the given group. Returns error if a leaf for the given
// group already exists or an error occurred loading the pem data.
LeafGroupFromPemCertKey(group string, certPem, key []byte) (Leaf, error)
// LeafRequestForGroup starts a new leaf request for the given group. If a
// leaf already exists it will be overwritten with this request when
// committed.
LeafRequestForGroup(string) LeafRequest
// LeafRange is a method for safely iterating over all the leafs for the
// given Authority. Supplied function should return false to stop iteration
// early.
LeafRange(func(leaf Leaf) bool)
}
// DefaultAuthority is the Juju implementation of the Authority interface. Its
// main difference is the ability to set a common leaf private key so all leafs
// use the same key.
type DefaultAuthority struct {
authority Leaf
leafs sync.Map
leafSignerMutex sync.Mutex
leafSigner crypto.Signer
}
// Organisation is the default organisation set on all certificates.
var Organisation = []string{"Juju"}
// LeafSubjectTemplate is the default pkix.Name used for all leaf certificates
// made from a DefaultAuthority
var LeafSubjectTemplate = pkix.Name{
Organization: Organisation,
CommonName: "Juju server certificate",
}
// Certificate implements Leaf interface method. Returns the CA's certificate
func (a *DefaultAuthority) Certificate() *x509.Certificate {
return a.authority.Certificate()
}
// Chain implements Leaf interface method. Returns the CA's chain if it is an
// intermediate.
func (a *DefaultAuthority) Chain() []*x509.Certificate {
return a.authority.Chain()
}
// ChainWithAuthority returns the full chain of this authority, including the
// authority's own certificate as the last element.
func (a *DefaultAuthority) ChainWithAuthority() []*x509.Certificate {
chain := a.authority.Chain()
if chain == nil {
chain = []*x509.Certificate{}
}
return append(chain, a.authority.Certificate())
}
// leafMaker is responsible for providing a method to make new leafs after
// request signing.
func (a *DefaultAuthority) leafMaker(groupKey string) LeafMaker {
return func(cert *x509.Certificate, chain []*x509.Certificate,
signer crypto.Signer) (Leaf, error) {
leaf := NewDefaultLeaf(groupKey, cert, chain, signer)
a.leafs.Store(groupKey, leaf)
return leaf, nil
}
}
// LeafRequestForGroup implements Authority interface method. Starts a new leaf
// request for the given group, overwriting any existing leaf when the request
// is committed.
func (a *DefaultAuthority) LeafRequestForGroup(group string) LeafRequest {
groupKey := strings.ToLower(group)
subject := MakeX509NameFromDefaults(&LeafSubjectTemplate,
&pkix.Name{
CommonName: fmt.Sprintf("%s - %s", LeafSubjectTemplate.CommonName, groupKey),
})
a.leafSignerMutex.Lock()
defer a.leafSignerMutex.Unlock()
if a.leafSigner != nil {
return NewDefaultLeafRequestWithSigner(subject, a.leafSigner,
NewDefaultRequestSigner(a.Certificate(), a.ChainWithAuthority(), a.Signer()),
a.leafMaker(groupKey))
}
return NewDefaultLeafRequest(subject,
NewDefaultRequestSigner(a.Certificate(), a.ChainWithAuthority(), a.Signer()),
a.leafMaker(groupKey))
}
// LeafForGroup implements Authority interface method.
func (a *DefaultAuthority) LeafForGroup(group string) (Leaf, error) {
groupKey := strings.ToLower(group)
leaf, has := a.leafs.Load(groupKey)
if !has {
return nil, errors.NotFoundf("no leaf for group key %s", groupKey)
}
return leaf.(Leaf), nil
}
// LeafGroupFromPemCertKey implements Authority interface method.
func (a *DefaultAuthority) LeafGroupFromPemCertKey(group string,
certPem, key []byte) (Leaf, error) {
groupKey := strings.ToLower(group)
certs, signers, err := UnmarshalPemData(append(certPem, key...))
if err != nil {
return nil, errors.Trace(err)
}
if len(certs) == 0 {
return nil, errors.New("found zero certificates in pem bundle")
}
if len(signers) != 1 {
return nil, errors.New("expected exactly one private key in bundle")
}
if !PublicKeysEqual(signers[0].Public(), certs[0].PublicKey) {
return nil, errors.New("public keys of first certificate and key do not match")
}
leaf := NewDefaultLeaf(groupKey, certs[0], certs[1:], signers[0])
if _, exists := a.leafs.LoadOrStore(groupKey, leaf); exists {
return nil, errors.AlreadyExistsf("leaf for group %s", group)
}
return leaf, nil
}
// LeafRange implements Authority interface method.
func (a *DefaultAuthority) LeafRange(ranger func(leaf Leaf) bool) {
a.leafs.Range(func(_, val interface{}) bool {
return ranger(val.(Leaf))
})
}
// NewCA generates a new certificate authority using the provided
// common name and signer.
func NewCA(commonName string, signer crypto.Signer) (*x509.Certificate, error) {
template := &x509.Certificate{}
if err := assetTagCertificate(template); err != nil {
return nil, errors.Annotate(err, "failed tagging new CA certificate")
}
template.Subject = pkix.Name{
CommonName: commonName,
Organization: Organisation,
}
now := time.Now()
template.NotBefore = now.Add(NotBeforeJitter)
template.NotAfter = now.AddDate(DefaultValidityYears, 0, 0)
template.KeyUsage = x509.KeyUsageKeyEncipherment |
x509.KeyUsageDigitalSignature |
x509.KeyUsageCertSign
template.BasicConstraintsValid = true
template.IsCA = true
der, err := x509.CreateCertificate(rand.Reader, template, template,
signer.Public(), signer)
if err != nil {
return nil, errors.Annotate(err, "failed creating CA certificate")
}
caCert, err := x509.ParseCertificate(der)
if err != nil {
return nil, errors.Trace(err)
}
return caCert, nil
}
// NewDefaultAuthority generates a new DefaultAuthority for the supplied CA
// cert and keys. Error is returned when the supplied certificate is not a CA.
func NewDefaultAuthority(authority *x509.Certificate, signer crypto.Signer,
chain ...*x509.Certificate) (*DefaultAuthority, error) {
if !authority.IsCA {
return nil, errors.NotValidf("%s is not a certificate authority",
authority.Subject)
}
return &DefaultAuthority{
authority: NewDefaultLeaf("", authority, chain, signer),
}, nil
}
// NewDefaultAuthorityPem generates a new DefaultAuthority for the supplied pem
// block. The pem block must contain a valid CA certificate and associated
// private key.
func NewDefaultAuthorityPem(pemBlock []byte) (*DefaultAuthority, error) {
leaf, err := NewDefaultLeafPem("", pemBlock)
if err != nil {
return nil, errors.Annotate(err, "generating CA leaf")
}
if !leaf.Certificate().IsCA {
return nil, errors.Errorf("certificate %s is not a CA",
leaf.Certificate().Subject.CommonName)
}
return NewDefaultAuthority(leaf.Certificate(), leaf.Signer(), leaf.Chain()...)
}
// NewDefaultAuthorityPemCAKey generates a new DefaultAuthority for the supplied
// pem ca and key. Returns an error if the supplied cert is not a CA or parsing
// of the pem data fails.
func NewDefaultAuthorityPemCAKey(caPem, keyPem []byte) (*DefaultAuthority, error) {
return NewDefaultAuthorityPem(append(caPem, keyPem...))
}
// SetLeafSigner sets a default signer to use for all newly created leafs on
// this authority.
func (a *DefaultAuthority) SetLeafSigner(signer crypto.Signer) {
a.leafSignerMutex.Lock()
defer a.leafSignerMutex.Unlock()
a.leafSigner = signer
}
// Signer implements Leaf interface method. Returns the signer used for this
// authority.
func (a *DefaultAuthority) Signer() crypto.Signer {
return a.authority.Signer()
}
// TLSCertificate implements Leaf interface method. Returns a tls certificate
// that can be used in tls connections.
func (a *DefaultAuthority) TLSCertificate() *tls.Certificate {
return a.authority.TLSCertificate()
}
// ToPemParts implements the Leaf interface method. Returns this authority split
// into certificate and key pem components.
func (a *DefaultAuthority) ToPemParts() (cert, key []byte, err error) {
return a.authority.ToPemParts()
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"math/big"
"github.com/juju/errors"
"github.com/juju/juju/internal/uuid"
)
// CSRToCertificate copies all fields from a CertificateRequest into a new x509
// Certificate. No policy check is performed; this is just a straight 1-to-1
// copy.
func CSRToCertificate(csr *x509.CertificateRequest) *x509.Certificate {
cert := &x509.Certificate{}
cert.Subject = csr.Subject
cert.Extensions = csr.Extensions
cert.ExtraExtensions = csr.ExtraExtensions
cert.DNSNames = csr.DNSNames
cert.EmailAddresses = csr.EmailAddresses
cert.IPAddresses = csr.IPAddresses
cert.URIs = csr.URIs
return cert
}
func assetTagCertificate(cert *x509.Certificate) error {
uuid, err := uuid.NewUUID()
if err != nil {
return errors.Annotate(err, "failed to generate new certificate uuid")
}
serialNumber, err := newSerialNumber()
if err != nil {
return errors.Annotate(err,
"failed to generate new certificate serial number")
}
cert.SerialNumber = serialNumber
cert.Subject.SerialNumber = uuid.String()
return nil
}
// MakeX509NameFromDefaults constructs a new x509 name from the merging of a
// default and request name. Fields not set in the request name
// will be copied from the default name.
func MakeX509NameFromDefaults(template, request *pkix.Name) pkix.Name {
rval := pkix.Name{}
if template == nil {
template = &pkix.Name{}
}
rval.Country = request.Country
if len(rval.Country) == 0 {
rval.Country = template.Country
}
rval.Organization = request.Organization
if len(rval.Organization) == 0 {
rval.Organization = template.Organization
}
rval.OrganizationalUnit = request.OrganizationalUnit
if len(rval.OrganizationalUnit) == 0 {
rval.OrganizationalUnit = template.OrganizationalUnit
}
rval.Locality = request.Locality
if len(rval.Locality) == 0 {
rval.Locality = template.Locality
}
rval.Province = request.Province
if len(rval.Province) == 0 {
rval.Province = template.Province
}
rval.StreetAddress = request.StreetAddress
if len(rval.StreetAddress) == 0 {
rval.StreetAddress = template.StreetAddress
}
rval.PostalCode = request.PostalCode
if len(rval.PostalCode) == 0 {
rval.PostalCode = template.PostalCode
}
rval.SerialNumber = request.SerialNumber
if rval.SerialNumber == "" {
rval.SerialNumber = template.SerialNumber
}
rval.CommonName = request.CommonName
if rval.CommonName == "" {
rval.CommonName = template.CommonName
}
rval.Names = request.Names
if len(rval.Names) == 0 {
rval.Names = template.Names
}
rval.ExtraNames = request.ExtraNames
if len(rval.ExtraNames) == 0 {
rval.ExtraNames = template.ExtraNames
}
return rval
}
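// exampleLeafSubject is a minimal sketch (not part of the package API) showing
// how request values override template values: only the CommonName is replaced
// here, while the Organization is inherited from LeafSubjectTemplate. The
// common name is illustrative only.
func exampleLeafSubject() pkix.Name {
return MakeX509NameFromDefaults(&LeafSubjectTemplate, &pkix.Name{
CommonName: "juju-example-leaf",
})
}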
// newSerialNumber returns a new random serial number suitable for use in a
// certificate.
func newSerialNumber() (*big.Int, error) {
// A serial number can be up to 20 octets in size.
// https://tools.ietf.org/html/rfc5280#section-4.1.2.2
n, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 8*20))
if err != nil {
return nil, errors.Annotatef(err, "failed to generate serial number")
}
return n, nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"bytes"
"crypto"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"net"
"sort"
"github.com/juju/collections/set"
"github.com/juju/errors"
)
// DefaultLeaf is a default implementation of the Leaf interface
type DefaultLeaf struct {
group string
certificate *x509.Certificate
chain []*x509.Certificate
signer crypto.Signer
tlsCertificate *tls.Certificate
}
// DefaultLeafRequest is a default implementation of the LeafRequest interface
type DefaultLeafRequest struct {
dnsNames set.Strings
ipAddresses map[string]net.IP
leafMaker LeafMaker
requestSigner CertificateRequestSigner
signer crypto.Signer
subject pkix.Name
}
// Leaf represents a certificate and its associated key for signing operations.
type Leaf interface {
// Certificate returns the x509 certificate of this leaf. May be nil if no
// certificate exists yet. Call Commit to sign the leaf.
Certificate() *x509.Certificate
// Chain is the certificate signing chain for this leaf in the case of
// intermediate CAs.
Chain() []*x509.Certificate
// Signer is the crypto key used for signing operations on this leaf.
Signer() crypto.Signer
// TLSCertificate is a convenience method for generating a tls certificate
// for use in tls transport.
TLSCertificate() *tls.Certificate
// ToPemParts is a convenience method for converting this leaf to pem parts
// of certificate/chain and private key.
ToPemParts() (cert, key []byte, err error)
}
// LeafMaker describes a function that can construct new Leafs from the
// supplied certificate and crypto signer.
type LeafMaker func(*x509.Certificate, []*x509.Certificate, crypto.Signer) (Leaf, error)
// LeafRequest is an intermediate unit for requesting new leafs with specific
// attributes.
type LeafRequest interface {
// AddDNSNames adds the specified dns names to the LeafRequest
AddDNSNames(...string) LeafRequest
// AddIPAddresses adds the specified ip addresses to the LeafRequest
AddIPAddresses(...net.IP) LeafRequest
// Commit transforms the LeafRequest to a new Leaf
Commit() (Leaf, error)
}
var (
// HeaderLeafGroup is the pem header key used to record a leaf's group name.
HeaderLeafGroup = "leaf.pki.juju.is/group"
)
// AddDNSNames implements LeafRequest AddDNSNames
func (d *DefaultLeafRequest) AddDNSNames(dnsNames ...string) LeafRequest {
d.dnsNames = d.dnsNames.Union(set.NewStrings(dnsNames...))
return d
}
// AddIPAddresses implements LeafRequest AddIPAddresses
func (d *DefaultLeafRequest) AddIPAddresses(ipAddresses ...net.IP) LeafRequest {
for _, ipAddress := range ipAddresses {
ipStr := ipAddress.String()
if _, exists := d.ipAddresses[ipStr]; !exists {
d.ipAddresses[ipStr] = ipAddress
}
}
return d
}
// Certificate implements Leaf Certificate
func (d *DefaultLeaf) Certificate() *x509.Certificate {
return d.certificate
}
// Chain implements Leaf Chain
func (d *DefaultLeaf) Chain() []*x509.Certificate {
return d.chain
}
// Commit implements LeafRequest Commit
func (d *DefaultLeafRequest) Commit() (Leaf, error) {
var err error
if d.signer == nil {
d.signer, err = DefaultKeyProfile()
if err != nil {
return nil, errors.Trace(err)
}
}
csr := &x509.CertificateRequest{
DNSNames: d.dnsNames.Values(),
IPAddresses: ipAddressMapToSlice(d.ipAddresses),
PublicKey: d.signer.Public(),
Subject: d.subject,
}
cert, chain, err := d.requestSigner.SignCSR(csr)
if err != nil {
return nil, errors.Annotate(err, "signing CSR for leaf")
}
return d.leafMaker(cert, chain, d.signer)
}
// LeafHasDNSNames tests a given Leaf to see if it contains the supplied DNS
// names.
func LeafHasDNSNames(leaf Leaf, dnsNames []string) bool {
certDNSNames := leaf.Certificate().DNSNames
if len(certDNSNames) < len(dnsNames) {
return false
}
a := make([]string, len(certDNSNames))
copy(a, certDNSNames)
sort.Strings(a)
sort.Strings(dnsNames)
for _, name := range dnsNames {
index := sort.SearchStrings(a, name)
if index == len(a) || a[index] != name {
return false
}
}
return true
}
func ipAddressMapToSlice(m map[string]net.IP) []net.IP {
rval := make([]net.IP, len(m))
i := 0
for _, v := range m {
rval[i] = v
i = i + 1
}
return rval
}
// NewDefaultLeaf constructs a new DefaultLeaf for the supplied certificate and
// key
func NewDefaultLeaf(group string, cert *x509.Certificate,
chain []*x509.Certificate, signer crypto.Signer) *DefaultLeaf {
tlsCert := &tls.Certificate{
Certificate: make([][]byte, len(chain)+1),
PrivateKey: signer,
Leaf: cert,
}
tlsCert.Certificate[0] = cert.Raw
for i, chainCert := range chain {
tlsCert.Certificate[i+1] = chainCert.Raw
}
return &DefaultLeaf{
group: group,
certificate: cert,
chain: chain,
signer: signer,
tlsCertificate: tlsCert,
}
}
// NewDefaultLeafPem constructs a new DefaultLeaf from the supplied PEM data
func NewDefaultLeafPem(group string, pemBlock []byte) (*DefaultLeaf, error) {
certs, signers, err := UnmarshalPemData(pemBlock)
if err != nil {
return nil, errors.Trace(err)
}
if len(certs) == 0 {
return nil, errors.New("found zero certificates in pem bundle")
}
if len(signers) != 1 {
return nil, errors.New("expected exactly one private key in bundle")
}
if !PublicKeysEqual(signers[0].Public(), certs[0].PublicKey) {
return nil, errors.New("public keys of first certificate and key do not match")
}
return NewDefaultLeaf(group, certs[0], certs[1:], signers[0]), nil
}
// NewDefaultLeafRequest creates a DefaultLeafRequest object that implements
// LeafRequest.
func NewDefaultLeafRequest(subject pkix.Name,
requestSigner CertificateRequestSigner, maker LeafMaker) *DefaultLeafRequest {
return &DefaultLeafRequest{
dnsNames: set.Strings{},
ipAddresses: map[string]net.IP{},
leafMaker: maker,
requestSigner: requestSigner,
subject: subject,
}
}
// NewDefaultLeafRequestWithSigner creates a DefaultLeafRequest object that
// implements LeafRequest. Takes a default signer to use for all certificate
// creation instead of generating a new one.
func NewDefaultLeafRequestWithSigner(subject pkix.Name, signer crypto.Signer,
requestSigner CertificateRequestSigner,
maker LeafMaker) *DefaultLeafRequest {
return &DefaultLeafRequest{
dnsNames: set.Strings{},
ipAddresses: map[string]net.IP{},
leafMaker: maker,
requestSigner: requestSigner,
signer: signer,
subject: subject,
}
}
// Signer implements Leaf interface Signer
func (d *DefaultLeaf) Signer() crypto.Signer {
return d.signer
}
// TLSCertificate implements Leaf interface TLSCertificate
func (d *DefaultLeaf) TLSCertificate() *tls.Certificate {
return d.tlsCertificate
}
// ToPemParts implements Leaf interface ToPemParts
func (d *DefaultLeaf) ToPemParts() ([]byte, []byte, error) {
certBuf := bytes.Buffer{}
err := CertificateToPemWriter(&certBuf, map[string]string{
HeaderLeafGroup: d.group,
}, d.Certificate(), d.Chain()...)
if err != nil {
return nil, nil, errors.Annotate(err, "turning leaf certificate to pem")
}
keyBuf := bytes.Buffer{}
err = SignerToPemWriter(&keyBuf, d.Signer())
if err != nil {
return nil, nil, errors.Annotate(err, "turning leaf key to pem")
}
return certBuf.Bytes(), keyBuf.Bytes(), nil
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"crypto"
"crypto/sha256"
"crypto/x509"
"encoding/pem"
"io"
"strings"
"github.com/juju/errors"
)
const (
PEMTypeCertificate = "CERTIFICATE"
PEMTypePKCS1 = "RSA PRIVATE KEY"
PEMTypePKCS8 = "PRIVATE KEY"
)
var (
DefaultPemHeaders = map[string]string{}
hexAlphabet = []byte("0123456789ABCDEF")
)
// CertificateToPemString transforms an x509 certificate to a pem string
func CertificateToPemString(headers map[string]string,
cert *x509.Certificate, chain ...*x509.Certificate) (string, error) {
builder := strings.Builder{}
if err := CertificateToPemWriter(&builder, headers, cert, chain...); err != nil {
return "", errors.Trace(err)
}
return builder.String(), nil
}
// CertificateToPemWriter transforms an x509 certificate to pem format on the
// supplied writer
func CertificateToPemWriter(writer io.Writer, headers map[string]string,
cert *x509.Certificate, chain ...*x509.Certificate) error {
for _, cert := range append([]*x509.Certificate{cert}, chain...) {
err := pem.Encode(writer, &pem.Block{
Bytes: cert.Raw,
//TODO re-enable headers on certificate to pem when Juju upgrade
//CAAS mongo to something compiled with latest openssl. Currently
//not all our Openssl versions support pem headers. Make sure to
//also uncomment test.
//Headers: headers,
Type: PEMTypeCertificate,
})
if err != nil {
return errors.Annotate(err, "encoding certificate to pem format")
}
}
return nil
}
// Fingerprint returns a human-readable SHA-256 fingerprint for a certificate
// stored in the PEM format. The returned fingerprint matches the output of:
// openssl x509 -noout -fingerprint -sha256 -inform pem -in cert.pem. Also
// returns the remainder of the input for the next blocks.
func Fingerprint(pemData []byte) (string, []byte, error) {
block, rest := pem.Decode(pemData)
if block == nil {
return "", rest, errors.New(
"input does not contain a valid certificate in PEM format")
} else if block.Type != PEMTypeCertificate {
return "", rest, errors.NotValidf(
"discovered pem block is not of type %s", PEMTypeCertificate)
}
if _, err := x509.ParseCertificate(block.Bytes); err != nil {
return "", rest, errors.Annotate(err, "cannot parse pem certificate to x509")
}
// fingerprint format is: XX:YY:...:ZZ
fingerprint := make([]byte, (sha256.Size*3)-1)
var index int
for _, fb := range sha256.Sum256(block.Bytes) {
if index != 0 {
fingerprint[index] = ':'
index++
}
// Encode each byte as two chars
fingerprint[index] = hexAlphabet[(fb>>4)&0xf]
fingerprint[index+1] = hexAlphabet[fb&0xf]
index += 2
}
return string(fingerprint), rest, nil
}
// IsPemCA returns true if the supplied pem certificate is a CA
func IsPemCA(pemData []byte) (bool, error) {
certs, _, err := UnmarshalPemData(pemData)
if err != nil {
return false, errors.Trace(err)
}
if len(certs) == 0 {
return false, errors.New("no certificates in pem bundle")
}
return certs[0].IsCA, nil
}
// SignerToPemString transforms a crypto signer to PKCS8 pem string
func SignerToPemString(signer crypto.Signer) (string, error) {
builder := strings.Builder{}
if err := SignerToPemWriter(&builder, signer); err != nil {
return "", errors.Trace(err)
}
return builder.String(), nil
}
// SignerToPemWriter transforms a crypto signer to PKCS8 pem using the supplied
// writer
func SignerToPemWriter(writer io.Writer, signer crypto.Signer) error {
der, err := x509.MarshalPKCS8PrivateKey(signer)
if err != nil {
return errors.Annotate(err, "marshalling signer to pkcs8 format")
}
err = pem.Encode(writer, &pem.Block{
Type: PEMTypePKCS8,
Bytes: der,
})
if err != nil {
return errors.Annotate(err, "encoding signer to pkcs8 pem format")
}
return nil
}
// UnmarshalSignerFromPemBlock transforms a given pem block to a crypto signer
func UnmarshalSignerFromPemBlock(block *pem.Block) (crypto.Signer, error) {
switch blockType := block.Type; blockType {
case PEMTypePKCS8:
key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
return nil, errors.Annotate(err, "parsing pem private key")
}
signer, ok := key.(crypto.Signer)
if !ok {
return nil, errors.New("unable to case pem private key to crypto.Signer")
}
return signer, nil
case PEMTypePKCS1:
key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, errors.Annotate(err, "parsing pem private key")
}
return key, nil
default:
return nil, errors.NotSupportedf("block type %s", blockType)
}
}
// UnmarshalPemData unmarshals a set of pem data into certificates and signers
func UnmarshalPemData(pemData []byte) ([]*x509.Certificate, []crypto.Signer, error) {
var (
block *pem.Block
rest = pemData
)
certificates := []*x509.Certificate{}
signers := []crypto.Signer{}
for {
block, rest = pem.Decode(rest)
if block == nil {
break
}
switch blockType := block.Type; blockType {
case PEMTypeCertificate:
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, nil, errors.Annotate(err, "parsing pem certificate block")
}
certificates = append(certificates, cert)
case PEMTypePKCS8:
signer, err := UnmarshalSignerFromPemBlock(block)
if err != nil {
return nil, nil, errors.Annotate(err, "parsing pem private key block")
}
signers = append(signers, signer)
case PEMTypePKCS1:
signer, err := UnmarshalSignerFromPemBlock(block)
if err != nil {
return nil, nil, errors.Annotate(err, "parsing pem private key block")
}
signers = append(signers, signer)
default:
return nil, nil, errors.NotSupportedf("block type %s", blockType)
}
}
return certificates, signers, nil
}
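// examplePemRoundTrip is a minimal sketch (not part of the package API)
// showing how the helpers in this file compose: a certificate and signer are
// encoded to pem, and the concatenated output can be parsed back with
// UnmarshalPemData.
func examplePemRoundTrip(cert *x509.Certificate, signer crypto.Signer) (string, string, error) {
certPem, err := CertificateToPemString(DefaultPemHeaders, cert)
if err != nil {
return "", "", errors.Trace(err)
}
keyPem, err := SignerToPemString(signer)
if err != nil {
return "", "", errors.Trace(err)
}
// Parsing the concatenated parts back should recover one certificate and
// one signer.
_, _, err = UnmarshalPemData([]byte(certPem + keyPem))
return certPem, keyPem, errors.Trace(err)
}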
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"crypto/rand"
"crypto/x509"
"time"
"github.com/juju/errors"
)
// CertificateRequestSigner is an interface for signing CSRs under a CA.
type CertificateRequestSigner interface {
SignCSR(*x509.CertificateRequest) (*x509.Certificate, []*x509.Certificate, error)
}
// CertificateRequestSignerFn implements CertificateRequestSigner
type CertificateRequestSignerFn func(*x509.CertificateRequest) (*x509.Certificate, []*x509.Certificate, error)
// DefaultRequestSigner is a default implementation of CertificateRequestSigner
type DefaultRequestSigner struct {
authority *x509.Certificate
chain []*x509.Certificate
privKey interface{}
}
const (
// DefaultValidityYears is the max age a certificate is signed for using the
// DefaultRequestSigner
DefaultValidityYears = 10
)
var (
// NotBeforeJitter is the amount of time before now that a certificate is
// valid for
NotBeforeJitter = time.Minute * -5
)
// NewDefaultRequestSigner creates a new DefaultRequestSigner for the supplied
// CA and key
func NewDefaultRequestSigner(
authority *x509.Certificate,
chain []*x509.Certificate,
privKey interface{}) *DefaultRequestSigner {
return &DefaultRequestSigner{
authority: authority,
chain: chain,
privKey: privKey,
}
}
// SignCSR implements CertificateRequestSigner SignCSR
func (c CertificateRequestSignerFn) SignCSR(r *x509.CertificateRequest) (*x509.Certificate, []*x509.Certificate, error) {
return c(r)
}
// SignCSR implements CertificateRequestSigner SignCSR
func (d *DefaultRequestSigner) SignCSR(csr *x509.CertificateRequest) (*x509.Certificate, []*x509.Certificate, error) {
template := CSRToCertificate(csr)
if err := assetTagCertificate(template); err != nil {
return nil, nil, errors.Annotate(err, "failed tagging certificate")
}
now := time.Now()
template.NotBefore = now.Add(NotBeforeJitter)
template.NotAfter = now.AddDate(DefaultValidityYears, 0, 0)
der, err := x509.CreateCertificate(rand.Reader, template, d.authority,
csr.PublicKey, d.privKey)
if err != nil {
return nil, nil, errors.Trace(err)
}
reqCert, err := x509.ParseCertificate(der)
if err != nil {
return nil, nil, errors.Trace(err)
}
return reqCert, d.chain, nil
}
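// exampleSignRequest is a minimal sketch (not part of the package API) showing
// how a CA certificate and key are wrapped in a DefaultRequestSigner to turn a
// CSR into a signed leaf certificate. The nil chain assumes the CA is a root
// rather than an intermediate.
func exampleSignRequest(ca *x509.Certificate, caKey interface{},
csr *x509.CertificateRequest) (*x509.Certificate, error) {
requestSigner := NewDefaultRequestSigner(ca, nil, caKey)
cert, _, err := requestSigner.SignCSR(csr)
return cert, errors.Trace(err)
}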
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package pki
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
)
// KeyProfile is a convenient way of getting a crypto private key with a
// default set of attributes.
type KeyProfile func() (crypto.Signer, error)
var (
// DefaultKeyProfile is the default key profile used when no explicit
// profile is supplied.
DefaultKeyProfile KeyProfile = RSA3072
)
// PublicKeysEqual reports whether the two supplied public keys are equal. The
// key types produced by this package implement an Equal method, which is used
// for the comparison; unknown key types compare as not equal.
func PublicKeysEqual(key1, key2 interface{}) bool {
k, ok := key1.(interface{ Equal(x crypto.PublicKey) bool })
return ok && k.Equal(key2)
}
// ECDSAP224 returns an ECDSA P-224 private key
func ECDSAP224() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
}
// ECDSAP256 returns an ECDSA P-256 private key
func ECDSAP256() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
}
// ECDSAP384 returns an ECDSA P-384 private key
func ECDSAP384() (crypto.Signer, error) {
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
}
// RSA2048 returns an RSA 2048 bit private key
func RSA2048() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 2048)
}
// RSA3072 returns an RSA 3072 bit private key
func RSA3072() (crypto.Signer, error) {
return rsa.GenerateKey(rand.Reader, 3072)
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package test
import (
"github.com/juju/errors"
"github.com/juju/juju/internal/pki"
)
// NewTestAuthority returns a valid pki Authority for testing
func NewTestAuthority() (pki.Authority, error) {
signer, err := pki.DefaultKeyProfile()
if err != nil {
return nil, errors.Trace(err)
}
caCert, err := pki.NewCA("juju-testing", signer)
if err != nil {
return nil, errors.Trace(err)
}
return pki.NewDefaultAuthority(caCert, signer)
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package test
import (
"crypto"
"crypto/x509"
"encoding/pem"
"flag"
"strings"
"github.com/juju/juju/internal/pki"
)
// InsecureKeyProfile for tests. Will panic if used outside tests.
func InsecureKeyProfile() (crypto.Signer, error) {
if flag.Lookup("test.v") == nil {
panic("InsecureKeyProfile cannot be used outside tests")
}
// From crypto/rsa documentation:
// This is an insecure, test-only key from RFC 9500, Section 2.1.
// It can be used in tests to avoid slow key generation.
block, _ := pem.Decode([]byte(strings.ReplaceAll(`
-----BEGIN RSA TESTING KEY-----
MIIEowIBAAKCAQEAsPnoGUOnrpiSqt4XynxA+HRP7S+BSObI6qJ7fQAVSPtRkqso
tWxQYLEYzNEx5ZSHTGypibVsJylvCfuToDTfMul8b/CZjP2Ob0LdpYrNH6l5hvFE
89FU1nZQF15oVLOpUgA7wGiHuEVawrGfey92UE68mOyUVXGweJIVDdxqdMoPvNNU
l86BU02vlBiESxOuox+dWmuVV7vfYZ79Toh/LUK43YvJh+rhv4nKuF7iHjVjBd9s
B6iDjj70HFldzOQ9r8SRI+9NirupPTkF5AKNe6kUhKJ1luB7S27ZkvB3tSTT3P59
3VVJvnzOjaA1z6Cz+4+eRvcysqhrRgFlwI9TEwIDAQABAoIBAEEYiyDP29vCzx/+
dS3LqnI5BjUuJhXUnc6AWX/PCgVAO+8A+gZRgvct7PtZb0sM6P9ZcLrweomlGezI
FrL0/6xQaa8bBr/ve/a8155OgcjFo6fZEw3Dz7ra5fbSiPmu4/b/kvrg+Br1l77J
aun6uUAs1f5B9wW+vbR7tzbT/mxaUeDiBzKpe15GwcvbJtdIVMa2YErtRjc1/5B2
BGVXyvlJv0SIlcIEMsHgnAFOp1ZgQ08aDzvilLq8XVMOahAhP1O2A3X8hKdXPyrx
IVWE9bS9ptTo+eF6eNl+d7htpKGEZHUxinoQpWEBTv+iOoHsVunkEJ3vjLP3lyI/
fY0NQ1ECgYEA3RBXAjgvIys2gfU3keImF8e/TprLge1I2vbWmV2j6rZCg5r/AS0u
pii5CvJ5/T5vfJPNgPBy8B/yRDs+6PJO1GmnlhOkG9JAIPkv0RBZvR0PMBtbp6nT
Y3yo1lwamBVBfY6rc0sLTzosZh2aGoLzrHNMQFMGaauORzBFpY5lU50CgYEAzPHl
u5DI6Xgep1vr8QvCUuEesCOgJg8Yh1UqVoY/SmQh6MYAv1I9bLGwrb3WW/7kqIoD
fj0aQV5buVZI2loMomtU9KY5SFIsPV+JuUpy7/+VE01ZQM5FdY8wiYCQiVZYju9X
Wz5LxMNoz+gT7pwlLCsC4N+R8aoBk404aF1gum8CgYAJ7VTq7Zj4TFV7Soa/T1eE
k9y8a+kdoYk3BASpCHJ29M5R2KEA7YV9wrBklHTz8VzSTFTbKHEQ5W5csAhoL5Fo
qoHzFFi3Qx7MHESQb9qHyolHEMNx6QdsHUn7rlEnaTTyrXh3ifQtD6C0yTmFXUIS
CW9wKApOrnyKJ9nI0HcuZQKBgQCMtoV6e9VGX4AEfpuHvAAnMYQFgeBiYTkBKltQ
XwozhH63uMMomUmtSG87Sz1TmrXadjAhy8gsG6I0pWaN7QgBuFnzQ/HOkwTm+qKw
AsrZt4zeXNwsH7QXHEJCFnCmqw9QzEoZTrNtHJHpNboBuVnYcoueZEJrP8OnUG3r
UjmopwKBgAqB2KYYMUqAOvYcBnEfLDmyZv9BTVNHbR2lKkMYqv5LlvDaBxVfilE0
2riO4p6BaAdvzXjKeRrGNEKoHNBpOSfYCOM16NjL8hIZB1CaV3WbT5oY+jp7Mzd5
7d56RZOE+ERK2uz/7JX9VSsM/LbH9pJibd4e8mikDS9ntciqOH/3
-----END RSA TESTING KEY-----`[1:], "TESTING KEY", "PRIVATE KEY")))
return x509.ParsePKCS1PrivateKey(block.Bytes)
}
// OriginalDefaultKeyProfile is the pre-patched pki.DefaultKeyProfile
// value.
var OriginalDefaultKeyProfile = pki.DefaultKeyProfile
func init() {
pki.DefaultKeyProfile = InsecureKeyProfile
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package lxdnames
// NOTE: this package exists to get around circular imports from cloud and
// provider/lxd.
// DefaultCloud is the name of the default lxd cloud, which corresponds to
// the local lxd daemon.
const DefaultCloud = "localhost"
// DefaultCloudAltName is the alternative name of the default lxd cloud,
// which corresponds to the local lxd daemon.
const DefaultCloudAltName = "lxd"
// DefaultLocalRegion is the name of the "region" we support in a local lxd,
// which corresponds to the local lxd daemon.
const DefaultLocalRegion = "localhost"
// DefaultRemoteRegion is the name of the "region" we report if there are no
// other regions for a remote lxd server.
const DefaultRemoteRegion = "default"
// ProviderType defines the provider/cloud type for lxd.
const ProviderType = "lxd"
// IsDefaultCloud returns true if the cloud name is the default lxd cloud.
func IsDefaultCloud(cloudName string) bool {
return cloudName == DefaultCloud || cloudName == DefaultCloudAltName
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
// Attrs is a convenience type for messing
// around with configuration attributes.
type Attrs map[string]interface{}
// Merge returns a new Attrs containing the receiver's attributes with those
// in with added, overriding any duplicates.
func (a Attrs) Merge(with Attrs) Attrs {
new := make(Attrs)
for attr, val := range a {
new[attr] = val
}
for attr, val := range with {
new[attr] = val
}
return new
}
// Delete returns a copy of the Attrs with the named attributes removed.
func (a Attrs) Delete(attrNames ...string) Attrs {
new := make(Attrs)
for attr, val := range a {
new[attr] = val
}
for _, attr := range attrNames {
delete(new, attr)
}
return new
}
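// exampleAttrs is a minimal sketch (not part of the package API) showing how
// Merge and Delete are typically chained to derive a config variant without
// mutating the original. The attribute names and values are illustrative only.
func exampleAttrs() Attrs {
base := Attrs{"name": "controller", "type": "lxd"}
// Merge overrides duplicate keys; Delete drops keys from a copy.
return base.Merge(Attrs{"type": "manual"}).Delete("name")
}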
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"context"
"fmt"
"os"
"reflect"
"runtime"
"strings"
"time"
"github.com/juju/collections/set"
"github.com/juju/os/v2/series"
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
"github.com/juju/utils/v4"
"github.com/juju/version/v2"
gc "gopkg.in/check.v1"
"github.com/juju/juju/core/arch"
"github.com/juju/juju/core/base"
coreos "github.com/juju/juju/core/os"
"github.com/juju/juju/core/os/ostype"
jujuversion "github.com/juju/juju/core/version"
"github.com/juju/juju/internal/featureflag"
internallogger "github.com/juju/juju/internal/logger"
"github.com/juju/juju/internal/wrench"
"github.com/juju/juju/juju/osenv"
)
var logger = internallogger.GetLogger("juju.testing")
// JujuOSEnvSuite isolates the tests from Juju environment variables.
// This is intended to be only used by existing suites, usually embedded in
// BaseSuite and in FakeJujuXDGDataHomeSuite. Eventually the tests relying on
// JujuOSEnvSuite will be converted to use the IsolationSuite in
// github.com/juju/testing, and this suite will be removed.
// Do not use JujuOSEnvSuite when writing new tests.
type JujuOSEnvSuite struct {
oldHomeEnv string
oldEnvironment map[string]string
initialFeatureFlags string
}
func (s *JujuOSEnvSuite) SetUpTest(c *gc.C) {
s.oldEnvironment = make(map[string]string)
for _, name := range []string{
osenv.JujuXDGDataHomeEnvKey,
osenv.JujuControllerEnvKey,
osenv.JujuModelEnvKey,
osenv.JujuLoggingConfigEnvKey,
osenv.JujuFeatureFlagEnvKey,
osenv.JujuFeatures,
osenv.XDGDataHome,
} {
s.oldEnvironment[name] = os.Getenv(name)
os.Setenv(name, "")
}
s.oldHomeEnv = utils.Home()
os.Setenv(osenv.JujuXDGDataHomeEnvKey, c.MkDir())
err := utils.SetHome("")
c.Assert(err, jc.ErrorIsNil)
// Update the feature flag set to be the requested initial set.
// For tests, setting with the environment variable isolates us
// from a single resource that was hitting contention during parallel
// test runs.
os.Setenv(osenv.JujuFeatureFlagEnvKey, s.initialFeatureFlags)
featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
func (s *JujuOSEnvSuite) TearDownTest(c *gc.C) {
for name, value := range s.oldEnvironment {
os.Setenv(name, value)
}
err := utils.SetHome(s.oldHomeEnv)
c.Assert(err, jc.ErrorIsNil)
}
// SkipIfPPC64EL skips the test if the arch is PPC64EL and the
// compiler is gccgo.
func SkipIfPPC64EL(c *gc.C, bugID string) {
if runtime.Compiler == "gccgo" &&
arch.NormaliseArch(runtime.GOARCH) == arch.PPC64EL {
c.Skip(fmt.Sprintf("Test disabled on PPC64EL until fixed - see bug %s", bugID))
}
}
// SkipIfS390X skips the test if the arch is S390X.
func SkipIfS390X(c *gc.C, bugID string) {
if arch.NormaliseArch(runtime.GOARCH) == arch.S390X {
c.Skip(fmt.Sprintf("Test disabled on S390X until fixed - see bug %s", bugID))
}
}
// SkipIfWindowsBug skips the test if the OS is Windows.
func SkipIfWindowsBug(c *gc.C, bugID string) {
if runtime.GOOS == "windows" {
c.Skip(fmt.Sprintf("Test disabled on Windows until fixed - see bug %s", bugID))
}
}
// SkipUnlessControllerOS skips the test if the current OS is not a supported
// controller OS.
func SkipUnlessControllerOS(c *gc.C) {
if coreos.HostOS() != ostype.Ubuntu {
c.Skip("Test disabled for non-controller OS")
}
}
// SkipLXDNotSupported will skip tests if the host does not support LXD
func SkipLXDNotSupported(c *gc.C) {
if coreos.HostOS() != ostype.Ubuntu {
c.Skip("Test disabled for non-LXD OS")
}
}
// SkipFlaky skips the test if there is an open bug for intermittent test failures
func SkipFlaky(c *gc.C, bugID string) {
c.Skip(fmt.Sprintf("Test disabled until flakiness is fixed - see bug %s", bugID))
}
// SetInitialFeatureFlags sets the feature flags to be in effect for
// the next call to SetUpTest.
func (s *JujuOSEnvSuite) SetInitialFeatureFlags(flags ...string) {
s.initialFeatureFlags = strings.Join(flags, ",")
}
// SetFeatureFlags sets the feature flags in the environment immediately, for
// the currently running test.
func (s *JujuOSEnvSuite) SetFeatureFlags(flag ...string) {
flags := strings.Join(flag, ",")
if err := os.Setenv(osenv.JujuFeatureFlagEnvKey, flags); err != nil {
panic(err)
}
logger.Debugf(context.TODO(), "setting feature flags: %s", flags)
featureflag.SetFlagsFromEnvironment(osenv.JujuFeatureFlagEnvKey)
}
// BaseSuite provides required functionality for all test suites
// when embedded in a gocheck suite type:
// - logger redirect
// - no outgoing network access
// - protection of user's home directory
// - scrubbing of env vars
// TODO (frankban) 2014-06-09: switch to using IsolationSuite.
// NOTE: there will be many tests that fail when you try to change
// to the IsolationSuite that rely on external things in PATH.
type BaseSuite struct {
oldLtsForTesting string
testing.CleanupSuite
testing.LoggingSuite
JujuOSEnvSuite
InitialLoggingConfig string
}
func (s *BaseSuite) SetUpSuite(c *gc.C) {
wrench.SetEnabled(false)
s.CleanupSuite.SetUpSuite(c)
s.LoggingSuite.SetUpSuite(c)
// JujuOSEnvSuite does not have a suite setup.
// LTS-dependent requires new entry upon new LTS release.
s.oldLtsForTesting = series.SetLatestLtsForTesting("xenial")
}
func (s *BaseSuite) TearDownSuite(c *gc.C) {
// JujuOSEnvSuite does not have a suite teardown.
_ = series.SetLatestLtsForTesting(s.oldLtsForTesting)
s.LoggingSuite.TearDownSuite(c)
s.CleanupSuite.TearDownSuite(c)
}
func (s *BaseSuite) SetUpTest(c *gc.C) {
s.CleanupSuite.SetUpTest(c)
s.LoggingSuite.SetUpTest(c)
s.JujuOSEnvSuite.SetUpTest(c)
if s.InitialLoggingConfig != "" {
_ = internallogger.ConfigureLoggers(s.InitialLoggingConfig)
}
// We do this to isolate invocations of bash from pulling in the
// ambient user environment, and potentially affecting the tests.
// We can't always just use IsolationSuite because we still need
// PATH and possibly a couple other envars.
s.PatchEnvironment("BASH_ENV", "")
}
func (s *BaseSuite) TearDownTest(c *gc.C) {
s.JujuOSEnvSuite.TearDownTest(c)
s.LoggingSuite.TearDownTest(c)
s.CleanupSuite.TearDownTest(c)
}
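// exampleSuite is a minimal sketch (not part of the package API) of how
// BaseSuite is intended to be embedded. Feature flags must be registered with
// SetInitialFeatureFlags before the embedded SetUpTest runs for them to take
// effect. The flag name is illustrative only.
type exampleSuite struct {
BaseSuite
}

func (s *exampleSuite) SetUpTest(c *gc.C) {
s.SetInitialFeatureFlags("example-flag")
s.BaseSuite.SetUpTest(c)
}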
// CheckString compares two strings. If they do not match then the spot
// where they do not match is logged.
func CheckString(c *gc.C, value, expected string) {
if !c.Check(value, gc.Equals, expected) {
diffStrings(c, value, expected)
}
}
func diffStrings(c *gc.C, value, expected string) {
// If only Go had a diff library.
vlines := strings.Split(value, "\n")
elines := strings.Split(expected, "\n")
vsize := len(vlines)
esize := len(elines)
if vsize < 2 || esize < 2 {
return
}
smaller := elines
if vsize < esize {
smaller = vlines
}
for i := range smaller {
vline := vlines[i]
eline := elines[i]
if vline != eline {
c.Logf("first mismatched line (%d/%d):", i, len(smaller))
c.Log("expected: " + eline)
c.Log("got: " + vline)
break
}
}
}
// TestCleanup is used to allow DumpTestLogsAfter to take any test suite
// that supports the standard cleanup function.
type TestCleanup interface {
AddCleanup(func(*gc.C))
}
// DumpTestLogsAfter will write the test logs to stdout if the timeout
// is reached.
func DumpTestLogsAfter(timeout time.Duration, c *gc.C, cleaner TestCleanup) {
done := make(chan interface{})
go func() {
select {
case <-time.After(timeout):
fmt.Print(c.GetTestLog())
case <-done:
}
}()
cleaner.AddCleanup(func(_ *gc.C) {
close(done)
})
}
// GetExportedFields returns the exported fields of a struct.
func GetExportedFields(arg interface{}) set.Strings {
t := reflect.TypeOf(arg)
result := set.NewStrings()
count := t.NumField()
for i := 0; i < count; i++ {
f := t.Field(i)
// empty PkgPath means exported field.
// see https://golang.org/pkg/reflect/#StructField
if f.PkgPath == "" {
result.Add(f.Name)
}
}
return result
}
// CurrentVersion returns the current Juju binary version for the host.
func CurrentVersion() version.Binary {
return version.Binary{
Number: jujuversion.Current,
Arch: arch.HostArch(),
Release: coreos.HostOSTypeName(),
}
}
// HostBase returns coreos.HostBase(), asserting on error.
func HostBase(c *gc.C) base.Base {
hostBase, err := coreos.HostBase()
c.Assert(err, jc.ErrorIsNil)
return hostBase
}
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"crypto/ed25519"
cryptorand "crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"math/rand"
"sync"
mgotesting "github.com/juju/mgo/v3/testing"
cryptossh "golang.org/x/crypto/ssh"
)
// CACert and CAKey make up a CA key pair.
// CACertX509 and CAKeyRSA hold their parsed equivalents.
// ServerCert and ServerKey hold a CA-signed server cert/key.
// Certs holds the certificates and keys required to make a secure
// connection to a Mongo database.
//
//revive:disable:exported
var (
once sync.Once
// CACert and CAKey make up a CA key pair and ServerCert and ServerKey
// hold a CA-signed server cert/key.
CACert, CAKey, ServerCert, ServerKey = chooseGeneratedCA()
// CACertX509 and CAKeyRSA hold their parsed equivalents.
CACertX509, CAKeyRSA = mustParseCertAndKey(CACert, CAKey)
// ServerTLSCert is the parsed server certificate.
ServerTLSCert = mustParseServerCert(ServerCert, ServerKey)
// Certs holds the certificates and keys required to make a secure
// connection to a Mongo database.
Certs = serverCerts()
// Other valid test certs different from the default.
// OtherCACert and OtherCAKey are the other CA certificate and key.
OtherCACert, OtherCAKey = chooseGeneratedOtherCA()
// OtherCACertX509 and OtherCAKeyRSA hold their parsed equivalents.
OtherCACertX509, OtherCAKeyRSA = mustParseCertAndKey(OtherCACert, OtherCAKey)
// SSHServerHostKey for testing
SSHServerHostKey = mustGenerateSSHServerHostKey()
)
//revive:enable:exported
func chooseGeneratedCA() (string, string, string, string) {
index := rand.Intn(len(generatedCA))
if len(generatedCA) != len(generatedServer) {
// This should never happen.
panic("generatedCA and generatedServer have mismatched length")
}
ca := generatedCA[index]
server := generatedServer[index]
return ca.certPEM, ca.keyPEM, server.certPEM, server.keyPEM
}
func chooseGeneratedOtherCA() (string, string) {
index := rand.Intn(len(otherCA))
ca := otherCA[index]
return ca.certPEM, ca.keyPEM
}
func mustParseServerCert(srvCert string, srvKey string) *tls.Certificate {
tlsCert, err := tls.X509KeyPair([]byte(srvCert), []byte(srvKey))
if err != nil {
panic(err)
}
x509Cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
panic(err)
}
tlsCert.Leaf = x509Cert
return &tlsCert
}
func mustParseCertAndKey(certPEM, keyPEM string) (*x509.Certificate, *rsa.PrivateKey) {
tlsCert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM))
if err != nil {
panic(err)
}
cert, err := x509.ParseCertificate(tlsCert.Certificate[0])
if err != nil {
panic(err)
}
key, ok := tlsCert.PrivateKey.(*rsa.PrivateKey)
if !ok {
panic(fmt.Errorf("private key with unexpected type %T", tlsCert.PrivateKey))
}
return cert, key
}
func serverCerts() *mgotesting.Certs {
serverCert, serverKey := mustParseCertAndKey(ServerCert, ServerKey)
return &mgotesting.Certs{
CACert: CACertX509,
ServerCert: serverCert,
ServerKey: serverKey,
}
}
func mustGenerateSSHServerHostKey() string {
var k string
once.Do(func() {
_, privateKey, err := ed25519.GenerateKey(cryptorand.Reader)
if err != nil {
panic("failed to generate ED25519 key")
}
pemKey, err := cryptossh.MarshalPrivateKey(privateKey, "")
if err != nil {
panic("failed to marshal private key")
}
k = string(pem.EncodeToMemory(pemKey))
})
return k
}
// Copyright 2017 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"math/rand"
"time"
)
// To regenerate the certificates in certs_generated, run:
// go run github.com/juju/juju/generate/certgen
// NewCA returns a random one of the pre-generated certs to speed up
// tests. The comments on the certs will not match the args.
func NewCA(commonName, uuid string, expiry time.Time) (certPEM, keyPEM string, err error) {
index := rand.Intn(len(generatedCA))
cert := generatedCA[index]
return cert.certPEM, cert.keyPEM, nil
}
// Copyright 2011, 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"reflect"
"time"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
)
// NotifyAsserterC gives helper functions for making assertions about how a
// channel operates (whether we get a receive event or not, whether it is
// closed, etc.)
type NotifyAsserterC struct {
// C is a gocheck C structure for doing assertions
C *gc.C
// Chan is the channel we want to receive on
Chan <-chan struct{}
// Precond will be called before waiting on the channel, can be nil
Precond func()
}
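// exampleNotifyAsserter is a hedged usage sketch, not part of the original
// file: it shows how a test might wait for exactly one notification on a
// watcher-style channel.
func exampleNotifyAsserter(c *gc.C, changes <-chan struct{}) {
asserter := NotifyAsserterC{C: c, Chan: changes}
// Fails the test if no event arrives within LongWait, or if a second
// event arrives within ShortWait.
asserter.AssertOneReceive()
}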
// AssertReceive will ensure that we get an event on the channel and the
// channel is not closed.
func (a *NotifyAsserterC) AssertReceive() {
if a.Precond != nil {
a.Precond()
}
select {
case _, ok := <-a.Chan:
a.C.Assert(ok, jc.IsTrue)
case <-time.After(LongWait):
a.C.Fatalf("timed out waiting for channel message")
}
}
// AssertOneReceive checks that we have exactly one message, and no more
func (a *NotifyAsserterC) AssertOneReceive() {
a.AssertReceive()
a.AssertNoReceive()
}
// AssertClosed ensures that we get a closed event on the channel
func (a *NotifyAsserterC) AssertClosed() {
if a.Precond != nil {
a.Precond()
}
select {
case _, ok := <-a.Chan:
a.C.Assert(ok, jc.IsFalse)
case <-time.After(LongWait):
a.C.Fatalf("timed out waiting for channel to close")
}
}
// AssertNoReceive asserts that we fail to receive on the channel after a short wait.
func (a *NotifyAsserterC) AssertNoReceive() {
select {
case <-a.Chan:
a.C.Fatalf("unexpected receive")
case <-time.After(ShortWait):
}
}
// ContentAsserterC is like NotifyAsserterC in that it checks the behavior of a
// channel. The difference is that we expect actual content on the channel, so
// callers need to pass the channel, and receive its values, as an 'interface{}'.
type ContentAsserterC struct {
// C is a gocheck C structure for doing assertions
C *gc.C
// Chan is the channel we want to receive on
Chan interface{}
// Precond will be called before waiting on the channel, can be nil
Precond func()
}
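// exampleContentAsserter is a hedged usage sketch, not part of the original
// file: the channel carries string content, which the asserter receives via
// reflection and compares using DeepEquals.
func exampleContentAsserter(c *gc.C, results <-chan string) {
asserter := ContentAsserterC{C: c, Chan: results}
// Expect exactly one value, "done", and no further sends afterwards.
asserter.AssertOneValue("done")
}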
// recv waits to receive a value on the channel for the given
// time. It returns the value received, if any, whether it
// was received ok (the channel was not closed) and
// whether the receive timed out.
func (a *ContentAsserterC) recv(timeout time.Duration) (val interface{}, ok, timedOut bool) {
if a.Precond != nil {
a.Precond()
}
which, v, ok := reflect.Select([]reflect.SelectCase{{
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(a.Chan),
}, {
Dir: reflect.SelectRecv,
Chan: reflect.ValueOf(time.After(timeout)),
}})
switch which {
case 0:
a.C.Assert(ok, jc.IsTrue)
return v.Interface(), ok, false
case 1:
return nil, false, true
}
panic("unreachable")
}
// AssertReceive will ensure that we get an event on the channel and the
// channel is not closed. It will return the content received
func (a *ContentAsserterC) AssertReceive() interface{} {
v, ok, timedOut := a.recv(LongWait)
if timedOut {
a.C.Fatalf("timed out waiting for channel message")
}
a.C.Assert(ok, jc.IsTrue)
return v
}
// AssertOneReceive checks that we have exactly one message, and no more
func (a *ContentAsserterC) AssertOneReceive() interface{} {
res := a.AssertReceive()
a.AssertNoReceive()
return res
}
// AssertOneValue checks that exactly 1 message was sent, and that the content DeepEquals the value.
// It also returns the value in case further inspection is desired.
func (a *ContentAsserterC) AssertOneValue(val interface{}) interface{} {
res := a.AssertReceive()
a.C.Assert(val, gc.DeepEquals, res)
a.AssertNoReceive()
return res
}
// AssertClosed ensures that we get a closed event on the channel
func (a *ContentAsserterC) AssertClosed() {
_, ok, timedOut := a.recv(LongWait)
if timedOut {
a.C.Fatalf("timed out waiting for channel to close")
}
a.C.Assert(ok, jc.IsFalse)
}
// AssertNoReceive asserts that we fail to receive on the channel after a short wait.
func (a *ContentAsserterC) AssertNoReceive() {
content, _, timedOut := a.recv(ShortWait)
if timedOut {
return
}
a.C.Fatalf("unexpected receive: %#v", content)
}
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"strings"
"github.com/juju/errors"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
)
func AssertOperationWasBlocked(c *gc.C, err error, msg string) {
c.Assert(err.Error(), jc.Contains, "disabled", gc.Commentf("%s", errors.Details(err)))
// msg is logged
stripped := strings.Replace(c.GetTestLog(), "\n", "", -1)
c.Check(stripped, gc.Matches, msg)
c.Check(stripped, jc.Contains, "disabled")
}
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"context"
"github.com/juju/utils/v4"
coretesting "github.com/juju/juju/core/testing"
)
// ShortWait is a reasonable amount of time to block waiting for something that
// shouldn't actually happen (as in, the test suite will *actually* wait this
// long before continuing).
// Deprecated: use core/testing.ShortWait instead. This is so you don't bring
// in extra dependencies from this package.
const ShortWait = coretesting.ShortWait
// LongWait is used when something should have already happened, or happens
// quickly, but we want to make sure we just haven't missed it. As in, the test
// suite should proceed without sleeping at all, but just in case. It is long
// so that we don't have spurious failures without actually slowing down the
// test suite.
// Deprecated: use core/testing.LongWait instead. This is so you don't bring
// in extra dependencies from this package.
const LongWait = coretesting.LongWait
// TODO(katco): 2016-08-09: lp:1611427
var LongAttempt = &utils.AttemptStrategy{
Total: LongWait,
Delay: ShortWait,
}
// LongWaitContext returns a context whose deadline is tied to the duration of
// a LongWait.
func LongWaitContext() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), LongWait)
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"github.com/juju/names/v6"
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
"github.com/juju/utils/v4/ssh"
"github.com/juju/version/v2"
gc "gopkg.in/check.v1"
"github.com/juju/juju/cloud"
"github.com/juju/juju/controller"
corebase "github.com/juju/juju/core/base"
"github.com/juju/juju/core/objectstore"
jujuversion "github.com/juju/juju/core/version"
environscloudspec "github.com/juju/juju/environs/cloudspec"
"github.com/juju/juju/environs/config"
"github.com/juju/juju/internal/charmhub"
"github.com/juju/juju/internal/uuid"
)
// FakeAuthKeys holds the authorized key used for testing
// purposes in FakeConfig. It is valid for parsing with the utils/ssh
// authorized-key utilities.
const FakeAuthKeys = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAYQDP8fPSAMFm2PQGoVUks/FENVUMww1QTK6m++Y2qX9NGHm43kwEzxfoWR77wo6fhBhgFHsQ6ogE/cYLx77hOvjTchMEP74EVxSce0qtDjI7SwYbOpAButRId3g/Ef4STz8= joe@0.1.2.4`
func init() {
_, err := ssh.ParseAuthorisedKey(FakeAuthKeys)
if err != nil {
panic("FakeAuthKeys does not hold a valid authorized key: " + err.Error())
}
}
var (
// FakeSupportedJujuBases is used to provide a list of canned results
// of a base to test bootstrap code against.
FakeSupportedJujuBases = []corebase.Base{
corebase.MustParseBaseFromString("ubuntu@20.04"),
corebase.MustParseBaseFromString("ubuntu@22.04"),
corebase.MustParseBaseFromString("ubuntu@24.04"),
jujuversion.DefaultSupportedLTSBase(),
}
)
// FakeVersionNumber is a valid version number that can be used in testing.
var FakeVersionNumber = version.MustParse("2.99.0")
// ModelTag is a defined known valid UUID that can be used in testing.
var ModelTag = names.NewModelTag("deadbeef-0bad-400d-8000-4b1d0d06f00d")
// ControllerTag is a defined known valid UUID that can be used in testing.
var ControllerTag = names.NewControllerTag("deadbeef-1bad-500d-9000-4b1d0d06f00d")
// ControllerModelTag is a defined known valid UUID that can be used in testing
// for the model the controller is running on.
var ControllerModelTag = names.NewModelTag("deadbeef-2bad-500d-9000-4b1d0d06f00d")
// FakeControllerConfig returns an environment configuration
// that is expected to be found in state for a fake controller.
func FakeControllerConfig() controller.Config {
return controller.Config{
"controller-uuid": ControllerTag.Id(),
"ca-cert": CACert,
"state-port": 1234,
"api-port": 17777,
"set-numa-control-policy": false,
"model-logfile-max-backups": 1,
"model-logfile-max-size": "1M",
"model-logs-size": "1M",
"max-txn-log-size": "10M",
"auditing-enabled": false,
"audit-log-capture-args": true,
"audit-log-max-size": "200M",
"audit-log-max-backups": 5,
"query-tracing-threshold": "1s",
"object-store-type": objectstore.FileBackend,
}
}
// FakeConfig returns a model configuration for a
// fake provider with all required attributes set.
func FakeConfig() Attrs {
return Attrs{
"type": "dummy",
"name": "testmodel",
"uuid": ModelTag.Id(),
"firewall-mode": config.FwInstance,
"ssl-hostname-verification": true,
"development": false,
}
}
// FakeCloudSpec returns a cloud spec with sample data.
func FakeCloudSpec() environscloudspec.CloudSpec {
cred := cloud.NewCredential(cloud.UserPassAuthType, map[string]string{"username": "dummy", "password": "secret"})
return environscloudspec.CloudSpec{
Type: "dummy",
Name: "dummy",
Endpoint: "dummy-endpoint",
IdentityEndpoint: "dummy-identity-endpoint",
Region: "dummy-region",
StorageEndpoint: "dummy-storage-endpoint",
Credential: &cred,
}
}
// ModelConfig returns a default environment configuration suitable for
// setting in the state.
func ModelConfig(c *gc.C) *config.Config {
uuid := mustUUID()
return CustomModelConfig(c, Attrs{"uuid": uuid})
}
// mustUUID returns a stringified uuid or panics
func mustUUID() string {
uuid, err := uuid.NewUUID()
if err != nil {
panic(err)
}
return uuid.String()
}
// CustomModelConfig returns an environment configuration with
// additional specified keys added.
func CustomModelConfig(c *gc.C, extra Attrs) *config.Config {
attrs := FakeConfig().Merge(Attrs{
"agent-version": "2.0.0",
"charmhub-url": charmhub.DefaultServerURL,
}).Merge(extra).Delete("admin-secret")
cfg, err := config.New(config.NoDefaults, attrs)
c.Assert(err, jc.ErrorIsNil)
return cfg
}
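// exampleCustomModelConfig is a hedged sketch, not part of the original file:
// it shows how a test might build a model config with an overridden
// attribute; the attribute value here is hypothetical.
func exampleCustomModelConfig(c *gc.C) *config.Config {
return CustomModelConfig(c, Attrs{"name": "my-test-model"})
}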
const DefaultMongoPassword = "conn-from-name-secret"
// FakeJujuXDGDataHomeSuite isolates the user's home directory and
// sets up a Juju home with a sample environment and certificate.
type FakeJujuXDGDataHomeSuite struct {
JujuOSEnvSuite
testing.FakeHomeSuite
}
func (s *FakeJujuXDGDataHomeSuite) SetUpTest(c *gc.C) {
s.JujuOSEnvSuite.SetUpTest(c)
s.FakeHomeSuite.SetUpTest(c)
}
func (s *FakeJujuXDGDataHomeSuite) TearDownTest(c *gc.C) {
s.FakeHomeSuite.TearDownTest(c)
s.JujuOSEnvSuite.TearDownTest(c)
}
// AssertConfigParameterUpdated updates environment parameter and
// asserts that no errors were encountered.
func (s *FakeJujuXDGDataHomeSuite) AssertConfigParameterUpdated(c *gc.C, key, value string) {
s.PatchEnvironment(key, value)
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"github.com/juju/testing"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
)
const jujuPkgPrefix = "github.com/juju/juju/"
// FindJujuCoreImports returns a sorted list of juju-core packages that are
// imported by the packageName parameter. The resulting list removes the
// common prefix "github.com/juju/juju/" leaving just the short names.
// Suites calling this MUST NOT override HOME or XDG_CACHE_HOME.
func FindJujuCoreImports(c *gc.C, packageName string) []string {
imps, err := testing.FindImports(packageName, jujuPkgPrefix)
c.Assert(err, jc.ErrorIsNil)
return imps
}
// Copyright 2020 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"fmt"
"net"
gc "gopkg.in/check.v1"
)
type ipsEqualChecker struct {
*gc.CheckerInfo
}
var IPsEqual gc.Checker = &ipsEqualChecker{
&gc.CheckerInfo{Name: "IPsEqual", Params: []string{"obtained", "expected"}},
}
func (c *ipsEqualChecker) Check(params []interface{}, name []string) (bool, string) {
ips1, ok := params[0].([]net.IP)
if !ok {
return false, "param 0 is not of type []net.IP"
}
ips2, ok := params[1].([]net.IP)
if !ok {
return false, "param 0 is not of type []net.IP"
}
if len(ips1) != len(ips2) {
return false, fmt.Sprintf("legnth of ip slices not equal %d != %d",
len(ips1), len(ips2))
}
for i := range ips1 {
if !ips1[i].Equal(ips2[i]) {
return false, "ip slices are not equal"
}
}
return true, ""
}
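// exampleIPsEqual is a hedged usage sketch, not part of the original file:
// the checker is used like any other gocheck checker.
func exampleIPsEqual(c *gc.C, obtained, expected []net.IP) {
// Fails with a descriptive message if the slices differ in length or
// element-wise equality.
c.Check(obtained, IPsEqual, expected)
}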
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"testing"
"time"
mgotesting "github.com/juju/mgo/v3/testing"
)
// MgoTestPackage should be called to register the tests for any package
// that requires a connection to a MongoDB server.
//
// The server will be configured with SSL disabled, since enabling SSL slows
// down tests. For tests that care about security (which should be few), use
// MgoSSLTestPackage.
func MgoTestPackage(t *testing.T) {
mgotesting.MgoServer.EnableReplicaSet = true
// Tests tend to cause enough contention that the default lock request
// timeout of 5ms is not enough. We may need to consider increasing the
// value for production also.
mgotesting.MgoServer.MaxTransactionLockRequestTimeout = 20 * time.Millisecond
mgotesting.MgoTestPackage(t, nil)
}
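// exampleMgoTestPackage is a hedged sketch, not part of the original file: a
// package needing MongoDB would normally expose this as its TestPackage entry
// point in a _test.go file.
func exampleMgoTestPackage(t *testing.T) {
// Registers the package's gocheck suites against a shared MongoDB server
// with replica sets enabled and SSL disabled.
MgoTestPackage(t)
}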
// MgoSSLTestPackage should be called to register the tests for any package
// that requires a secure (SSL) connection to a MongoDB server.
func MgoSSLTestPackage(t *testing.T) {
mgotesting.MgoServer.EnableReplicaSet = true
// Tests tend to cause enough contention that the default lock request
// timeout of 5ms is not enough. We may need to consider increasing the
// value for production also.
mgotesting.MgoServer.MaxTransactionLockRequestTimeout = 20 * time.Millisecond
mgotesting.MgoTestPackage(t, Certs)
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"fmt"
"io"
"net/http"
"strings"
)
// CannedRoundTripper can be used to provide canned "http" responses without
// actually starting an HTTP server.
//
// Use this in conjunction with ProxyRoundTripper. A ProxyRoundTripper is
// what gets registered as the default handler for a given protocol (such as
// "test") and then tests can direct the ProxyRoundTripper to delegate to a
// CannedRoundTripper. The reason for this is that we can register a
// roundtripper to handle a scheme, but there is no way to unregister it: you
// may need to re-use the same ProxyRoundTripper but use different
// CannedRoundTrippers to return different results.
type CannedRoundTripper struct {
// files maps file names to their contents. If the roundtripper
// receives a request for any of these files, and none of the entries
// in errorURLs below matches, it will return the contents associated
// with that filename here.
// TODO(jtv): Do something more sensible here: either make files take
// precedence over errors, or return the given error *with* the given
// contents, or just disallow overlap.
files map[string]string
// errorURLs are prefixes that should return specific HTTP status
// codes. If a request's URL matches any of these prefixes, the
// associated error status is returned.
// There is no clever longest-prefix selection here. If more than
// one prefix matches, any one of them may be used.
// TODO(jtv): Decide what to do about multiple matching prefixes.
errorURLs map[string]int
}
var _ http.RoundTripper = (*CannedRoundTripper)(nil)
// ProxyRoundTripper is an http.RoundTripper implementation that does nothing
// but delegate to another RoundTripper. This lets tests change how they handle
// requests for a given scheme, despite the fact that the standard library does
// not support un-registration, or registration of a new roundtripper with a
// URL scheme that's already handled.
//
// Use the RegisterForScheme method to install this as the standard handler
// for a particular protocol. For example, if you call
// prt.RegisterForScheme("test") then afterwards, any request to "test:///foo"
// will be routed to prt.
type ProxyRoundTripper struct {
// Sub is the roundtripper that this roundtripper delegates to, if any.
// If you leave this nil, this roundtripper is effectively disabled.
Sub http.RoundTripper
}
var _ http.RoundTripper = (*ProxyRoundTripper)(nil)
// RegisterForScheme registers a ProxyRoundTripper as the default roundtripper
// for the given URL scheme.
//
// This cannot be undone, nor overwritten with a different roundtripper. If
// you change your mind later about what the roundtripper should do, set its
// "Sub" field to delegate to a different roundtripper (or to nil if you don't
// want to handle its requests at all any more).
func (prt *ProxyRoundTripper) RegisterForScheme(scheme string) {
http.DefaultTransport.(*http.Transport).RegisterProtocol(scheme, prt)
}
// RegisterForTransportScheme registers a ProxyRoundTripper as the transport
// roundtripper for the given URL scheme.
func (prt *ProxyRoundTripper) RegisterForTransportScheme(transport *http.Transport, scheme string) {
transport.RegisterProtocol(scheme, prt)
}
func (prt *ProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
if prt.Sub == nil {
panic("An attempt was made to request file content without having" +
" the virtual filesystem initialized.")
}
return prt.Sub.RoundTrip(req)
}
func newHTTPResponse(status string, statusCode int, body string) *http.Response {
return &http.Response{
Proto: "HTTP/1.0",
ProtoMajor: 1,
Header: make(http.Header),
Close: true,
// Parameter fields:
Status: status,
StatusCode: statusCode,
Body: io.NopCloser(strings.NewReader(body)),
ContentLength: int64(len(body)),
}
}
// RoundTrip returns a canned error or body for the given request.
func (v *CannedRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
full := req.URL.String()
for urlPrefix, statusCode := range v.errorURLs {
if strings.HasPrefix(full, urlPrefix) {
status := fmt.Sprintf("%d Error", statusCode)
return newHTTPResponse(status, statusCode, ""), nil
}
}
if contents, found := v.files[req.URL.Path]; found {
return newHTTPResponse("200 OK", http.StatusOK, contents), nil
}
return newHTTPResponse("404 Not Found", http.StatusNotFound, ""), nil
}
// NewCannedRoundTripper returns a CannedRoundTripper with the given canned
// responses.
func NewCannedRoundTripper(files map[string]string, errorURLs map[string]int) *CannedRoundTripper {
return &CannedRoundTripper{files, errorURLs}
}
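// exampleCannedResponses is a hedged usage sketch, not part of the original
// file: it wires a ProxyRoundTripper into the default HTTP transport for a
// made-up "test" scheme and points it at a CannedRoundTripper.
func exampleCannedResponses() {
prt := &ProxyRoundTripper{}
prt.RegisterForScheme("test")
// Requests for test:///index.txt return the canned body; requests whose
// URL starts with "test:///missing" return a 404.
prt.Sub = NewCannedRoundTripper(
map[string]string{"/index.txt": "hello"},
map[string]int{"test:///missing": http.StatusNotFound},
)
}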
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"strings"
jc "github.com/juju/testing/checkers"
gc "gopkg.in/check.v1"
)
// CheckWriteFileCommand verifies that the given shell command
// correctly writes the expected content to the given filename. The
// provided parse function decomposes file content into structured data
// that may be correctly compared regardless of ordering within the
// content. If parse is nil then the content lines are used un-parsed.
func CheckWriteFileCommand(c *gc.C, cmd, filename, expected string, parse func(lines []string) interface{}) {
if parse == nil {
parse = func(lines []string) interface{} {
return lines
}
}
lines := strings.Split(strings.TrimSpace(cmd), "\n")
header := lines[0]
footer := lines[len(lines)-1]
parsed := parse(lines[1 : len(lines)-1])
// Check the cat portion.
c.Check(header, gc.Equals, "cat > "+filename+" << 'EOF'")
c.Check(footer, gc.Equals, "EOF")
// Check the conf portion.
expectedParsed := parse(strings.Split(expected, "\n"))
c.Check(parsed, jc.DeepEquals, expectedParsed)
}
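// exampleCheckWriteFile is a hedged sketch, not part of the original file: it
// shows the shape of command CheckWriteFileCommand expects, a here-document
// that cats the content into the target file (the path is hypothetical).
func exampleCheckWriteFile(c *gc.C) {
cmd := "cat > /etc/demo.conf << 'EOF'\nkey: value\nEOF"
// With a nil parse func the content lines are compared as-is.
CheckWriteFileCommand(c, cmd, "/etc/demo.conf", "key: value", nil)
}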
// Copyright 2012, 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"archive/tar"
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
)
// TarFile represents a file to be archived.
type TarFile struct {
Header tar.Header
Contents string
}
var modes = map[os.FileMode]byte{
os.ModeDir: tar.TypeDir,
os.ModeSymlink: tar.TypeSymlink,
0: tar.TypeReg,
}
// NewTarFile returns a new TarFile instance with the given file
// mode and contents.
func NewTarFile(name string, mode os.FileMode, contents string) *TarFile {
ftype := modes[mode&os.ModeType]
if ftype == 0 {
panic(fmt.Errorf("unexpected mode %v", mode))
}
// NOTE: Do not set attributes (e.g. times) dynamically, as various
// tests expect the contents of fake tools archives to be unchanging.
return &TarFile{
Header: tar.Header{
Typeflag: ftype,
Name: name,
Size: int64(len(contents)),
Mode: int64(mode & 0777),
Uname: "ubuntu",
Gname: "ubuntu",
},
Contents: contents,
}
}
// TarGz returns the given files in gzipped tar-archive format, along with the sha256 checksum.
func TarGz(files ...*TarFile) ([]byte, string) {
var buf bytes.Buffer
sha256hash := sha256.New()
gzw := gzip.NewWriter(io.MultiWriter(&buf, sha256hash))
tarw := tar.NewWriter(gzw)
for _, f := range files {
err := tarw.WriteHeader(&f.Header)
if err != nil {
panic(err)
}
_, err = tarw.Write([]byte(f.Contents))
if err != nil {
panic(err)
}
}
err := tarw.Close()
if err != nil {
panic(err)
}
err = gzw.Close()
if err != nil {
panic(err)
}
checksum := fmt.Sprintf("%x", sha256hash.Sum(nil))
return buf.Bytes(), checksum
}
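// exampleTarGz is a hedged sketch, not part of the original file, showing how
// a fake tools archive might be assembled from TarFile entries; the file
// names and contents are hypothetical.
func exampleTarGz() ([]byte, string) {
files := []*TarFile{
NewTarFile("jujud", 0755, "fake jujud binary"),
NewTarFile("downloaded-tools.txt", 0644, "fake metadata"),
}
// Returns the gzipped tarball bytes along with their sha256 checksum.
return TarGz(files...)
}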
// Copyright 2018 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package testing
import (
"time"
)
// ZeroTime can be used in tests instead of time.Now() when the returned
// time.Time value is not relevant.
//
// Example: instead of now := time.Now() use now := testing.ZeroTime().
func ZeroTime() time.Time {
return time.Time{}
}
// NonZeroTime can be used in tests instead of time.Now() when the returned
// time.Time value must be non-zero (its IsZero() method returns false).
func NonZeroTime() time.Time {
return time.Unix(0, 1) // 1 nanosecond since epoch
}
// Copyright 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.
package uuid
import (
"crypto/rand"
"encoding/hex"
"fmt"
"io"
"regexp"
"strings"
)
// UUID represent a universal identifier with 16 octets.
type UUID [16]byte
// regex for validating that the UUID matches RFC 4122.
// This package generates version 4 UUIDs but
// accepts any UUID version.
// http://www.ietf.org/rfc/rfc4122.txt
var (
block1 = "[0-9a-f]{8}"
block2 = "[0-9a-f]{4}"
block3 = "[0-9a-f]{4}"
block4 = "[0-9a-f]{4}"
block5 = "[0-9a-f]{12}"
UUIDSnippet = block1 + "-" + block2 + "-" + block3 + "-" + block4 + "-" + block5
validUUID = regexp.MustCompile("^" + UUIDSnippet + "$")
)
func UUIDFromString(s string) (UUID, error) {
if !IsValidUUIDString(s) {
return UUID{}, fmt.Errorf("invalid UUID: %q", s)
}
s = strings.Replace(s, "-", "", 4)
raw, err := hex.DecodeString(s)
if err != nil {
return UUID{}, err
}
var uuid UUID
copy(uuid[:], raw)
return uuid, nil
}
// IsValidUUIDString returns true if the given string matches the UUID format.
func IsValidUUIDString(s string) bool {
return validUUID.MatchString(s)
}
// MustNewUUID returns a new uuid, if an error occurs it panics.
func MustNewUUID() UUID {
uuid, err := NewUUID()
if err != nil {
panic(err)
}
return uuid
}
// NewUUID generates a new version 4 UUID relying only on random numbers.
func NewUUID() (UUID, error) {
uuid := UUID{}
if _, err := io.ReadFull(rand.Reader, uuid[0:16]); err != nil {
return UUID{}, err
}
// Set version (4) and variant (2) according to RFC 4122.
var version byte = 4 << 4
var variant byte = 8 << 4
uuid[6] = version | (uuid[6] & 15)
uuid[8] = variant | (uuid[8] & 15)
return uuid, nil
}
// Copy returns a copy of the UUID.
func (uuid UUID) Copy() UUID {
uuidCopy := uuid
return uuidCopy
}
// Raw returns a copy of the UUID bytes.
func (uuid UUID) Raw() [16]byte {
return [16]byte(uuid)
}
// String returns a hexadecimal string representation with
// standardized separators.
func (uuid UUID) String() string {
return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:16])
}
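// exampleUUIDRoundTrip is a hedged sketch, not part of the original file: it
// generates a UUID, renders it as a string, and parses it back.
func exampleUUIDRoundTrip() (UUID, error) {
id := MustNewUUID()
s := id.String() // e.g. "xxxxxxxx-xxxx-4xxx-8xxx-xxxxxxxxxxxx"
if !IsValidUUIDString(s) {
return UUID{}, fmt.Errorf("unexpectedly invalid UUID %q", s)
}
return UUIDFromString(s)
}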
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package wrench
import (
"bufio"
"context"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"github.com/juju/juju/core/paths"
internallogger "github.com/juju/juju/internal/logger"
)
var (
enabledMu sync.Mutex
enabled = true
dataDir = paths.DataDir(paths.CurrentOS())
wrenchDir = filepath.Join(dataDir, "wrench")
jujuUid = os.Getuid()
)
var logger = internallogger.GetLogger("juju.wrench")
// IsActive returns true if a "wrench" of a certain category and
// feature should be "dropped in the works".
//
// This function may be called at specific points in the Juju codebase
// to introduce otherwise hard to induce failure modes for the
// purposes of manual or CI testing. The "<juju_datadir>/wrench/"
// directory is checked for "wrench files".
//
// Wrench files are line-based, with each line indicating some
// (mis-)feature to enable for a given part of the code. They should be
// created on the host where the fault should be triggered.
//
// For example, /var/lib/juju/wrench/machine-agent could contain:
//
// refuse-upgrade
// fail-api-server-start
//
// The caller need not worry about errors. Any errors that occur will
// be logged and false will be returned.
func IsActive(category, feature string) bool {
if !IsEnabled() {
return false
}
if !checkWrenchDir(wrenchDir) {
return false
}
fileName := filepath.Join(wrenchDir, category)
if !checkWrenchFile(category, feature, fileName) {
return false
}
wrenchFile, err := os.Open(fileName)
if err != nil {
logger.Errorf(context.TODO(), "unable to read wrench data for %s/%s (ignored): %v",
category, feature, err)
return false
}
defer wrenchFile.Close()
lines := bufio.NewScanner(wrenchFile)
for lines.Scan() {
line := strings.TrimSpace(lines.Text())
if line == feature {
logger.Tracef(context.TODO(), "wrench for %s/%s is active", category, feature)
return true
}
}
if err := lines.Err(); err != nil {
logger.Errorf(context.TODO(), "error while reading wrench data for %s/%s (ignored): %v",
category, feature, err)
}
return false
}
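// exampleWrenchGuard is a hedged sketch, not part of the original file,
// showing how a call site might check for a wrench; the category and feature
// names are hypothetical.
func exampleWrenchGuard() bool {
// Triggered by a line "refuse-upgrade" in
// <juju_datadir>/wrench/machine-agent on the target host.
return IsActive("machine-agent", "refuse-upgrade")
}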
// SetEnabled turns the wrench feature on or off globally.
//
// If false is given, all future IsActive calls will unconditionally
// return false. If true is given, all future IsActive calls will
// return true for active wrenches.
//
// The previous value for the global wrench enable flag is returned.
func SetEnabled(next bool) bool {
enabledMu.Lock()
defer enabledMu.Unlock()
previous := enabled
enabled = next
return previous
}
// IsEnabled returns true if the wrench feature is turned on globally.
func IsEnabled() bool {
enabledMu.Lock()
defer enabledMu.Unlock()
return enabled
}
var stat = os.Stat // To support patching
func checkWrenchDir(dirName string) bool {
dirinfo, err := stat(dirName)
if err != nil {
logger.Tracef(context.TODO(), "couldn't read wrench directory: %v", err)
return false
}
if !isOwnedByJujuUser(dirinfo) {
logger.Errorf(context.TODO(), "wrench directory has incorrect ownership - wrench "+
"functionality disabled (%s)", wrenchDir)
return false
}
return true
}
func checkWrenchFile(category, feature, fileName string) bool {
fileinfo, err := stat(fileName)
if err != nil {
logger.Tracef(context.TODO(), "no wrench data for %s/%s (ignored): %v",
category, feature, err)
return false
}
if !isOwnedByJujuUser(fileinfo) {
logger.Errorf(context.TODO(), "wrench file for %s/%s has incorrect ownership "+
"- ignoring %s", category, feature, fileName)
return false
}
// Windows is not fully POSIX compliant
if runtime.GOOS != "windows" {
if fileinfo.Mode()&0022 != 0 {
logger.Errorf(context.TODO(), "wrench file for %s/%s should only be writable by "+
"owner - ignoring %s", category, feature, fileName)
return false
}
}
return true
}
// Copyright 2014 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
//go:build !windows
package wrench
import (
"os"
"syscall"
)
func isOwnedByJujuUser(fi os.FileInfo) bool {
statStruct, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// Uid check is not supported on this platform so assume
// the owner is ok.
return true
}
return int(statStruct.Uid) == jujuUid
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package osenv
import (
"io/fs"
"os"
"path/filepath"
"runtime"
"sync"
"github.com/juju/utils/v4"
)
// jujuXDGDataHome stores the path to the juju configuration
// folder, which is only meaningful when running the juju
// CLI tool, and is typically defined by $JUJU_DATA or
// $XDG_DATA_HOME/juju or ~/.local/share/juju as default if none
// of the aforementioned variables are defined.
var (
jujuXDGDataHomeMu sync.Mutex
jujuXDGDataHome string
)
const (
// DirectorySubPathSSH is the sub directory under Juju home that holds ssh
// related information for the Juju client.
DirectorySubPathSSH = "ssh"
)
// SetJujuXDGDataHome sets the value of juju home and
// returns the previous one.
func SetJujuXDGDataHome(newJujuXDGDataHome string) string {
jujuXDGDataHomeMu.Lock()
defer jujuXDGDataHomeMu.Unlock()
oldJujuXDGDataHome := jujuXDGDataHome
jujuXDGDataHome = newJujuXDGDataHome
return oldJujuXDGDataHome
}
// JujuXDGDataHome returns the current juju home.
func JujuXDGDataHome() string {
jujuXDGDataHomeMu.Lock()
defer jujuXDGDataHomeMu.Unlock()
return jujuXDGDataHome
}
// JujuXDGDataHomeFS returns a file system rooted at the
// Juju data directory.
func JujuXDGDataHomeFS() fs.FS {
return os.DirFS(JujuXDGDataHomeDir())
}
// JujuXDGDataSSHFS returns a file system rooted at the ssh directory in the Juju
// data directory.
func JujuXDGDataSSHFS() fs.FS {
return os.DirFS(filepath.Join(JujuXDGDataHomeDir(), DirectorySubPathSSH))
}
// JujuXDGDataHomePath returns the path to a file in the
// current juju home.
func JujuXDGDataHomePath(names ...string) string {
all := append([]string{JujuXDGDataHomeDir()}, names...)
return filepath.Join(all...)
}
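// exampleClientPaths is a hedged sketch, not part of the original file: the
// file names below are hypothetical and only demonstrate how paths under the
// Juju data directory are derived.
func exampleClientPaths() (string, string) {
controllersPath := JujuXDGDataHomePath("controllers.yaml")
sshKeyPath := JujuXDGDataHomePath(DirectorySubPathSSH, "juju_id_ed25519")
return controllersPath, sshKeyPath
}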
// JujuXDGDataHomeDir returns the directory where juju should store application-specific files
func JujuXDGDataHomeDir() string {
homeDir := JujuXDGDataHome()
if homeDir != "" {
return homeDir
}
homeDir = os.Getenv(JujuXDGDataHomeEnvKey)
if homeDir == "" {
if runtime.GOOS == "windows" {
homeDir = jujuXDGDataHomeWin()
} else {
homeDir = jujuXDGDataHomeLinux()
}
}
return homeDir
}
// jujuXDGDataHomeLinux returns the directory where juju should store application-specific files on Linux.
func jujuXDGDataHomeLinux() string {
xdgConfig := os.Getenv(XDGDataHome)
if xdgConfig != "" {
return filepath.Join(xdgConfig, "juju")
}
// If XDG_DATA_HOME is not defined, the standard indicates that its default
// value is $HOME/.local/share.
home := utils.Home()
return filepath.Join(home, ".local", "share", "juju")
}
// jujuXDGDataHomeWin returns the directory where juju should store application-specific files on Windows.
func jujuXDGDataHomeWin() string {
appdata := os.Getenv("APPDATA")
if appdata == "" {
return ""
}
return filepath.Join(appdata, "Juju")
}
// Copyright 2013 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package osenv
import (
"runtime"
"strings"
"github.com/juju/juju/internal/featureflag"
)
const (
// If you are adding variables here that could be defined
// in a system and therefore change the behavior of test
// suites, please take a moment to add them to the JujuOSEnvSuite
// setup so they are cleared before running the suites that use
// it.
JujuControllerEnvKey = "JUJU_CONTROLLER"
JujuModelEnvKey = "JUJU_MODEL"
JujuXDGDataHomeEnvKey = "JUJU_DATA"
JujuLoggingConfigEnvKey = "JUJU_LOGGING_CONFIG"
// JujuFeatureFlagEnvKey is used to enable prototype/developer only
// features that we don't want to expose by default to the general user.
// It is propagated as an environment variable to all agents.
JujuFeatureFlagEnvKey = "JUJU_DEV_FEATURE_FLAGS"
// JujuFeatures is used to allow the general user to opt in to new, polished
// features (primarily CLI enhancements) that may break backwards compatibility
// so cannot be enabled by default until the next major Juju revision.
// The features enabled by this flag are expected to have full doc available.
JujuFeatures = "JUJU_FEATURES"
// JujuStartupLoggingConfigEnvKey if set is used to configure the initial
// logging before the command objects are even created to allow debugging
// of the command creation and initialisation process.
JujuStartupLoggingConfigEnvKey = "JUJU_STARTUP_LOGGING_CONFIG"
// Registry key containing juju related information
JujuRegistryKey = `HKLM:\SOFTWARE\juju-core`
// Registry value where the jujud password resides
JujuRegistryPasswordKey = `jujud-password`
// TODO(thumper): 2013-09-02 bug 1219630
// As much as I'd like to remove JujuContainerType now, it is still
// needed as MAAS still needs it at this stage, and we can't fix
// everything at once.
JujuContainerTypeEnvKey = "JUJU_CONTAINER_TYPE"
// JujuStatusIsoTimeEnvKey is the env var which if true, will cause status
// timestamps to be written in RFC3339 format.
JujuStatusIsoTimeEnvKey = "JUJU_STATUS_ISO_TIME"
// XDGDataHome is a path where data for the running user
// should be stored according to the xdg standard.
XDGDataHome = "XDG_DATA_HOME"
)
// FeatureFlags returns a map that can be merged with os.Environ.
func FeatureFlags() map[string]string {
result := make(map[string]string)
if envVar := featureflag.AsEnvironmentValue(); envVar != "" {
result[JujuFeatureFlagEnvKey] = envVar
}
return result
}
// MergeEnvironment will return the current environment updated with
// all the values from newValues. If current is nil, a new map is
// created. If current is not nil, it is mutated.
func MergeEnvironment(current, newValues map[string]string) map[string]string {
if current == nil {
current = make(map[string]string)
}
if runtime.GOOS == "windows" {
return mergeEnvWin(current, newValues)
}
return mergeEnvUnix(current, newValues)
}
// mergeEnvUnix merges the two environment variable lists in a case-sensitive way.
func mergeEnvUnix(current, newValues map[string]string) map[string]string {
for key, value := range newValues {
current[key] = value
}
return current
}
// mergeEnvWin merges the two environment variable lists in a case-insensitive,
// but case-preserving way. Thus, if FOO=bar is set, and newValues has foo=baz,
// then the resultant map will contain FOO=baz.
func mergeEnvWin(current, newValues map[string]string) map[string]string {
uppers := make(map[string]string, len(current))
news := map[string]string{}
for k, v := range current {
uppers[strings.ToUpper(k)] = v
}
for k, v := range newValues {
up := strings.ToUpper(k)
if _, ok := uppers[up]; ok {
uppers[up] = v
} else {
news[k] = v
}
}
for k := range current {
current[k] = uppers[strings.ToUpper(k)]
}
for k, v := range news {
current[k] = v
}
return current
}
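// exampleMergeEnvWin is a hedged sketch, not part of the original file,
// illustrating the case-insensitive, case-preserving merge: FOO keeps its
// original key but takes the new value supplied as "foo".
func exampleMergeEnvWin() map[string]string {
current := map[string]string{"FOO": "bar", "PATH": `C:\Windows`}
// Result: FOO=baz (updated in place), PATH unchanged, NEW=1 added.
return mergeEnvWin(current, map[string]string{"foo": "baz", "NEW": "1"})
}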