// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"istio.io/istio/istioctl/pkg/util/handlers"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/ptr"
)
// Context provides the shared environment (Kubernetes clients, namespace
// settings) used by istioctl commands. It is implemented by a real
// kubeconfig-backed instance and by a fake used in tests.
type Context interface {
	// CLIClient returns a client for the default revision
	CLIClient() (kube.CLIClient, error)
	// CLIClientWithRevision returns a client for the given revision
	CLIClientWithRevision(rev string) (kube.CLIClient, error)
	// InferPodInfoFromTypedResource returns the pod name and namespace for the given typed resource
	InferPodInfoFromTypedResource(name, namespace string) (pod string, ns string, err error)
	// InferPodsFromTypedResource returns the pod names and namespace for the given typed resource
	InferPodsFromTypedResource(name, namespace string) ([]string, string, error)
	// Namespace returns the namespace specified by the user
	Namespace() string
	// IstioNamespace returns the Istio namespace specified by the user
	IstioNamespace() string
	// NamespaceOrDefault returns the namespace specified by the user, or the default namespace if none was specified
	NamespaceOrDefault(namespace string) string
}
// instance is the kubeconfig-backed implementation of Context.
type instance struct {
	// clients are cached clients for each revision, built lazily on first use
	clients map[string]kube.CLIClient
	RootFlags
}
// newKubeClientWithRevision builds a CLI client for the given kubeconfig,
// context, and revision, with elevated client-side rate limits suited to
// one-off local commands.
func newKubeClientWithRevision(kubeconfig, configContext, revision string) (kube.CLIClient, error) {
	restConfig, err := kube.DefaultRestConfig(kubeconfig, configContext, func(config *rest.Config) {
		// A one-off local command doesn't need conservative rate limiting;
		// raising QPS/Burst greatly decreases install time.
		config.QPS = 50
		config.Burst = 100
	})
	if err != nil {
		return nil, err
	}
	return kube.NewCLIClient(kube.NewClientConfigForRestConfig(restConfig), revision)
}
// NewCLIContext creates a Context backed by the given root flags. A nil
// rootFlags is replaced with an empty flag set.
func NewCLIContext(rootFlags *RootFlags) Context {
	flags := rootFlags
	if flags == nil {
		flags = &RootFlags{
			kubeconfig:       ptr.Of(""),
			configContext:    ptr.Of(""),
			namespace:        ptr.Of(""),
			istioNamespace:   ptr.Of(""),
			defaultNamespace: "",
		}
	}
	return &instance{
		RootFlags: *flags,
	}
}
// CLIClientWithRevision returns a client for the given revision, creating and
// caching it on first use.
func (i *instance) CLIClientWithRevision(rev string) (kube.CLIClient, error) {
	if i.clients == nil {
		i.clients = map[string]kube.CLIClient{}
	}
	if cached := i.clients[rev]; cached != nil {
		return cached, nil
	}
	client, err := newKubeClientWithRevision(*i.kubeconfig, *i.configContext, rev)
	if err != nil {
		return nil, err
	}
	i.clients[rev] = client
	return client, nil
}
// CLIClient returns a (cached) client for the default revision.
func (i *instance) CLIClient() (kube.CLIClient, error) {
	return i.CLIClientWithRevision("")
}
// InferPodInfoFromTypedResource resolves a typed resource reference (e.g. a
// deployment name) to a single pod name and namespace.
func (i *instance) InferPodInfoFromTypedResource(name, namespace string) (pod string, ns string, err error) {
	c, err := i.CLIClient()
	if err != nil {
		return "", "", err
	}
	factory := MakeKubeFactory(c)
	return handlers.InferPodInfoFromTypedResource(name, i.NamespaceOrDefault(namespace), factory)
}
// InferPodsFromTypedResource resolves a typed resource reference to the set of
// matching pod names and their namespace.
func (i *instance) InferPodsFromTypedResource(name, namespace string) ([]string, string, error) {
	c, err := i.CLIClient()
	if err != nil {
		return nil, "", err
	}
	factory := MakeKubeFactory(c)
	return handlers.InferPodsFromTypedResource(name, i.NamespaceOrDefault(namespace), factory)
}
// NamespaceOrDefault returns namespace, falling back to the kubeconfig's
// default namespace when namespace is empty.
func (i *instance) NamespaceOrDefault(namespace string) string {
	return handleNamespace(namespace, i.DefaultNamespace())
}
// handleNamespace substitutes defaultNamespace when ns is unset (the empty
// "all namespaces" value).
func handleNamespace(ns, defaultNamespace string) string {
	if ns != corev1.NamespaceAll {
		return ns
	}
	return defaultNamespace
}
// fakeInstance is the test implementation of Context, backed by fake
// Kubernetes clients wrapped in MockClient.
type fakeInstance struct {
	// clients are cached clients for each revision
	clients map[string]kube.CLIClient
	// rootFlags supplies the namespace values reported by this fake
	rootFlags *RootFlags
	// results maps pod names to canned response payloads for MockClient
	results map[string][]byte
	// objects are pre-populated into the fake Kubernetes client
	objects []runtime.Object
	// version, when non-empty, is the Kubernetes version the fake client reports
	version string
}
// CLIClientWithRevision returns a fake client for the given revision, creating
// and caching a MockClient-wrapped fake on first use.
func (f *fakeInstance) CLIClientWithRevision(rev string) (kube.CLIClient, error) {
	if cached, ok := f.clients[rev]; ok {
		return cached, nil
	}
	var base kube.CLIClient
	if f.version == "" {
		base = kube.NewFakeClient(f.objects...)
	} else {
		base = kube.NewFakeClientWithVersion(f.version, f.objects...)
	}
	if rev != "" {
		kube.SetRevisionForTest(base, rev)
	}
	client := MockClient{
		CLIClient: base,
		Results:   f.results,
	}
	f.clients[rev] = client
	return client, nil
}
// CLIClient returns the fake client for the default revision.
func (f *fakeInstance) CLIClient() (kube.CLIClient, error) {
	return f.CLIClientWithRevision("")
}
// InferPodInfoFromTypedResource resolves a typed resource reference to a single
// pod name and namespace using the fake client.
func (f *fakeInstance) InferPodInfoFromTypedResource(name, namespace string) (pod string, ns string, err error) {
	c, err := f.CLIClient()
	if err != nil {
		return "", "", err
	}
	factory := MakeKubeFactory(c)
	return handlers.InferPodInfoFromTypedResource(name, f.NamespaceOrDefault(namespace), factory)
}
// InferPodsFromTypedResource resolves a typed resource reference to the
// matching pod names and namespace using the fake client.
func (f *fakeInstance) InferPodsFromTypedResource(name, namespace string) ([]string, string, error) {
	c, err := f.CLIClient()
	if err != nil {
		return nil, "", err
	}
	factory := MakeKubeFactory(c)
	return handlers.InferPodsFromTypedResource(name, f.NamespaceOrDefault(namespace), factory)
}
// NamespaceOrDefault returns namespace, falling back to the fake's configured
// default namespace when namespace is empty.
func (f *fakeInstance) NamespaceOrDefault(namespace string) string {
	return handleNamespace(namespace, f.rootFlags.defaultNamespace)
}
// Namespace returns the namespace configured on the fake's root flags.
func (f *fakeInstance) Namespace() string {
	return f.rootFlags.Namespace()
}
// IstioNamespace returns the Istio namespace configured on the fake's root flags.
func (f *fakeInstance) IstioNamespace() string {
	return f.rootFlags.IstioNamespace()
}
// NewFakeContextOption configures the fake Context built by NewFakeContext.
type NewFakeContextOption struct {
	// Namespace is the value reported by Context.Namespace()
	Namespace string
	// IstioNamespace is the value reported by Context.IstioNamespace()
	IstioNamespace string
	// Results maps pod names to canned response payloads for the mock client
	Results map[string][]byte
	// Objects are the objects to be applied to the fake client
	Objects []runtime.Object
	// Version is the version of the fake client
	Version string
}
// NewFakeContext creates a fake Context for tests; a nil opts yields defaults.
func NewFakeContext(opts *NewFakeContextOption) Context {
	if opts == nil {
		opts = &NewFakeContextOption{}
	}
	namespace := opts.Namespace
	istioNamespace := opts.IstioNamespace
	return &fakeInstance{
		clients: make(map[string]kube.CLIClient),
		rootFlags: &RootFlags{
			kubeconfig:       ptr.Of(""),
			configContext:    ptr.Of(""),
			namespace:        &namespace,
			istioNamespace:   &istioNamespace,
			defaultNamespace: "",
		},
		results: opts.Results,
		objects: opts.Objects,
		version: opts.Version,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/cli-runtime/pkg/resource"
openapiclient "k8s.io/client-go/openapi"
"k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/util/openapi"
"k8s.io/kubectl/pkg/validation"
"istio.io/istio/pkg/kube"
)
// Factory completes a kube.PartialFactory into a full util.Factory by
// delegating the remaining methods to an embedded full kubectl factory.
type Factory struct {
	kube.PartialFactory
	full util.Factory
}
// NewBuilder delegates to the full kubectl factory.
func (f Factory) NewBuilder() *resource.Builder {
	return f.full.NewBuilder()
}
// ClientForMapping delegates to the full kubectl factory.
func (f Factory) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) {
	return f.full.ClientForMapping(mapping)
}
// UnstructuredClientForMapping delegates to the full kubectl factory.
func (f Factory) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) {
	return f.full.UnstructuredClientForMapping(mapping)
}
// Validator delegates to the full kubectl factory.
func (f Factory) Validator(validationDirective string) (validation.Schema, error) {
	return f.full.Validator(validationDirective)
}
// OpenAPISchema delegates to the full kubectl factory.
func (f Factory) OpenAPISchema() (openapi.Resources, error) {
	return f.full.OpenAPISchema()
}
// OpenAPIV3Client delegates to the full kubectl factory.
func (f Factory) OpenAPIV3Client() (openapiclient.Client, error) {
	return f.full.OpenAPIV3Client()
}
// Compile-time assertion that Factory satisfies the full kubectl factory interface.
var _ util.Factory = Factory{}

// MakeKubeFactory turns a partial kubectl factory from CLIClient into a full util.Factory
// This is done under istioctl/ to avoid excessive binary bloat in other packages; this pulls in around 10mb of
// dependencies.
var MakeKubeFactory = func(k kube.CLIClient) util.Factory {
	kf := k.UtilFactory()
	return Factory{
		PartialFactory: kf,
		full:           util.NewFactory(kf),
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"context"
"fmt"
"istio.io/istio/pkg/kube"
)
// MockPortForwarder is a no-op kube.PortForwarder used in tests.
type MockPortForwarder struct{}

// Start is a no-op; no connection is ever opened.
func (m MockPortForwarder) Start() error {
	return nil
}

// Address returns a fixed local address.
func (m MockPortForwarder) Address() string {
	return "localhost:3456"
}

// Close is a no-op.
func (m MockPortForwarder) Close() {
}

// ErrChan returns a channel that never receives (no errors are ever reported).
func (m MockPortForwarder) ErrChan() <-chan error {
	return make(chan error)
}

// WaitForStop returns immediately.
func (m MockPortForwarder) WaitForStop() {
}

// Compile-time assertion that MockPortForwarder satisfies kube.PortForwarder.
var _ kube.PortForwarder = MockPortForwarder{}
// MockClient wraps a CLIClient for tests: port-forwarding is mocked out and
// Envoy/discovery calls return canned per-pod payloads from Results.
type MockClient struct {
	// Results is a map of podName to the results of the expected test on the pod
	Results map[string][]byte
	kube.CLIClient
}
// NewPortForwarder always returns a no-op MockPortForwarder.
func (c MockClient) NewPortForwarder(_, _, _ string, _, _ int) (kube.PortForwarder, error) {
	return MockPortForwarder{}, nil
}
// AllDiscoveryDo returns the entire canned Results map, ignoring its arguments.
func (c MockClient) AllDiscoveryDo(_ context.Context, _, _ string) (map[string][]byte, error) {
	return c.Results, nil
}
// EnvoyDo returns the canned payload registered for podName, or an error
// mimicking a missing pod when no payload is registered.
func (c MockClient) EnvoyDo(ctx context.Context, podName, podNamespace, method, path string) ([]byte, error) {
	if results, ok := c.Results[podName]; ok {
		return results, nil
	}
	return nil, fmt.Errorf("unable to retrieve Pod: pods %q not found", podName)
}
// EnvoyDoWithPort behaves like EnvoyDo; the port argument is ignored by the mock.
func (c MockClient) EnvoyDoWithPort(ctx context.Context, podName, podNamespace, method, path string, port int) ([]byte, error) {
	if results, ok := c.Results[podName]; ok {
		return results, nil
	}
	return nil, fmt.Errorf("unable to retrieve Pod: pods %q not found", podName)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"github.com/spf13/pflag"
"github.com/spf13/viper"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/clientcmd"
"istio.io/istio/pkg/ptr"
)
// Names of the istioctl root-level persistent flags.
const (
	FlagKubeConfig     = "kubeconfig"
	FlagContext        = "context"
	FlagNamespace      = "namespace"
	FlagIstioNamespace = "istioNamespace"
)
// RootFlags holds the values of the istioctl root-level flags plus the
// lazily-resolved default namespace.
type RootFlags struct {
	// kubeconfig is the path to the Kubernetes configuration file (-c)
	kubeconfig *string
	// configContext is the kubeconfig context to use (--context)
	configContext *string
	// namespace is the target namespace (-n); empty means unset
	namespace *string
	// istioNamespace is the Istio system namespace (-i)
	istioNamespace *string
	// defaultNamespace caches the namespace resolved from the kubeconfig
	defaultNamespace string
}
// AddRootFlags registers the istioctl root-level flags on the given flag set
// and returns a RootFlags bound to their values.
func AddRootFlags(flags *pflag.FlagSet) *RootFlags {
	root := &RootFlags{
		kubeconfig:     ptr.Of(""),
		configContext:  ptr.Of(""),
		namespace:      ptr.Of(""),
		istioNamespace: ptr.Of(""),
	}
	flags.StringVarP(root.kubeconfig, FlagKubeConfig, "c", "",
		"Kubernetes configuration file")
	flags.StringVar(root.configContext, FlagContext, "",
		"Kubernetes configuration context")
	flags.StringVarP(root.namespace, FlagNamespace, "n", v1.NamespaceAll,
		"Kubernetes namespace")
	flags.StringVarP(root.istioNamespace, FlagIstioNamespace, "i", viper.GetString(FlagIstioNamespace),
		"Istio system namespace")
	return root
}
// Namespace returns the namespace flag value (-n).
func (r *RootFlags) Namespace() string {
	return *r.namespace
}
// IstioNamespace returns the istioNamespace flag value (-i).
func (r *RootFlags) IstioNamespace() string {
	return *r.istioNamespace
}
// DefaultNamespace returns the default namespace to use, resolving it from the
// kubeconfig on first call and caching the result.
func (r *RootFlags) DefaultNamespace() string {
	if r.defaultNamespace == "" {
		r.configureDefaultNamespace()
	}
	return r.defaultNamespace
}
// configureDefaultNamespace resolves the default namespace from the kubeconfig
// (honoring an explicit --context), falling back to "default" when the config
// cannot be read or the selected context has no namespace.
func (r *RootFlags) configureDefaultNamespace() {
	configAccess := clientcmd.NewDefaultPathOptions()
	if kubeconfig := *r.kubeconfig; kubeconfig != "" {
		// Read from the explicitly specified kubeconfig file.
		configAccess.GlobalFile = kubeconfig
	}
	// Gets the existing kubeconfig or a new empty config.
	config, err := configAccess.GetStartingConfig()
	if err != nil {
		r.defaultNamespace = v1.NamespaceDefault
		return
	}
	// Prefer an explicitly selected context; otherwise use the current one.
	selected := config.CurrentContext
	if override := *r.configContext; override != "" {
		selected = override
	}
	// Use the namespace associated with the selected context, if it has one.
	kubeContext, ok := config.Contexts[selected]
	if !ok || kubeContext.Namespace == "" {
		r.defaultNamespace = v1.NamespaceDefault
		return
	}
	r.defaultNamespace = kubeContext.Namespace
}
// Copyright Istio Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sversion
import (
"fmt"
goversion "github.com/hashicorp/go-version"
"k8s.io/apimachinery/pkg/version"
"istio.io/istio/operator/pkg/util/clog"
"istio.io/istio/pkg/kube"
pkgVersion "istio.io/istio/pkg/version"
)
const (
	// MinK8SVersion is the minimum k8s (minor) version required to run this version of Istio
	// https://istio.io/docs/setup/platform-setup/
	MinK8SVersion = 26
	// UnSupportedK8SVersionLogMsg is the warning printed when the cluster is older than
	// MinK8SVersion; format args: cluster version, Istio version, minimum minor version.
	UnSupportedK8SVersionLogMsg = "\nThe Kubernetes version %s is not supported by Istio %s. The minimum supported Kubernetes version is 1.%d.\n" +
		"Proceeding with the installation, but you might experience problems. " +
		"See https://istio.io/latest/docs/releases/supported-releases/ for a list of supported versions.\n"
)
// CheckKubernetesVersion reports whether the given Kubernetes version meets
// the minimum minor version required by this Istio release.
func CheckKubernetesVersion(versionInfo *version.Info) (bool, error) {
	minor, err := extractKubernetesVersion(versionInfo)
	if err != nil {
		return false, err
	}
	return minor >= MinK8SVersion, nil
}
// extractKubernetesVersion returns the Kubernetes minor version. For example, `v1.19.1` will return `19`.
func extractKubernetesVersion(versionInfo *version.Info) (int, error) {
	ver, err := goversion.NewVersion(versionInfo.String())
	if err != nil {
		// Wrap with %w (not %v) so callers can unwrap, and include the
		// unparsable input for context.
		return 0, fmt.Errorf("could not parse %q: %w", versionInfo.String(), err)
	}
	// Segments provides a slice of ints, eg: v1.19.1 => [1, 19, 1].
	segments := ver.Segments()
	if len(segments) < 2 {
		// Defensive: guard the index even though go-version pads segments.
		return 0, fmt.Errorf("could not find minor version in %q", versionInfo.String())
	}
	return segments[1], nil
}
// IsK8VersionSupported checks minimum supported Kubernetes version for Istio.
// If the K8s version is not at least MinK8SVersion, it logs a message warning
// the user that they may experience problems if they proceed with the install.
func IsK8VersionSupported(c kube.Client, l clog.Logger) error {
	serverVersion, err := c.GetKubernetesVersion()
	if err != nil {
		return fmt.Errorf("error getting Kubernetes version: %w", err)
	}
	if kube.IsAtLeastVersion(c, MinK8SVersion) {
		return nil
	}
	l.LogAndPrintf(UnSupportedK8SVersionLogMsg, serverVersion.GitVersion, pkgVersion.Info.Version, MinK8SVersion)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tag
import (
"bytes"
"context"
"fmt"
"net/url"
"strings"
admitv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/maps"
)
const (
	// IstioTagLabel is the webhook label carrying the revision tag name.
	IstioTagLabel = "istio.io/tag"
	// DefaultRevisionName is the name of the default revision.
	DefaultRevisionName = "default"
	// defaultChart is the Helm chart used to render the validating webhook.
	defaultChart = "default"
	// pilotDiscoveryChart is the Helm chart used to render the mutating webhook.
	pilotDiscoveryChart = "istio-control/istio-discovery"
	// revisionTagTemplateName is the template rendered for revision tags.
	revisionTagTemplateName = "revision-tags.yaml"
	// vwhTemplateName is the template rendered for the validating webhook.
	vwhTemplateName = "validatingwebhook.yaml"
	// istioInjectionWebhookSuffix identifies the sidecar-injector webhook entry.
	istioInjectionWebhookSuffix = "sidecar-injector.istio.io"
	// vwhBaseTemplateName is the name of the in-cluster default validator.
	vwhBaseTemplateName = "istiod-default-validator"
	// operatorNamespace is the label-name substring marking operator-managed labels.
	operatorNamespace = "operator.istio.io"
)
// tagWebhookConfig holds config needed to render a tag webhook.
// Most fields are copied from the canonical revision webhook by
// tagWebhookConfigFromCanonicalWebhook.
type tagWebhookConfig struct {
	// Tag is the revision tag name.
	Tag string
	// Revision is the revision the tag points at (empty for "default").
	Revision string
	// URL is the injection endpoint URL, when the canonical webhook uses one.
	URL string
	// Path is the service path from the canonical webhook's client config.
	Path string
	// CABundle is the CA bundle copied from the canonical webhook.
	CABundle string
	// IstioNamespace is the namespace the webhook templates are rendered for.
	IstioNamespace string
	// Labels are carried over (minus "app") from the canonical webhook.
	Labels map[string]string
	// Annotations are carried over from the canonical webhook.
	Annotations map[string]string
	// FailurePolicy records the failure policy to use for the webhook.
	FailurePolicy map[string]*admitv1.FailurePolicyType
}
// GenerateOptions is the group of options needed to generate a tag webhook.
type GenerateOptions struct {
	// Tag is the name of the revision tag to generate.
	Tag string
	// Revision is the revision to associate the revision tag with.
	Revision string
	// WebhookName is an override for the mutating webhook name.
	WebhookName string
	// ManifestsPath specifies where the manifests to render the mutatingwebhook can be found.
	// TODO(Monkeyanator) once we stop using Helm templating remove this.
	ManifestsPath string
	// Generate determines whether we should just generate the webhooks without applying. This
	// applying is not done here, but we are looser with checks when doing generate.
	Generate bool
	// Overwrite removes analysis checks around existing webhooks.
	Overwrite bool
	// AutoInjectNamespaces controls whether the sidecars should be injected into all namespaces by default.
	AutoInjectNamespaces bool
	// CustomLabels are labels to add to the generated webhook.
	CustomLabels map[string]string
	// UserManaged indicates whether the revision tag is user managed.
	// If true, the revision tag will not be affected by the installer.
	UserManaged bool
}
// Generate generates the manifests for a revision tag pointed the given revision.
// It renders a MutatingWebhookConfiguration for the tag; when the tag is the
// default revision it additionally deactivates other default injectors (unless
// only generating) and appends a ValidatingWebhookConfiguration to the output.
func Generate(ctx context.Context, client kube.Client, opts *GenerateOptions, istioNS string) (string, error) {
	// abort if there exists a revision with the target tag name
	revWebhookCollisions, err := GetWebhooksWithRevision(ctx, client.Kube(), opts.Tag)
	if err != nil {
		return "", err
	}
	if !opts.Generate && !opts.Overwrite &&
		len(revWebhookCollisions) > 0 && opts.Tag != DefaultRevisionName {
		return "", fmt.Errorf("cannot create revision tag %q: found existing control plane revision with same name", opts.Tag)
	}
	// find canonical revision webhook to base our tag webhook off of
	revWebhooks, err := GetWebhooksWithRevision(ctx, client.Kube(), opts.Revision)
	if err != nil {
		return "", err
	}
	if len(revWebhooks) == 0 {
		return "", fmt.Errorf("cannot modify tag: cannot find MutatingWebhookConfiguration with revision %q", opts.Revision)
	}
	if len(revWebhooks) > 1 {
		return "", fmt.Errorf("cannot modify tag: found multiple canonical webhooks with revision %q", opts.Revision)
	}
	// refuse to overwrite an existing tag unless --overwrite was given
	whs, err := GetWebhooksWithTag(ctx, client.Kube(), opts.Tag)
	if err != nil {
		return "", err
	}
	if len(whs) > 0 && !opts.Overwrite {
		return "", fmt.Errorf("revision tag %q already exists, and --overwrite is false", opts.Tag)
	}
	tagWhConfig, err := tagWebhookConfigFromCanonicalWebhook(revWebhooks[0], opts.Tag, istioNS)
	if err != nil {
		return "", fmt.Errorf("failed to create tag webhook config: %w", err)
	}
	tagWhYAML, err := generateMutatingWebhook(tagWhConfig, opts)
	if err != nil {
		return "", fmt.Errorf("failed to create tag webhook: %w", err)
	}
	if opts.Tag == DefaultRevisionName {
		if !opts.Generate {
			// deactivate other istio-injection=enabled injectors if using default revisions.
			err := DeactivateIstioInjectionWebhook(ctx, client.Kube())
			if err != nil {
				return "", fmt.Errorf("failed deactivating existing default revision: %w", err)
			}
		}
		// TODO(Monkeyanator) should extract the validationURL from revision's validating webhook here. However,
		// to ease complexity when pointing default to revision without per-revision validating webhook,
		// instead grab the endpoint information from the mutating webhook. This is not strictly correct.
		validationWhConfig, err := fixWhConfig(client, tagWhConfig)
		if err != nil {
			return "", fmt.Errorf("failed to create validating webhook config: %w", err)
		}
		vwhYAML, err := generateValidatingWebhook(validationWhConfig, opts)
		if err != nil {
			return "", fmt.Errorf("failed to create validating webhook: %w", err)
		}
		// append the validating webhook to the mutating webhook manifest
		tagWhYAML = fmt.Sprintf(`%s
%s
%s`, tagWhYAML, helm.YAMLSeparator, vwhYAML)
	}
	return tagWhYAML, nil
}
// fixWhConfig adapts a tag webhook config for use as a validating webhook:
// the endpoint path is switched to /validate and failure policies from the
// live default validator are recorded so they are not reset on apply.
func fixWhConfig(client kube.Client, whConfig *tagWebhookConfig) (*tagWebhookConfig, error) {
	if whConfig.URL != "" {
		webhookURL, err := url.Parse(whConfig.URL)
		if err == nil {
			webhookURL.Path = "/validate"
			whConfig.URL = webhookURL.String()
		}
	}
	// ValidatingWebhookConfiguration failurePolicy is managed by Istiod, so if currently we already have a webhook in cluster
	// that is set to `Fail` by Istiod, we avoid of setting it back to the default `Ignore`.
	vwh, err := client.Kube().AdmissionregistrationV1().ValidatingWebhookConfigurations().
		Get(context.Background(), vwhBaseTemplateName, metav1.GetOptions{})
	if err != nil && !errors.IsNotFound(err) {
		return nil, err
	}
	// NOTE(review): typed client-go Gets generally return a non-nil empty object
	// alongside a NotFound error, so this nil check may never fire; in that case
	// the loop below is a no-op over zero webhooks — confirm against the
	// client-go version in use.
	if vwh == nil {
		return whConfig, nil
	}
	if whConfig.FailurePolicy == nil {
		whConfig.FailurePolicy = map[string]*admitv1.FailurePolicyType{}
	}
	for _, wh := range vwh.Webhooks {
		if wh.FailurePolicy != nil && *wh.FailurePolicy == admitv1.Fail {
			// Record nil so the rendered webhook carries no explicit policy;
			// presumably this lets the live Fail policy stand — verify.
			whConfig.FailurePolicy[wh.Name] = nil
		} else {
			whConfig.FailurePolicy[wh.Name] = wh.FailurePolicy
		}
	}
	return whConfig, nil
}
// Create applies the given tag manifests to the cluster in namespace ns.
func Create(client kube.CLIClient, manifests, ns string) error {
	if err := client.ApplyYAMLContents(ns, manifests); err != nil {
		// Wrap with %w (not %v) so callers can unwrap the underlying apply error.
		return fmt.Errorf("failed to apply tag manifests to cluster: %w", err)
	}
	return nil
}
// generateValidatingWebhook renders a validating webhook configuration from the given tagWebhookConfig.
// It renders the base chart's validating webhook template, decodes it, patches
// in the CA bundle, labels, annotations, and recorded failure policies, then
// re-serializes the object to YAML.
func generateValidatingWebhook(config *tagWebhookConfig, opts *GenerateOptions) (string, error) {
	r := helm.NewHelmRenderer(opts.ManifestsPath, defaultChart, "Pilot", config.IstioNamespace, nil)
	if err := r.Run(); err != nil {
		return "", fmt.Errorf("failed running Helm renderer: %v", err)
	}
	values := fmt.Sprintf(`
global:
  istioNamespace: %s
  revision: %q
base:
  validationURL: %s
`, config.IstioNamespace, config.Revision, config.URL)
	validatingWebhookYAML, err := r.RenderManifestFiltered(values, func(tmplName string) bool {
		return strings.Contains(tmplName, vwhTemplateName)
	})
	if err != nil {
		return "", fmt.Errorf("failed rendering istio-control manifest: %v", err)
	}
	scheme := runtime.NewScheme()
	codecFactory := serializer.NewCodecFactory(scheme)
	deserializer := codecFactory.UniversalDeserializer()
	// Named yamlSerializer to avoid shadowing the imported serializer package used above.
	yamlSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, nil, nil, json.SerializerOptions{
			Yaml:   true,
			Pretty: true,
			Strict: true,
		})
	whObject, _, err := deserializer.Decode([]byte(validatingWebhookYAML), nil, &admitv1.ValidatingWebhookConfiguration{})
	if err != nil {
		return "", fmt.Errorf("could not decode generated webhook: %w", err)
	}
	decodedWh := whObject.(*admitv1.ValidatingWebhookConfiguration)
	for i := range decodedWh.Webhooks {
		decodedWh.Webhooks[i].ClientConfig.CABundle = []byte(config.CABundle)
	}
	decodedWh.Labels = generateLabels(decodedWh.Labels, config.Labels, opts.CustomLabels, opts.UserManaged)
	decodedWh.Annotations = maps.MergeCopy(decodedWh.Annotations, config.Annotations)
	// Apply any failure policies recorded from the live default validator.
	for i := range decodedWh.Webhooks {
		if failurePolicy, ok := config.FailurePolicy[decodedWh.Webhooks[i].Name]; ok {
			decodedWh.Webhooks[i].FailurePolicy = failurePolicy
		}
	}
	whBuf := new(bytes.Buffer)
	if err = yamlSerializer.Encode(decodedWh, whBuf); err != nil {
		return "", err
	}
	return whBuf.String(), nil
}
// generateLabels merges the rendered webhook's labels with the canonical
// webhook's labels and any user-supplied custom labels. When the tag is user
// managed, operator ownership labels are stripped so the installer leaves the
// webhook alone.
func generateLabels(whLabels, curLabels, customLabels map[string]string, userManaged bool) map[string]string {
	merged := maps.MergeCopy(whLabels, curLabels)
	merged = maps.MergeCopy(merged, customLabels)
	if !userManaged {
		return merged
	}
	for label := range merged {
		if strings.Contains(label, operatorNamespace) {
			delete(merged, label)
		}
	}
	return merged
}
// generateMutatingWebhook renders a mutating webhook configuration from the given tagWebhookConfig.
// It renders the revision-tags template from the pilot discovery chart, decodes
// it, patches in the CA bundle, service path, optional name override, labels,
// and annotations, then re-serializes the object to YAML.
func generateMutatingWebhook(config *tagWebhookConfig, opts *GenerateOptions) (string, error) {
	r := helm.NewHelmRenderer(opts.ManifestsPath, pilotDiscoveryChart, "Pilot", config.IstioNamespace, nil)
	if err := r.Run(); err != nil {
		return "", fmt.Errorf("failed running Helm renderer: %v", err)
	}
	values := fmt.Sprintf(`
revision: %q
revisionTags:
  - %s
sidecarInjectorWebhook:
  enableNamespacesByDefault: %t
  objectSelector:
    enabled: true
    autoInject: true
istiodRemote:
  injectionURL: %s
`, config.Revision, config.Tag, opts.AutoInjectNamespaces, config.URL)
	tagWebhookYaml, err := r.RenderManifestFiltered(values, func(tmplName string) bool {
		return strings.Contains(tmplName, revisionTagTemplateName)
	})
	if err != nil {
		return "", fmt.Errorf("failed rendering istio-control manifest: %v", err)
	}
	scheme := runtime.NewScheme()
	codecFactory := serializer.NewCodecFactory(scheme)
	deserializer := codecFactory.UniversalDeserializer()
	// Named yamlSerializer to avoid shadowing the imported serializer package used above.
	yamlSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, nil, nil, json.SerializerOptions{
			Yaml:   true,
			Pretty: true,
			Strict: true,
		})
	whObject, _, err := deserializer.Decode([]byte(tagWebhookYaml), nil, &admitv1.MutatingWebhookConfiguration{})
	if err != nil {
		return "", fmt.Errorf("could not decode generated webhook: %w", err)
	}
	decodedWh := whObject.(*admitv1.MutatingWebhookConfiguration)
	for i := range decodedWh.Webhooks {
		decodedWh.Webhooks[i].ClientConfig.CABundle = []byte(config.CABundle)
		if decodedWh.Webhooks[i].ClientConfig.Service != nil {
			decodedWh.Webhooks[i].ClientConfig.Service.Path = &config.Path
		}
	}
	if opts.WebhookName != "" {
		decodedWh.Name = opts.WebhookName
	}
	decodedWh.Labels = generateLabels(decodedWh.Labels, config.Labels, opts.CustomLabels, opts.UserManaged)
	decodedWh.Annotations = maps.MergeCopy(decodedWh.Annotations, config.Annotations)
	whBuf := new(bytes.Buffer)
	if err = yamlSerializer.Encode(decodedWh, whBuf); err != nil {
		return "", err
	}
	return whBuf.String(), nil
}
// tagWebhookConfigFromCanonicalWebhook parses configuration needed to create a
// tag webhook from an existing revision's canonical webhook: the injection
// endpoint (URL or service path), CA bundle, labels (minus "app"), and
// annotations are copied over.
func tagWebhookConfigFromCanonicalWebhook(wh admitv1.MutatingWebhookConfiguration, tagName, istioNS string) (*tagWebhookConfig, error) {
	rev, err := GetWebhookRevision(wh)
	if err != nil {
		return nil, err
	}
	// The "default" revision renders templates with an empty revision value.
	if rev == DefaultRevisionName {
		rev = ""
	}
	var injectionURL, caBundle, path string
	found := false
	for _, w := range wh.Webhooks {
		if !strings.HasSuffix(w.Name, istioInjectionWebhookSuffix) {
			continue
		}
		found = true
		caBundle = string(w.ClientConfig.CABundle)
		if w.ClientConfig.URL != nil {
			injectionURL = *w.ClientConfig.URL
		}
		if svc := w.ClientConfig.Service; svc != nil && svc.Path != nil {
			path = *svc.Path
		}
		break
	}
	if !found {
		return nil, fmt.Errorf("could not find sidecar-injector webhook in canonical webhook %q", wh.Name)
	}
	// Here we filter out the "app" label, to generate a general label set for the incoming generated
	// MutatingWebhookConfiguration and ValidatingWebhookConfiguration. The app of the webhooks are not general
	// since they are functioned differently with different name.
	// The filtered common labels are then added to the incoming generated
	// webhooks, which aids in managing these webhooks via the istioctl/operator.
	filteredLabels := make(map[string]string, len(wh.Labels))
	for k, v := range wh.Labels {
		if k != "app" {
			filteredLabels[k] = v
		}
	}
	return &tagWebhookConfig{
		Tag:            tagName,
		Revision:       rev,
		URL:            injectionURL,
		CABundle:       caBundle,
		IstioNamespace: istioNS,
		Path:           path,
		Labels:         filteredLabels,
		Annotations:    wh.Annotations,
		FailurePolicy:  map[string]*admitv1.FailurePolicyType{},
	}, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tag
import (
"context"
"fmt"
admitv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/api/label"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/pkg/kube"
)
// PodFilteredInfo represents a small subset of fields from
// Pod object in Kubernetes. Exposed for integration test
type PodFilteredInfo struct {
	Namespace string          `json:"namespace"`
	Name      string          `json:"name"`
	Address   string          `json:"address"`
	Status    corev1.PodPhase `json:"status"`
	Age       string          `json:"age"`
}
// IstioOperatorCRInfo represents a tiny subset of fields from
// IstioOperator CR. This structure is used for displaying data.
// Exposed for integration test
type IstioOperatorCRInfo struct {
	// IOP is the full CR; excluded from JSON output.
	IOP            *iopv1alpha1.IstioOperator `json:"-"`
	Namespace      string                     `json:"namespace"`
	Name           string                     `json:"name"`
	Profile        string                     `json:"profile"`
	Components     []string                   `json:"components,omitempty"`
	Customizations []IopDiff                  `json:"customizations,omitempty"`
}
// IopDiff is a single customization entry: a config path and its value.
type IopDiff struct {
	Path  string `json:"path"`
	Value string `json:"value"`
}
// MutatingWebhookConfigInfo represents a tiny subset of fields from
// MutatingWebhookConfiguration kubernetes object. This is exposed for
// integration tests only
type MutatingWebhookConfigInfo struct {
	Name     string `json:"name"`
	Revision string `json:"revision"`
	Tag      string `json:"tag,omitempty"`
}
// NsInfo represents namespace related information like pods running there.
// It is used to display data and is exposed for integration tests.
type NsInfo struct {
	Name string             `json:"name,omitempty"`
	Pods []*PodFilteredInfo `json:"pods,omitempty"`
}
// RevisionDescription is used to display revision related information.
// This is exposed for integration tests.
type RevisionDescription struct {
	IstioOperatorCRs []*IstioOperatorCRInfo       `json:"istio_operator_crs,omitempty"`
	Webhooks         []*MutatingWebhookConfigInfo `json:"webhooks,omitempty"`
	ControlPlanePods []*PodFilteredInfo           `json:"control_plane_pods,omitempty"`
	// NOTE: the "ingess_gateways" tag is misspelled but must be kept as-is for
	// compatibility with existing consumers of this JSON output.
	IngressGatewayPods []*PodFilteredInfo `json:"ingess_gateways,omitempty"`
	EgressGatewayPods  []*PodFilteredInfo `json:"egress_gateways,omitempty"`
	NamespaceSummary   map[string]*NsInfo `json:"namespace_summary,omitempty"`
}
// ListRevisionDescriptions builds a map of revision name to description from
// the cluster's mutating webhooks. Webhooks are used as the source because
// control planes installed in remote clusters may only have webhooks present.
func ListRevisionDescriptions(client kube.CLIClient) (map[string]*RevisionDescription, error) {
	webhooks, err := Webhooks(context.Background(), client)
	if err != nil {
		return nil, fmt.Errorf("error while listing mutating webhooks: %v", err)
	}
	revisions := make(map[string]*RevisionDescription)
	for _, hook := range webhooks {
		labels := hook.GetLabels()
		rev := renderWithDefault(labels[label.IoIstioRev.Name], DefaultRevisionName)
		tagLabel := labels[IstioTagLabel]
		existing, ok := revisions[rev]
		if !ok {
			revisions[rev] = &RevisionDescription{
				IstioOperatorCRs: []*IstioOperatorCRInfo{},
				Webhooks:         []*MutatingWebhookConfigInfo{{Name: hook.Name, Revision: rev, Tag: tagLabel}},
			}
			continue
		}
		// For an already-seen revision, only tagged webhooks are appended.
		if tagLabel != "" {
			existing.Webhooks = append(existing.Webhooks, &MutatingWebhookConfigInfo{
				Name:     hook.Name,
				Revision: rev,
				Tag:      tagLabel,
			})
		}
	}
	return revisions, nil
}
// Webhooks returns every MutatingWebhookConfiguration present in the cluster.
// On a list failure it returns an empty (non-nil) slice together with the error.
func Webhooks(ctx context.Context, client kube.CLIClient) ([]admitv1.MutatingWebhookConfiguration, error) {
	list, err := client.Kube().AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{})
	if err != nil {
		return []admitv1.MutatingWebhookConfiguration{}, err
	}
	return list.Items, nil
}
// renderWithDefault returns s, substituting def when s is empty.
func renderWithDefault(s, def string) string {
	if s == "" {
		return def
	}
	return s
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tag
import (
"cmp"
"context"
"encoding/json"
"fmt"
"io"
"strings"
"text/tabwriter"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"istio.io/istio/istioctl/pkg/cli"
"istio.io/istio/istioctl/pkg/util"
"istio.io/istio/istioctl/pkg/util/formatting"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/webhook"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
)
const (
	// help strings and long formatted user outputs
	skipConfirmationFlagHelpStr = `The skipConfirmation determines whether the user is prompted for confirmation.
If set to true, the user is not prompted and a Yes response is assumed in all cases.`
	overrideHelpStr = `If true, allow revision tags to be overwritten, otherwise reject revision tag updates that
overwrite existing revision tags.`
	revisionHelpStr = "Control plane revision to reference from a given revision tag"
	// tagCreatedStr is a fmt format string; arguments: tag name, revision, tag name again.
	tagCreatedStr = `Revision tag %q created, referencing control plane revision %q. To enable injection using this
revision tag, use 'kubectl label namespace <NAMESPACE> istio.io/rev=%s'
`
	webhookNameHelpStr          = "Name to use for a revision tag's mutating webhook configuration."
	autoInjectNamespacesHelpStr = "If set to true, the sidecars should be automatically injected into all namespaces by default"
)
// options for CLI
// These package-level variables are bound to cobra flags by the tag subcommands
// and read by setTag/listTags.
var (
	// revision to point tag webhook at
	revision             = ""
	manifestsPath        = ""
	overwrite            = false
	skipConfirmation     = false
	webhookName          = ""
	autoInjectNamespaces = false
	// outputFormat selects `tag list` rendering (table or json).
	outputFormat = util.TableFormat
)
// tagDescription holds the data for one row of `istioctl tag list` output.
type tagDescription struct {
	Tag        string   `json:"tag"`        // tag name
	Revision   string   `json:"revision"`   // control plane revision the tag points at
	Namespaces []string `json:"namespaces"` // namespaces labeled istio.io/rev=<tag>
}
// TagCommand builds the "tag" command group and attaches the
// set/generate/list/remove subcommands to it.
func TagCommand(ctx cli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "tag",
		Short: "Command group used to interact with revision tags",
		Long: `Command group used to interact with revision tags. Revision tags allow for the creation of mutable aliases
referring to control plane revisions for sidecar injection.
With revision tags, rather than relabeling a namespace from "istio.io/rev=revision-a" to "istio.io/rev=revision-b" to
change which control plane revision handles injection, it's possible to create a revision tag "prod" and label our
namespace "istio.io/rev=prod". The "prod" revision tag could point to "1-7-6" initially and then be changed to point to "1-8-1"
at some later point.
This allows operators to change which Istio control plane revision should handle injection for a namespace or set of namespaces
without manual relabeling of the "istio.io/rev" tag.
`,
		Args: func(cmd *cobra.Command, args []string) error {
			// Any positional argument is an unrecognized subcommand.
			if len(args) > 0 {
				return fmt.Errorf("unknown subcommand %q", args[0])
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			// The group itself has no action: show help.
			cmd.HelpFunc()(cmd, args)
			return nil
		},
	}
	cmd.AddCommand(
		tagSetCommand(ctx),
		tagGenerateCommand(ctx),
		tagListCommand(ctx),
		tagRemoveCommand(ctx),
	)
	return cmd
}
// tagSetCommand builds the "tag set" subcommand, which creates (or, with
// --overwrite, modifies) a revision tag webhook in the cluster pointing at the
// control plane selected by --revision.
func tagSetCommand(ctx cli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "set <revision-tag>",
		Short: "Create or modify revision tags",
		Long: `Create or modify revision tags. Tag an Istio control plane revision for use with namespace istio.io/rev
injection labels.`,
		Example: ` # Create a revision tag from the "1-8-0" revision
istioctl tag set prod --revision 1-8-0
# Point namespace "test-ns" at the revision pointed to by the "prod" revision tag
kubectl label ns test-ns istio.io/rev=prod
# Change the revision tag to reference the "1-8-1" revision
istioctl tag set prod --revision 1-8-1 --overwrite
# Make revision "1-8-1" the default revision, both resulting in that revision handling injection for "istio-injection=enabled"
# and validating resources cluster-wide
istioctl tag set default --revision 1-8-1
# Rollout namespace "test-ns" to update workloads to the "1-8-1" revision
kubectl rollout restart deployments -n test-ns
`,
		SuggestFor: []string{"create"},
		Args: func(cmd *cobra.Command, args []string) error {
			// Exactly one tag name is required.
			if len(args) == 0 {
				return fmt.Errorf("must provide a tag for modification")
			}
			if len(args) > 1 {
				return fmt.Errorf("must provide a single tag for creation")
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			kubeClient, err := ctx.CLIClient()
			if err != nil {
				return fmt.Errorf("failed to create Kubernetes client: %v", err)
			}
			// generate=false: apply the webhook to the cluster instead of printing YAML.
			return setTag(context.Background(), kubeClient, args[0], revision, ctx.IstioNamespace(), false, cmd.OutOrStdout(), cmd.OutOrStderr())
		},
	}
	cmd.PersistentFlags().BoolVar(&overwrite, "overwrite", false, overrideHelpStr)
	cmd.PersistentFlags().StringVarP(&manifestsPath, "manifests", "d", "", util.ManifestsFlagHelpStr)
	cmd.PersistentFlags().BoolVarP(&skipConfirmation, "skip-confirmation", "y", false, skipConfirmationFlagHelpStr)
	cmd.PersistentFlags().StringVarP(&revision, "revision", "r", "", revisionHelpStr)
	cmd.PersistentFlags().StringVarP(&webhookName, "webhook-name", "", "", webhookNameHelpStr)
	cmd.PersistentFlags().BoolVar(&autoInjectNamespaces, "auto-inject-namespaces", false, autoInjectNamespacesHelpStr)
	// --revision is mandatory; the error is ignored because the flag was just registered.
	_ = cmd.MarkPersistentFlagRequired("revision")
	return cmd
}
// tagGenerateCommand builds the "tag generate" subcommand, which renders the
// revision tag webhook YAML to stdout instead of applying it to the cluster.
func tagGenerateCommand(ctx cli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "generate <revision-tag>",
		Short: "Generate configuration for a revision tag to stdout",
		Long: `Create a revision tag and output to the command's stdout. Tag an Istio control plane revision for use with namespace istio.io/rev
injection labels.`,
		Example: ` # Create a revision tag from the "1-8-0" revision
istioctl tag generate prod --revision 1-8-0 > tag.yaml
# Apply the tag to cluster
kubectl apply -f tag.yaml
# Point namespace "test-ns" at the revision pointed to by the "prod" revision tag
kubectl label ns test-ns istio.io/rev=prod
# Rollout namespace "test-ns" to update workloads to the "1-8-0" revision
kubectl rollout restart deployments -n test-ns
`,
		Args: func(cmd *cobra.Command, args []string) error {
			// Exactly one tag name is required.
			if len(args) == 0 {
				return fmt.Errorf("must provide a tag for modification")
			}
			if len(args) > 1 {
				return fmt.Errorf("must provide a single tag for creation")
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			kubeClient, err := ctx.CLIClient()
			if err != nil {
				return fmt.Errorf("failed to create Kubernetes client: %v", err)
			}
			// generate=true: print the webhook YAML rather than applying it.
			return setTag(context.Background(), kubeClient, args[0], revision, ctx.IstioNamespace(), true, cmd.OutOrStdout(), cmd.OutOrStderr())
		},
	}
	cmd.PersistentFlags().BoolVar(&overwrite, "overwrite", false, overrideHelpStr)
	cmd.PersistentFlags().StringVarP(&manifestsPath, "manifests", "d", "", util.ManifestsFlagHelpStr)
	cmd.PersistentFlags().BoolVarP(&skipConfirmation, "skip-confirmation", "y", false, skipConfirmationFlagHelpStr)
	cmd.PersistentFlags().StringVarP(&revision, "revision", "r", "", revisionHelpStr)
	cmd.PersistentFlags().StringVarP(&webhookName, "webhook-name", "", "", webhookNameHelpStr)
	cmd.PersistentFlags().BoolVar(&autoInjectNamespaces, "auto-inject-namespaces", false, autoInjectNamespacesHelpStr)
	// --revision is mandatory; the error is ignored because the flag was just registered.
	_ = cmd.MarkPersistentFlagRequired("revision")
	return cmd
}
// tagListCommand builds the "tag list" subcommand, printing existing revision
// tags in table or JSON form (selected via --output).
func tagListCommand(ctx cli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:     "list",
		Short:   "List existing revision tags",
		Example: "istioctl tag list",
		Aliases: []string{"show"},
		Args: func(cmd *cobra.Command, args []string) error {
			if len(args) != 0 {
				return fmt.Errorf("tag list command does not accept arguments")
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			kubeClient, err := ctx.CLIClient()
			if err != nil {
				return fmt.Errorf("failed to create Kubernetes client: %v", err)
			}
			return listTags(context.Background(), kubeClient.Kube(), cmd.OutOrStdout())
		},
	}
	cmd.PersistentFlags().StringVarP(&outputFormat, "output", "o", util.TableFormat, "Output format for tag description "+
		"(available formats: table,json)")
	return cmd
}
// tagRemoveCommand builds the "tag remove" subcommand, which deletes a revision
// tag's webhook configuration (prompting if namespaces still reference the tag).
func tagRemoveCommand(ctx cli.Context) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "remove <revision-tag>",
		Short: "Remove Istio control plane revision tag",
		Long: `Remove Istio control plane revision tag.
Removing a revision tag should be done with care. Removing a revision tag will disrupt sidecar injection in namespaces
that reference the tag in an "istio.io/rev" label. Verify that there are no remaining namespaces referencing a
revision tag before removing using the "istioctl tag list" command.
`,
		Example: ` # Remove the revision tag "prod"
istioctl tag remove prod
`,
		Aliases: []string{"delete"},
		Args: func(cmd *cobra.Command, args []string) error {
			// Exactly one tag name is required.
			if len(args) == 0 {
				return fmt.Errorf("must provide a tag for removal")
			}
			if len(args) > 1 {
				return fmt.Errorf("must provide a single tag for removal")
			}
			return nil
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			kubeClient, err := ctx.CLIClient()
			if err != nil {
				return fmt.Errorf("failed to create Kubernetes client: %v", err)
			}
			return removeTag(context.Background(), kubeClient.Kube(), args[0], skipConfirmation, cmd.OutOrStdout())
		},
	}
	cmd.PersistentFlags().BoolVarP(&skipConfirmation, "skip-confirmation", "y", false, skipConfirmationFlagHelpStr)
	return cmd
}
// setTag creates or modifies a revision tag.
// It renders the tag MutatingWebhookConfiguration YAML for tagName pointing at
// revision, analyzes it for conflicts with existing cluster configuration, and
// then either writes the YAML to w (generate=true) or applies it to the
// cluster. Conflict warnings go to stderr; prompts and normal output go to w.
// The webhookName/manifestsPath/overwrite/skipConfirmation/autoInjectNamespaces
// package-level flag variables are read here.
func setTag(ctx context.Context, kubeClient kube.CLIClient, tagName, revision, istioNS string, generate bool, w, stderr io.Writer) error {
	opts := &GenerateOptions{
		Tag:                  tagName,
		Revision:             revision,
		WebhookName:          webhookName,
		ManifestsPath:        manifestsPath,
		Generate:             generate,
		Overwrite:            overwrite,
		AutoInjectNamespaces: autoInjectNamespaces,
		UserManaged:          true,
	}
	tagWhYAML, err := Generate(ctx, kubeClient, opts, istioNS)
	if err != nil {
		return err
	}
	// Check the newly generated webhook does not conflict with existing ones.
	// The default resource name mirrors the one Generate uses when no explicit
	// webhook name is given.
	resName := webhookName
	if resName == "" {
		resName = fmt.Sprintf("%s-%s", "istio-revision-tag", tagName)
	}
	if err := analyzeWebhook(resName, istioNS, tagWhYAML, revision, kubeClient.RESTConfig()); err != nil {
		// if we have a conflict, we will fail. If --skip-confirmation is set, we will continue with a
		// warning; when actually applying we will also confirm to ensure the user does not see the
		// warning *after* it has applied
		if !skipConfirmation {
			_, _ = stderr.Write([]byte(err.Error()))
			if !generate {
				if !util.Confirm("Apply anyways? [y/N]", w) {
					return nil
				}
			}
		}
	}
	if generate {
		// Generation mode: emit the webhook YAML instead of applying it.
		_, err := w.Write([]byte(tagWhYAML))
		if err != nil {
			return err
		}
		return nil
	}
	if err := Create(kubeClient, tagWhYAML, istioNS); err != nil {
		return fmt.Errorf("failed to apply tag webhook MutatingWebhookConfiguration to cluster: %v", err)
	}
	fmt.Fprintf(w, tagCreatedStr, tagName, revision, tagName)
	return nil
}
// analyzeWebhook runs the webhook analyzer over the generated webhook YAML (wh)
// combined with the live cluster state for the given revision, and returns an
// error when the new webhook named `name` would conflict with existing
// configuration. A nil return means no Error-level findings for that webhook.
func analyzeWebhook(name, istioNamespace, wh, revision string, config *rest.Config) error {
	// Analyze the in-memory webhook YAML together with the running cluster.
	sa := local.NewSourceAnalyzer(analysis.Combine("webhook", &webhook.Analyzer{}), "", resource.Namespace(istioNamespace), nil)
	if err := sa.AddReaderKubeSource([]local.ReaderSource{{Name: "", Reader: strings.NewReader(wh)}}); err != nil {
		return err
	}
	k, err := kube.NewClient(kube.NewClientConfigForRestConfig(config), "")
	if err != nil {
		return err
	}
	sa.AddRunningKubeSourceWithRevision(k, revision)
	res, err := sa.Analyze(make(chan struct{}))
	if err != nil {
		return err
	}
	// Keep only Error-level messages that concern the webhook being created;
	// findings about unrelated resources are not this command's problem.
	relevantMessages := diag.Messages{}
	for _, msg := range res.Messages.FilterOutLowerThan(diag.Error) {
		if msg.Resource.Metadata.FullName.Name == resource.LocalName(name) {
			relevantMessages = append(relevantMessages, msg)
		}
	}
	if len(relevantMessages) > 0 {
		o, err := formatting.Print(relevantMessages, formatting.LogFormat, false)
		if err != nil {
			return err
		}
		// nolint
		return fmt.Errorf("creating tag would conflict, pass --skip-confirmation to proceed:\n%v\n", o)
	}
	return nil
}
// removeTag removes an existing revision tag.
// It verifies that a MutatingWebhookConfiguration for the tag exists, warns
// (and, unless skipConfirmation is set, prompts) when namespaces still point at
// the tag, then deletes the tag's webhooks. Progress messages are written to w.
func removeTag(ctx context.Context, kubeClient kubernetes.Interface, tagName string, skipConfirmation bool, w io.Writer) error {
	webhooks, err := GetWebhooksWithTag(ctx, kubeClient, tagName)
	if err != nil {
		return fmt.Errorf("failed to retrieve tag with name %s: %v", tagName, err)
	}
	if len(webhooks) == 0 {
		return fmt.Errorf("cannot remove tag %q: cannot find MutatingWebhookConfiguration for tag", tagName)
	}
	taggedNamespaces, err := GetNamespacesWithTag(ctx, kubeClient, tagName)
	if err != nil {
		// Include the underlying error; it was previously dropped, hiding the cause.
		return fmt.Errorf("failed to retrieve namespaces dependent on tag %q: %v", tagName, err)
	}
	// warn user if deleting a tag that still has namespaces pointed to it
	if len(taggedNamespaces) > 0 && !skipConfirmation {
		if !util.Confirm(buildDeleteTagConfirmation(tagName, taggedNamespaces), w) {
			fmt.Fprintf(w, "Aborting operation.\n")
			return nil
		}
	}
	// proceed with webhook deletion
	if err := DeleteTagWebhooks(ctx, kubeClient, tagName); err != nil {
		return fmt.Errorf("failed to delete Istio revision tag MutatingConfigurationWebhook: %v", err)
	}
	fmt.Fprintf(w, "Revision tag %s removed\n", tagName)
	return nil
}
// uniqTag identifies a (revision, tag) pair; used as a map key in listTags to
// deduplicate webhooks.
type uniqTag struct {
	revision, tag string
}
// listTags lists existing revision.
// For every revision webhook it resolves the tag name, target revision and the
// namespaces currently labeled with the tag, then prints the result in the
// format selected by the package-level outputFormat variable (table or JSON).
func listTags(ctx context.Context, kubeClient kubernetes.Interface, writer io.Writer) error {
	tagWebhooks, err := GetRevisionWebhooks(ctx, kubeClient)
	if err != nil {
		return fmt.Errorf("failed to retrieve revision tags: %v", err)
	}
	if len(tagWebhooks) == 0 {
		fmt.Fprintf(writer, "No Istio revision tag MutatingWebhookConfigurations to list\n")
		return nil
	}
	rawTags := map[uniqTag]tagDescription{}
	for _, wh := range tagWebhooks {
		tagName := GetWebhookTagName(wh)
		tagRevision, err := GetWebhookRevision(wh)
		if err != nil {
			return fmt.Errorf("error parsing revision from webhook %q: %v", wh.Name, err)
		}
		tagNamespaces, err := GetNamespacesWithTag(ctx, kubeClient, tagName)
		if err != nil {
			return fmt.Errorf("error retrieving namespaces for tag %q: %v", tagName, err)
		}
		tagDesc := tagDescription{
			Tag:        tagName,
			Revision:   tagRevision,
			Namespaces: tagNamespaces,
		}
		key := uniqTag{
			revision: tagRevision,
			tag:      tagName,
		}
		rawTags[key] = tagDesc
	}
	// Drop the tag-less entry for a revision once a tagged entry for the same
	// revision exists, so each revision is reported at most once per tag.
	for k := range rawTags {
		if k.tag != "" {
			delete(rawTags, uniqTag{revision: k.revision})
		}
	}
	// Sort by revision, then tag, for deterministic output (map order is random).
	tags := slices.SortFunc(maps.Values(rawTags), func(a, b tagDescription) int {
		if r := cmp.Compare(a.Revision, b.Revision); r != 0 {
			return r
		}
		return cmp.Compare(a.Tag, b.Tag)
	})
	switch outputFormat {
	case util.JSONFormat:
		return PrintJSON(writer, tags)
	case util.TableFormat:
	default:
		return fmt.Errorf("unknown format: %s", outputFormat)
	}
	// Table output path.
	w := new(tabwriter.Writer).Init(writer, 0, 8, 1, ' ', 0)
	fmt.Fprintln(w, "TAG\tREVISION\tNAMESPACES")
	for _, t := range tags {
		fmt.Fprintf(w, "%s\t%s\t%s\n", t.Tag, t.Revision, strings.Join(t.Namespaces, ","))
	}
	return w.Flush()
}
func PrintJSON(w io.Writer, res any) error {
out, err := json.MarshalIndent(res, "", "\t")
if err != nil {
return fmt.Errorf("error while marshaling to JSON: %v", err)
}
fmt.Fprintln(w, string(out))
return nil
}
// buildDeleteTagConfirmation formats a confirmation prompt warning that the
// given namespaces are still injected by the tag about to be deleted.
func buildDeleteTagConfirmation(tag string, taggedNamespaces []string) string {
	var b strings.Builder
	fmt.Fprintf(&b, "Caution, found %d namespace(s) still injected by tag %q:", len(taggedNamespaces), tag)
	for _, ns := range taggedNamespaces {
		b.WriteString(" ")
		b.WriteString(ns)
	}
	b.WriteString("\nProceed with operation? [y/N]")
	return b.String()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tag
import (
"context"
"fmt"
"github.com/hashicorp/go-multierror"
admitv1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"istio.io/api/label"
"istio.io/istio/istioctl/pkg/util"
)
// GetRevisionWebhooks lists every MutatingWebhookConfiguration carrying the
// istio.io/rev label, regardless of the label's value.
func GetRevisionWebhooks(ctx context.Context, client kubernetes.Interface) ([]admitv1.MutatingWebhookConfiguration, error) {
	list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{
		LabelSelector: label.IoIstioRev.Name,
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}
// GetWebhooksWithTag returns webhooks tagged with istio.io/tag=<tag>.
func GetWebhooksWithTag(ctx context.Context, client kubernetes.Interface, tag string) ([]admitv1.MutatingWebhookConfiguration, error) {
	selector := fmt.Sprintf("%s=%s", IstioTagLabel, tag)
	list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{
		LabelSelector: selector,
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}
// GetWebhooksWithRevision returns webhooks tagged with istio.io/rev=<rev> and NOT TAGGED with istio.io/tag.
// this retrieves the webhook created at revision installation rather than tag webhooks
func GetWebhooksWithRevision(ctx context.Context, client kubernetes.Interface, rev string) ([]admitv1.MutatingWebhookConfiguration, error) {
	// Selector: has the revision label with the given value AND lacks the tag label.
	selector := fmt.Sprintf("%s=%s,!%s", label.IoIstioRev.Name, rev, IstioTagLabel)
	list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{
		LabelSelector: selector,
	})
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}
// GetNamespacesWithTag retrieves all namespaces pointed at the given tag.
func GetNamespacesWithTag(ctx context.Context, client kubernetes.Interface, tag string) ([]string, error) {
	namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{
		LabelSelector: fmt.Sprintf("%s=%s", label.IoIstioRev.Name, tag),
	})
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(namespaces.Items))
	for _, ns := range namespaces.Items {
		names = append(names, ns.Name)
	}
	return names, nil
}
// GetWebhookTagName extracts tag name from webhook object.
// Returns "" when the webhook carries no istio.io/tag label.
func GetWebhookTagName(wh admitv1.MutatingWebhookConfiguration) string {
	return wh.Labels[IstioTagLabel]
}
// GetWebhookRevision extracts tag target revision from webhook object.
// An error is returned when the webhook lacks the istio.io/rev label.
func GetWebhookRevision(wh admitv1.MutatingWebhookConfiguration) (string, error) {
	rev, ok := wh.Labels[label.IoIstioRev.Name]
	if !ok {
		return "", fmt.Errorf("could not extract tag revision from webhook")
	}
	return rev, nil
}
// DeleteTagWebhooks deletes all MutatingWebhookConfigurations associated with
// the given tag. Deletion failures are accumulated; the combined error (or nil
// when every delete succeeds) is returned.
func DeleteTagWebhooks(ctx context.Context, client kubernetes.Interface, tag string) error {
	webhooks, err := GetWebhooksWithTag(ctx, client, tag)
	if err != nil {
		return err
	}
	// Accumulate into *multierror.Error and convert once at the end; the
	// original converted via ErrorOrNil on every iteration, which obscured
	// intent and risked the typed-nil-error pitfall.
	var errs *multierror.Error
	for _, wh := range webhooks {
		errs = multierror.Append(errs, client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(ctx, wh.Name, metav1.DeleteOptions{}))
	}
	// ErrorOrNil returns a plain nil (not a typed nil) when no errors were collected.
	return errs.ErrorOrNil()
}
// PreviousInstallExists checks whether there is an existing Istio installation. Should be used in installer when deciding
// whether to make an installation the default.
func PreviousInstallExists(ctx context.Context, client kubernetes.Interface) bool {
	mwhs, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(ctx, metav1.ListOptions{
		LabelSelector: "app=sidecar-injector",
	})
	// A list failure is treated the same as "no previous install".
	return err == nil && len(mwhs.Items) > 0
}
// DeactivateIstioInjectionWebhook deactivates the istio-injection webhook from the given MutatingWebhookConfiguration if exists.
// used rather than just deleting the webhook since we want to keep it around after changing the default so user can later
// switch back to it. This is a hack but it is meant to cover a corner case where a user wants to migrate from a non-revisioned
// old version and then later decides to switch back to the old revision again.
func DeactivateIstioInjectionWebhook(ctx context.Context, client kubernetes.Interface) error {
	// Find the webhook installed for the default revision (not a tag webhook).
	whs, err := GetWebhooksWithRevision(ctx, client, DefaultRevisionName)
	if err != nil {
		return err
	}
	if len(whs) == 0 {
		// no revision with default, no action required.
		return nil
	}
	if len(whs) > 1 {
		return fmt.Errorf("expected a single webhook for default revision")
	}
	webhook := whs[0]
	for i := range webhook.Webhooks {
		wh := webhook.Webhooks[i]
		// this is an abomination, but if this isn't a per-revision webhook, we want to make it ineffectual
		// without deleting it. Add a nonsense match.
		wh.NamespaceSelector = util.NeverMatch
		wh.ObjectSelector = util.NeverMatch
		webhook.Webhooks[i] = wh
	}
	// Persist the modified configuration back to the cluster.
	admit := client.AdmissionregistrationV1().MutatingWebhookConfigurations()
	_, err = admit.Update(ctx, &webhook, metav1.UpdateOptions{})
	if err != nil {
		return err
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"io"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
binversion "istio.io/istio/operator/version"
)
// NeverMatch is a label selector intended to match no real objects; it is used
// to neutralize webhooks without deleting them (see its use as a
// NamespaceSelector/ObjectSelector elsewhere in istioctl).
var NeverMatch = &metav1.LabelSelector{
	MatchLabels: map[string]string{
		"istio.io/deactivated": "never-match",
	},
}
// ManifestsFlagHelpStr is the shared help text for the --manifests/-d flag,
// embedding the operator version in the example path.
var ManifestsFlagHelpStr = `Specify a path to a directory of charts and profiles
(e.g. ~/Downloads/istio-` + binversion.OperatorVersionString + `/manifests).`
// CommandParseError distinguishes an error parsing istioctl CLI arguments from an error processing
type CommandParseError struct {
	Err error // the underlying argument-parsing error
}

// Error implements the error interface by delegating to the wrapped error.
func (c CommandParseError) Error() string {
	return c.Err.Error()
}
// Confirm prompts with msg on writer and reads an answer from standard input.
// It returns true for "y"/"yes" and false for "n"/"no" (case-insensitive) or
// any read error; any other answer causes a re-prompt.
func Confirm(msg string, writer io.Writer) bool {
	for {
		_, _ = fmt.Fprintf(writer, "%s ", msg)
		var answer string
		if _, err := fmt.Scanln(&answer); err != nil {
			// Treat EOF/read failure as a "no".
			return false
		}
		switch strings.ToUpper(answer) {
		case "Y", "YES":
			return true
		case "N", "NO":
			return false
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)
// GetBootstrapConfigDump retrieves the bootstrap config dump from the ConfigDump
func (w *Wrapper) GetBootstrapConfigDump() (*admin.BootstrapConfigDump, error) {
	section, err := w.getSection(bootstrap)
	if err != nil {
		return nil, err
	}
	dump := &admin.BootstrapConfigDump{}
	if err := section.UnmarshalTo(dump); err != nil {
		return nil, err
	}
	return dump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"sort"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
v3 "istio.io/istio/pilot/pkg/xds/v3"
)
// GetDynamicClusterDump retrieves a cluster dump with just dynamic active clusters in it
// The clusters are sorted by name; when stripVersions is set, version/timestamp
// fields that churn between dumps are cleared so outputs can be diffed.
func (w *Wrapper) GetDynamicClusterDump(stripVersions bool) (*admin.ClustersConfigDump, error) {
	clusterDump, err := w.GetClusterConfigDump()
	if err != nil {
		return nil, err
	}
	dac := clusterDump.GetDynamicActiveClusters()
	// Allow sorting to work even if we don't have the exact same type
	// NOTE(review): assumes every entry has a non-nil Cluster; a nil entry would
	// panic here — confirm the dump source guarantees this.
	for i := range dac {
		dac[i].Cluster.TypeUrl = v3.ClusterType
	}
	// Sort by cluster name. The comparator writes unmarshal failures to the outer
	// err but treats a failed element as "not less", so the sort always completes.
	sort.Slice(dac, func(i, j int) bool {
		cluster := &cluster.Cluster{}
		err = dac[i].Cluster.UnmarshalTo(cluster)
		if err != nil {
			return false
		}
		name := cluster.Name
		err = dac[j].Cluster.UnmarshalTo(cluster)
		if err != nil {
			return false
		}
		return name < cluster.Name
	})
	if stripVersions {
		for i := range dac {
			dac[i].VersionInfo = ""
			dac[i].LastUpdated = nil
		}
	}
	return &admin.ClustersConfigDump{DynamicActiveClusters: dac}, nil
}
// GetClusterConfigDump retrieves the cluster config dump from the ConfigDump
func (w *Wrapper) GetClusterConfigDump() (*admin.ClustersConfigDump, error) {
	section, err := w.getSection(clusters)
	if err != nil {
		return nil, err
	}
	dump := &admin.ClustersConfigDump{}
	if err := section.UnmarshalTo(dump); err != nil {
		return nil, err
	}
	return dump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)
// GetEcdsConfigDump retrieves the extension config dump from the ConfigDump.
// All ECDS filter sections are unmarshaled individually and their filters are
// merged into a single EcdsConfigDump.
func (w *Wrapper) GetEcdsConfigDump() (*admin.EcdsConfigDump, error) {
	ecdsDumpAny, err := w.getSections(ecds)
	if err != nil {
		return nil, err
	}
	ecdsDump := &admin.EcdsConfigDump{}
	for _, dump := range ecdsDumpAny {
		ecds := &admin.EcdsConfigDump{}
		// Scope the unmarshal error to the loop; a failure returns immediately,
		// which also removes the dead post-loop error check the original carried.
		if err := dump.UnmarshalTo(ecds); err != nil {
			return nil, err
		}
		ecdsDump.EcdsFilters = append(ecdsDump.EcdsFilters, ecds.EcdsFilters...)
	}
	return ecdsDump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
)
// GetEndpointsConfigDump retrieves the endpoints config dump from the ConfigDump.
// NOTE(review): when the endpoints section cannot be found, this returns
// (nil, nil) — the lookup error is swallowed, presumably because the section is
// optional; callers must nil-check the result. Confirm this is intentional.
func (w *Wrapper) GetEndpointsConfigDump() (*admin.EndpointsConfigDump, error) {
	endpointsDumpAny, err := w.getSection(endpoints)
	if err != nil {
		return nil, nil
	}
	endpointsDump := &admin.EndpointsConfigDump{}
	err = endpointsDumpAny.UnmarshalTo(endpointsDump)
	if err != nil {
		return nil, err
	}
	return endpointsDump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"sort"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
v3 "istio.io/istio/pilot/pkg/xds/v3"
)
// GetDynamicListenerDump retrieves a listener dump with just dynamic active listeners in it
// Listeners with no active state are dropped; the remainder are sorted by name,
// and version/timestamp fields are cleared when stripVersions is set.
func (w *Wrapper) GetDynamicListenerDump(stripVersions bool) (*admin.ListenersConfigDump, error) {
	listenerDump, err := w.GetListenerConfigDump()
	if err != nil {
		return nil, err
	}
	dal := make([]*admin.ListenersConfigDump_DynamicListener, 0)
	for _, l := range listenerDump.DynamicListeners {
		// If a listener was reloaded, it would contain both the active and draining state
		// delete the draining state for proper comparison
		l.DrainingState = nil
		if l.ActiveState != nil {
			dal = append(dal, l)
		}
	}
	// Support v2 or v3 in config dump. See ads.go:RequestedTypes for more info.
	for i := range dal {
		dal[i].ActiveState.Listener.TypeUrl = v3.ListenerType
	}
	// Sort by listener name. The comparator writes unmarshal failures to the
	// outer err but treats a failed element as "not less"; the sort completes.
	sort.Slice(dal, func(i, j int) bool {
		l := &listener.Listener{}
		err = dal[i].ActiveState.Listener.UnmarshalTo(l)
		if err != nil {
			return false
		}
		name := l.Name
		err = dal[j].ActiveState.Listener.UnmarshalTo(l)
		if err != nil {
			return false
		}
		return name < l.Name
	})
	if stripVersions {
		for i := range dal {
			dal[i].ActiveState.VersionInfo = ""
			dal[i].ActiveState.LastUpdated = nil
			dal[i].Name = "" // In Istio 1.5, Envoy creates this; suppress it
		}
	}
	return &admin.ListenersConfigDump{DynamicListeners: dal}, nil
}
// GetListenerConfigDump retrieves the listener config dump from the ConfigDump
func (w *Wrapper) GetListenerConfigDump() (*admin.ListenersConfigDump, error) {
	section, err := w.getSection(listeners)
	if err != nil {
		return nil, err
	}
	dump := &admin.ListenersConfigDump{}
	if err := section.UnmarshalTo(dump); err != nil {
		return nil, err
	}
	return dump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"sort"
"time"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"istio.io/istio/pilot/pkg/util/protoconv"
v3 "istio.io/istio/pilot/pkg/xds/v3"
)
// GetLastUpdatedDynamicRouteTime retrieves the LastUpdated timestamp of the
// most recently updated DynamicRouteConfig
func (w *Wrapper) GetLastUpdatedDynamicRouteTime() (*time.Time, error) {
	routeDump, err := w.GetRouteConfigDump()
	if err != nil {
		return nil, err
	}
	epoch := time.Unix(0, 0) // oldest possible timestamp
	newest := epoch
	for _, cfg := range routeDump.GetDynamicRouteConfigs() {
		if cfg.LastUpdated == nil {
			continue
		}
		if t := cfg.LastUpdated.AsTime(); t.After(newest) {
			newest = t
		}
	}
	// Return nil when no route config carried a timestamp at all.
	if !newest.After(epoch) {
		return nil, nil
	}
	return &newest, nil
}
// GetDynamicRouteDump retrieves a route dump with just dynamic active routes in it.
// Routes (and the virtual hosts inside each route) are sorted by name; when
// stripVersions is true, version info and last-updated timestamps are cleared
// so two dumps can be compared for semantic equality.
func (w *Wrapper) GetDynamicRouteDump(stripVersions bool) (*admin.RoutesConfigDump, error) {
	routeDump, err := w.GetRouteConfigDump()
	if err != nil {
		return nil, err
	}
	drc := routeDump.GetDynamicRouteConfigs()
	// Support v2 or v3 in config dump. See ads.go:RequestedTypes for more info.
	for i := range drc {
		drc[i].RouteConfig.TypeUrl = v3.RouteType
	}
	sort.Slice(drc, func(i, j int) bool {
		r := &route.RouteConfiguration{}
		if err := drc[i].RouteConfig.UnmarshalTo(r); err != nil {
			return false
		}
		name := r.Name
		if err := drc[j].RouteConfig.UnmarshalTo(r); err != nil {
			return false
		}
		return name < r.Name
	})
	// In Istio 1.5, it is not enough just to sort the routes. The virtual hosts
	// within a route might have a different order. Sort those too.
	for i := range drc {
		// Named rc (not "route") to avoid shadowing the imported route package.
		rc := &route.RouteConfiguration{}
		if err := drc[i].RouteConfig.UnmarshalTo(rc); err != nil {
			return nil, err
		}
		sort.Slice(rc.VirtualHosts, func(i, j int) bool {
			return rc.VirtualHosts[i].Name < rc.VirtualHosts[j].Name
		})
		drc[i].RouteConfig = protoconv.MessageToAny(rc)
	}
	if stripVersions {
		for i := range drc {
			drc[i].VersionInfo = ""
			drc[i].LastUpdated = nil
		}
	}
	return &admin.RoutesConfigDump{DynamicRouteConfigs: drc}, nil
}
// GetRouteConfigDump retrieves the route config dump from the ConfigDump
func (w *Wrapper) GetRouteConfigDump() (*admin.RoutesConfigDump, error) {
	section, err := w.getSection(routes)
	if err != nil {
		return nil, err
	}
	dump := &admin.RoutesConfigDump{}
	if err := section.UnmarshalTo(dump); err != nil {
		return nil, err
	}
	return dump, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"encoding/base64"
"fmt"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
extapi "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
anypb "google.golang.org/protobuf/types/known/anypb"
)
// GetSecretConfigDump retrieves a secret dump from a config dump wrapper
func (w *Wrapper) GetSecretConfigDump() (*admin.SecretsConfigDump, error) {
	section, err := w.getSection(secrets)
	if err != nil {
		return nil, err
	}
	dump := &admin.SecretsConfigDump{}
	if err := section.UnmarshalTo(dump); err != nil {
		return nil, err
	}
	return dump, nil
}
// GetRootCAFromSecretConfigDump retrieves root CA from a secret config dump wrapper.
// The CA certificate is returned base64-encoded. An error is returned when the
// secret carries no validation context, no trusted CA, or no inline bytes.
func (w *Wrapper) GetRootCAFromSecretConfigDump(anySec *anypb.Any) (string, error) {
	var secret extapi.Secret
	if err := anySec.UnmarshalTo(&secret); err != nil {
		// "unmarshall" typo in the original message fixed.
		return "", fmt.Errorf("failed to unmarshal ROOTCA secret: %v", err)
	}
	// Guard clauses replace the original deeply nested if/else ladder;
	// each returns the same error text as before.
	rCASecret := secret.GetValidationContext()
	if rCASecret == nil {
		return "", fmt.Errorf("can not find ROOTCA from secret config dump")
	}
	trustCA := rCASecret.GetTrustedCa()
	if trustCA == nil {
		return "", fmt.Errorf("can not retrieve trustedCa from secret ROOTCA")
	}
	inlineBytes := trustCA.GetInlineBytes()
	if inlineBytes == nil {
		return "", fmt.Errorf("can not retrieve inlineBytes from trustCA section")
	}
	return base64.StdEncoding.EncodeToString(inlineBytes), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"fmt"
anypb "google.golang.org/protobuf/types/known/anypb"
)
// configTypeURL is the protobuf type URL identifying one section of an Envoy config dump.
type configTypeURL string

// Type URLs for each config dump section this package can extract.
// See https://www.envoyproxy.io/docs/envoy/latest/api-v3/admin/v3/config_dump.proto
const (
	bootstrap configTypeURL = "type.googleapis.com/envoy.admin.v3.BootstrapConfigDump"
	listeners configTypeURL = "type.googleapis.com/envoy.admin.v3.ListenersConfigDump"
	endpoints configTypeURL = "type.googleapis.com/envoy.admin.v3.EndpointsConfigDump"
	clusters  configTypeURL = "type.googleapis.com/envoy.admin.v3.ClustersConfigDump"
	routes    configTypeURL = "type.googleapis.com/envoy.admin.v3.RoutesConfigDump"
	secrets   configTypeURL = "type.googleapis.com/envoy.admin.v3.SecretsConfigDump"
	ecds      configTypeURL = "type.googleapis.com/envoy.admin.v3.EcdsConfigDump"
)
// getSection takes a TypeURL and returns the types.Any from the config dump corresponding to that URL.
// When several entries share the type URL, the last one wins (matching historical behavior).
func (w *Wrapper) getSection(sectionTypeURL configTypeURL) (*anypb.Any, error) {
	var found *anypb.Any
	for _, conf := range w.Configs {
		if configTypeURL(conf.TypeUrl) == sectionTypeURL {
			found = conf
		}
	}
	if found == nil {
		return nil, fmt.Errorf("config dump has no configuration type %s", sectionTypeURL)
	}
	return found, nil
}
// getSections returns every types.Any in the config dump whose type URL matches,
// or an error when none do.
func (w *Wrapper) getSections(sectionTypeURL configTypeURL) ([]*anypb.Any, error) {
	var matches []*anypb.Any
	for _, conf := range w.Configs {
		if configTypeURL(conf.TypeUrl) == sectionTypeURL {
			matches = append(matches, conf)
		}
	}
	if len(matches) == 0 {
		return nil, fmt.Errorf("config dump has no configuration type %s", sectionTypeURL)
	}
	return matches, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configdump
import (
"strings"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
legacyproto "github.com/golang/protobuf/proto" // nolint: staticcheck
emptypb "github.com/golang/protobuf/ptypes/empty"
exprpb "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"istio.io/istio/pkg/util/protomarshal"
)
// nonstrictResolver is an AnyResolver that ignores unknown proto messages
type nonstrictResolver struct{}

// envoyResolver is the shared resolver instance used when unmarshalling config dumps.
var envoyResolver nonstrictResolver
// Resolve returns a fresh instance of the protobuf message named by typeURL,
// or a dynamic placeholder when the type is not registered locally.
func (m *nonstrictResolver) Resolve(typeURL string) (legacyproto.Message, error) {
	// See https://github.com/golang/protobuf/issues/747#issuecomment-437463120
	name := typeURL
	if slash := strings.LastIndex(name, "/"); slash >= 0 {
		name = name[slash+1:]
	}
	mt, err := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(name))
	if err != nil {
		// istioctl should keep going if it encounters new Envoy versions; ignore unknown types
		return &exprpb.Type{TypeKind: &exprpb.Type_Dyn{Dyn: &emptypb.Empty{}}}, nil
	}
	return legacyproto.MessageV1(mt.New().Interface()), nil
}
// Wrapper is a wrapper around the Envoy ConfigDump
// It has extra helper functions for handling any/struct/marshal protobuf pain
// and custom JSON (un)marshalling that tolerates unknown Envoy types.
type Wrapper struct {
	*admin.ConfigDump
}
// MarshalJSON is a custom marshaller to handle protobuf pain.
// It delegates to protomarshal so the embedded ConfigDump serializes correctly.
func (w *Wrapper) MarshalJSON() ([]byte, error) {
	return protomarshal.Marshal(w)
}
// UnmarshalJSON is a custom unmarshaller to handle protobuf pain
func (w *Wrapper) UnmarshalJSON(b []byte) error {
	configDump := &admin.ConfigDump{}
	// The (possibly partial) dump is installed even when decoding fails,
	// so the wrapper is never left with a nil ConfigDump.
	err := protomarshal.UnmarshalAllowUnknownWithAnyResolver(&envoyResolver, b, configDump)
	*w = Wrapper{ConfigDump: configDump}
	return err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package formatting
import (
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/mattn/go-isatty"
"sigs.k8s.io/yaml"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/env"
)
// Formatting options for Messages
const (
	LogFormat  = "log"
	JSONFormat = "json"
	YAMLFormat = "yaml"
)

var (
	// MsgOutputFormatKeys lists the supported output format names.
	MsgOutputFormatKeys = []string{LogFormat, JSONFormat, YAMLFormat}
	// MsgOutputFormats is a set of the supported names, populated in init().
	MsgOutputFormats = make(map[string]bool)
	// termEnvVar reads TERM; "dumb" suppresses color output.
	termEnvVar = env.Register("TERM", "", "Specifies terminal type.  Use 'dumb' to suppress color output")
)
// init registers every supported output format for quick membership checks.
func init() {
	for _, name := range MsgOutputFormatKeys {
		MsgOutputFormats[name] = true
	}
}
// Print output messages in the specified format with color options
func Print(ms diag.Messages, format string, colorize bool) (string, error) {
	switch format {
	case YAMLFormat:
		return printYAML(ms)
	case JSONFormat:
		return printJSON(ms)
	case LogFormat:
		return printLog(ms, colorize), nil
	}
	return "", fmt.Errorf("invalid format, expected one of %v but got %q", MsgOutputFormatKeys, format)
}
// printLog renders each message on its own line, optionally colorized.
func printLog(ms diag.Messages, colorize bool) string {
	rendered := make([]string, len(ms))
	for i, m := range ms {
		rendered[i] = render(m, colorize)
	}
	return strings.Join(rendered, "\n")
}
// printJSON marshals the messages as tab-indented JSON.
func printJSON(ms diag.Messages) (string, error) {
	out, err := json.MarshalIndent(ms, "", "\t")
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// printYAML marshals the messages as YAML.
func printYAML(ms diag.Messages) (string, error) {
	out, err := yaml.Marshal(ms)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// Formatting options for Message
var (
	// colorPrefixes maps a diagnostic level to the ANSI escape that starts
	// its color; Info intentionally has no color.
	colorPrefixes = map[diag.Level]string{
		diag.Info:    "",          // no special color for info messages
		diag.Warning: "\033[33m",  // yellow
		diag.Error:   "\033[1;31m", // bold red
	}
)
// render turns a Message instance into a string with an option of colored bash output
func render(m diag.Message, colorize bool) string {
	details := fmt.Sprintf(m.Type.Template(), m.Parameters...)
	return fmt.Sprintf("%s%v%s [%v]%s %s",
		colorPrefix(m, colorize), m.Type.Level(), colorSuffix(colorize),
		m.Type.Code(), m.Origin(), details,
	)
}
// colorPrefix returns the ANSI escape that starts coloring for the message's
// level, or "" when colorization is off or the level has no mapped color.
func colorPrefix(m diag.Message, colorize bool) string {
	if !colorize {
		return ""
	}
	// A missing map entry yields the zero value "", i.e. no color —
	// the same result the original's explicit ok-check produced.
	return colorPrefixes[m.Type.Level()]
}
// colorSuffix returns the ANSI reset sequence when colorization is on,
// and the empty string otherwise.
func colorSuffix(colorize bool) string {
	if colorize {
		return "\033[0m"
	}
	return ""
}
// IstioctlColorDefault reports whether color output should be enabled for the
// given writer: disabled for TERM=dumb and for *os.File writers that are not terminals.
func IstioctlColorDefault(writer io.Writer) bool {
	if strings.EqualFold(termEnvVar.Get(), "dumb") {
		return false
	}
	if file, ok := writer.(*os.File); ok && !isatty.IsTerminal(file.Fd()) {
		return false
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package formatting
import (
"errors"
"strings"
"istio.io/istio/pkg/config/analysis/diag"
)
// MessageThreshold is a wrapper around Level to be used as a cobra command line argument.
// It should satisfy the pflag.Value interface (String, Type, Set).
type MessageThreshold struct {
	diag.Level
}
// String is a function declared in the pflag.Value interface.
// It returns the embedded level's name.
func (m *MessageThreshold) String() string {
	return m.Level.String()
}
// Type is a function declared in the pflag.Value interface.
// It names the flag's value type for help output.
func (m *MessageThreshold) Type() string {
	return "Level"
}
// Set is a function declared in the pflag.Value interface
func (m *MessageThreshold) Set(s string) error {
	// Level names are matched case-insensitively.
	level, ok := diag.GetUppercaseStringToLevelMap()[strings.ToUpper(s)]
	if !ok {
		return errors.New("invalid level option")
	}
	m.Level = level
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package handlers
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
"k8s.io/kubectl/pkg/polymorphichelpers"
"k8s.io/kubectl/pkg/util/podutils"
gatewayapi "sigs.k8s.io/gateway-api/apis/v1alpha2"
gatewayapibeta "sigs.k8s.io/gateway-api/apis/v1beta1"
"istio.io/istio/pilot/pkg/config/kube/gateway"
"istio.io/istio/pkg/config/constants"
kubelib "istio.io/istio/pkg/kube"
)
// InferPodInfo Uses name to infer namespace if the passed name contains namespace information.
// Otherwise uses the namespace value passed into the function.
// It is a thin public wrapper around inferNsInfo.
func InferPodInfo(name, defaultNS string) (string, string) {
	return inferNsInfo(name, defaultNS)
}
// inferNsInfo Uses name to infer namespace if the passed name contains namespace information.
// Otherwise uses the namespace value passed into the function
func inferNsInfo(name, namespace string) (string, string) {
	if slash := strings.LastIndex(name, "/"); slash > 0 {
		// A "/" means resourcetype/name.namespace; the resource type itself may
		// contain dots, so only look for the namespace separator after the "/".
		dot := strings.LastIndex(name[slash:], ".")
		if dot < 0 {
			return name, namespace
		}
		return name[:slash+dot], name[slash+dot+1:]
	}
	dot := strings.LastIndex(name, ".")
	if dot < 0 {
		return name, namespace
	}
	return name[:dot], name[dot+1:]
}
// InferPodsFromTypedResource resolves a typed resource expression such as
// "deployment/httpbin" to the names of the pods it selects, plus their namespace.
func InferPodsFromTypedResource(name, defaultNS string, factory cmdutil.Factory) ([]string, string, error) {
	resname, ns := inferNsInfo(name, defaultNS)
	if !strings.Contains(resname, "/") {
		// Plain pod name; nothing to resolve.
		return []string{resname}, ns, nil
	}
	client, podName, namespace, selector, err := getClientForResource(resname, ns, factory)
	if err != nil {
		return []string{}, "", err
	}
	if podName != "" {
		// The expression referred directly to a single pod.
		return []string{podName}, namespace, nil
	}
	podList, err := client.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return []string{}, "", err
	}
	pods := make([]string, 0, len(podList.Items))
	for i := range podList.Items {
		pods = append(pods, podList.Items[i].Name)
	}
	return pods, namespace, nil
}
// getClientForResource resolves a typed resource expression (e.g. "deployment/httpbin")
// using the kubectl resource builder. It returns either:
//   - a direct pod hit: (nil client, podName, namespace, "", nil), or
//   - a selector hit: (client, "", namespace, labelSelector, nil) for the caller
//     to list matching pods.
func getClientForResource(resname, ns string, factory cmdutil.Factory) (*corev1client.CoreV1Client, string, string, string, error) {
	// Pod is referred to using something like "deployment/httpbin". Use the kubectl
	// libraries to look up the resource name, find the pods it selects, and return
	// one of those pods.
	builder := factory.NewBuilder().
		WithScheme(kubelib.IstioScheme, kubelib.IstioScheme.PrioritizedVersionsAllGroups()...).
		NamespaceParam(ns).DefaultNamespace().
		SingleResourceType()
	builder.ResourceNames("pods", resname)
	infos, err := builder.Do().Infos()
	if err != nil {
		return nil, "", "", "", fmt.Errorf("failed retrieving: %v in the %q namespace", err, ns)
	}
	// SingleResourceType was requested, so anything other than exactly one result is an error.
	if len(infos) != 1 {
		return nil, "", "", "", errors.New("expected a resource")
	}
	_, ok := infos[0].Object.(*corev1.Pod)
	if ok {
		// If we got a pod, just use its name
		return nil, infos[0].Name, infos[0].Namespace, "", nil
	}
	// Not a pod: derive the label selector from the owning resource (Deployment, Gateway, ...).
	namespace, selector, err := SelectorsForObject(infos[0].Object)
	if err != nil {
		return nil, "", "", "", fmt.Errorf("%q does not refer to a pod: %v", resname, err)
	}
	clientConfig, err := factory.ToRESTConfig()
	if err != nil {
		return nil, "", "", "", err
	}
	clientset, err := corev1client.NewForConfig(clientConfig)
	if err != nil {
		return nil, "", "", "", err
	}
	return clientset, "", namespace, selector.String(), nil
}
// getFirstPodFunc is a seam for tests: when non-nil it replaces
// polymorphichelpers.GetFirstPod. It should not be changed in regular code.
var getFirstPodFunc func(client corev1client.PodsGetter, namespace string, selector string, timeout time.Duration,
	sortBy func([]*corev1.Pod) sort.Interface) (*corev1.Pod, int, error)
// InferPodInfoFromTypedResource gets a pod name, from an expression like Deployment/httpbin, or Deployment/productpage-v1.bookinfo
func InferPodInfoFromTypedResource(name, defaultNS string, factory cmdutil.Factory) (string, string, error) {
	resname, ns := inferNsInfo(name, defaultNS)
	if !strings.Contains(resname, "/") {
		// Already a plain pod name; no lookup required.
		return resname, ns, nil
	}
	client, podName, namespace, selector, err := getClientForResource(resname, ns, factory)
	if err != nil {
		return "", "", err
	}
	if podName != "" {
		return podName, namespace, nil
	}
	// We need to pass in a sorter, and the one used by `kubectl logs` is good enough.
	sortBy := func(pods []*corev1.Pod) sort.Interface { return podutils.ByLogging(pods) }
	if getFirstPodFunc == nil {
		getFirstPodFunc = polymorphichelpers.GetFirstPod
	}
	const timeout = 2 * time.Second
	pod, _, err := getFirstPodFunc(client, namespace, selector, timeout, sortBy)
	if err != nil {
		// NOTE: the underlying error is deliberately replaced with a simpler message.
		return "", "", fmt.Errorf("no pods match %q", resname)
	}
	return pod.Name, namespace, nil
}
// SelectorsForObject is a fork of upstream function to add additional Istio type support
func SelectorsForObject(object runtime.Object) (string, labels.Selector, error) {
	switch t := object.(type) {
	case *gatewayapi.Gateway:
		if !gateway.IsManaged(&t.Spec) {
			return "", nil, fmt.Errorf("gateway is not a managed gateway")
		}
		sel, err := labels.Parse(constants.GatewayNameLabel + "=" + t.Name)
		return t.Namespace, sel, err
	case *gatewayapibeta.Gateway:
		if !gateway.IsManaged(&t.Spec) {
			return "", nil, fmt.Errorf("gateway is not a managed gateway")
		}
		sel, err := labels.Parse(constants.GatewayNameLabel + "=" + t.Name)
		return t.Namespace, sel, err
	default:
		// All non-Gateway kinds use the upstream kubectl implementation.
		return polymorphichelpers.SelectorsForObject(object)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compare
import (
"bytes"
"fmt"
"github.com/pmezard/go-difflib/difflib"
"istio.io/istio/pkg/util/protomarshal"
)
// ClusterDiff prints a diff between Istiod and Envoy clusters to the passed writer
func (c *Comparator) ClusterDiff() error {
	envoyBuf, istiodBuf := &bytes.Buffer{}, &bytes.Buffer{}
	// A dump failure is recorded as the buffer content so the diff still shows it.
	if dump, err := c.envoy.GetDynamicClusterDump(true); err != nil {
		envoyBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		envoyBuf.WriteString(j)
	}
	if dump, err := c.istiod.GetDynamicClusterDump(true); err != nil {
		istiodBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		istiodBuf.WriteString(j)
	}
	diff := difflib.UnifiedDiff{
		FromFile: "Istiod Clusters",
		A:        difflib.SplitLines(istiodBuf.String()),
		ToFile:   "Envoy Clusters",
		B:        difflib.SplitLines(envoyBuf.String()),
		Context:  c.context,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		return err
	}
	if text == "" {
		fmt.Fprintln(c.w, "Clusters Match")
	} else {
		fmt.Fprintln(c.w, text)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compare
import (
"encoding/json"
"fmt"
"io"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/istioctl/pkg/util/configdump"
)
// Comparator diffs between a config dump from Istiod and one from Envoy
type Comparator struct {
	// envoy and istiod hold the two config dumps being compared.
	envoy, istiod *configdump.Wrapper
	// w receives the rendered diff output.
	w io.Writer
	// context is the number of unified-diff context lines.
	context int
	// location names the time.Location used when formatting time.Time instances.
	location string
}
// NewComparator is a comparator constructor
func NewComparator(w io.Writer, istiodResponses map[string][]byte, envoyResponse []byte) (*Comparator, error) {
	c := &Comparator{
		w:        w,
		context:  7,
		location: "Local", // the time.Location for formatting time.Time instances
	}
	// Use the first Istiod response that parses as a config dump.
	for _, resp := range istiodResponses {
		dump := &configdump.Wrapper{}
		if err := json.Unmarshal(resp, dump); err != nil {
			continue
		}
		c.istiod = dump
		break
	}
	if c.istiod == nil {
		return nil, fmt.Errorf("unable to find config dump in Istiod responses")
	}
	envoyDump := &configdump.Wrapper{}
	if err := json.Unmarshal(envoyResponse, envoyDump); err != nil {
		return nil, err
	}
	c.envoy = envoyDump
	return c, nil
}
// NewXdsComparator is a comparator constructor
func NewXdsComparator(w io.Writer, istiodResponses map[string]*discovery.DiscoveryResponse, envoyResponse []byte) (*Comparator, error) {
	c := &Comparator{
		w:        w,
		context:  7,
		location: "Local", // the time.Location for formatting time.Time instances
	}
	// Use the first Istiod response that actually carries resources.
	for _, resp := range istiodResponses {
		if len(resp.Resources) > 0 {
			c.istiod = &configdump.Wrapper{
				ConfigDump: &admin.ConfigDump{
					Configs: resp.Resources,
				},
			}
			break
		}
	}
	if c.istiod == nil {
		return nil, fmt.Errorf("unable to find config dump in Istiod responses")
	}
	envoyDump := &configdump.Wrapper{}
	if err := json.Unmarshal(envoyResponse, envoyDump); err != nil {
		return nil, err
	}
	c.envoy = envoyDump
	return c, nil
}
// Diff prints a diff between Istiod and Envoy to the passed writer
func (c *Comparator) Diff() error {
	// Run the three section diffs in order, stopping at the first failure.
	for _, section := range []func() error{c.ClusterDiff, c.ListenerDiff, c.RouteDiff} {
		if err := section(); err != nil {
			return err
		}
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compare
import (
"bytes"
"fmt"
"github.com/pmezard/go-difflib/difflib"
"istio.io/istio/pkg/util/protomarshal"
)
// ListenerDiff prints a diff between Istiod and Envoy listeners to the passed writer
func (c *Comparator) ListenerDiff() error {
	envoyBuf, istiodBuf := &bytes.Buffer{}, &bytes.Buffer{}
	// A dump failure is recorded as the buffer content so the diff still shows it.
	if dump, err := c.envoy.GetDynamicListenerDump(true); err != nil {
		envoyBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		envoyBuf.WriteString(j)
	}
	if dump, err := c.istiod.GetDynamicListenerDump(true); err != nil {
		istiodBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		istiodBuf.WriteString(j)
	}
	diff := difflib.UnifiedDiff{
		FromFile: "Istiod Listeners",
		A:        difflib.SplitLines(istiodBuf.String()),
		ToFile:   "Envoy Listeners",
		B:        difflib.SplitLines(envoyBuf.String()),
		Context:  c.context,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		return err
	}
	if text == "" {
		fmt.Fprintln(c.w, "Listeners Match")
	} else {
		fmt.Fprintln(c.w, text)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compare
import (
"bytes"
"fmt"
"time"
"github.com/pmezard/go-difflib/difflib"
"istio.io/istio/pkg/util/protomarshal"
)
// RouteDiff prints a diff between Istiod and Envoy routes to the passed writer
func (c *Comparator) RouteDiff() error {
	envoyBuf, istiodBuf := &bytes.Buffer{}, &bytes.Buffer{}
	// A dump failure is recorded as the buffer content so the diff still shows it.
	if dump, err := c.envoy.GetDynamicRouteDump(true); err != nil {
		envoyBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		envoyBuf.WriteString(j)
	}
	if dump, err := c.istiod.GetDynamicRouteDump(true); err != nil {
		istiodBuf.WriteString(err.Error())
	} else {
		j, err := protomarshal.ToJSONWithIndent(dump, "    ")
		if err != nil {
			return err
		}
		istiodBuf.WriteString(j)
	}
	diff := difflib.UnifiedDiff{
		FromFile: "Istiod Routes",
		A:        difflib.SplitLines(istiodBuf.String()),
		ToFile:   "Envoy Routes",
		B:        difflib.SplitLines(envoyBuf.String()),
		Context:  c.context,
	}
	text, err := difflib.GetUnifiedDiffString(diff)
	if err != nil {
		return err
	}
	// Annotate the result with the RDS last-loaded time when Envoy reports one.
	lastUpdatedStr := ""
	lastUpdated, err := c.envoy.GetLastUpdatedDynamicRouteTime()
	if err != nil {
		return err
	}
	if lastUpdated != nil {
		loc, locErr := time.LoadLocation(c.location)
		if locErr != nil {
			loc, _ = time.LoadLocation("UTC")
		}
		lastUpdatedStr = fmt.Sprintf(" (RDS last loaded at %s)", lastUpdated.In(loc).Format(time.RFC1123))
	}
	if text == "" {
		fmt.Fprintf(c.w, "Routes Match%s\n", lastUpdatedStr)
	} else {
		fmt.Fprintf(c.w, "Routes Don't Match%s\n", lastUpdatedStr)
		fmt.Fprintln(c.w, text)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package manifests
import (
"embed"
"io/fs"
"os"
)
// FS embeds the Helm charts and installation profiles so the binary is
// self-contained. The go:embed directive below must stay immediately above the var.
//
//go:embed all:charts/* profiles/*
var FS embed.FS
// BuiltinOrDir returns a FS for the provided directory. If no directory is passed, the compiled in
// FS will be used
func BuiltinOrDir(dir string) fs.FS {
	if dir != "" {
		return os.DirFS(dir)
	}
	return FS
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istio
import (
"fmt"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
operator_v1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/validate"
)
// UnmarshalAndValidateIOPS unmarshals a string containing IstioOperator YAML, validates it, and returns a struct
// representation if successful. In case of validation errors, it returns both the IstioOperatorSpec struct and
// an error, so the caller can decide how to handle it.
func UnmarshalAndValidateIOPS(iopsYAML string) (*v1alpha1.IstioOperatorSpec, error) {
	iops := &v1alpha1.IstioOperatorSpec{}
	if err := util.UnmarshalWithJSONPB(iopsYAML, iops, false); err != nil {
		return nil, fmt.Errorf("could not unmarshal the merged YAML: %s\n\nYAML:\n%s", err, iopsYAML)
	}
	if errs := validate.CheckIstioOperatorSpec(iops, true); len(errs) != 0 {
		// Use a constant format string: passing errs.Error() directly as the
		// format would misinterpret any '%' it contains (go vet printf check).
		return iops, fmt.Errorf("%s", errs.Error())
	}
	return iops, nil
}
// UnmarshalIstioOperator unmarshals a string containing IstioOperator YAML.
// allowUnknownField selects lenient vs strict decoding of unrecognized fields.
func UnmarshalIstioOperator(iopYAML string, allowUnknownField bool) (*operator_v1alpha1.IstioOperator, error) {
	iop := &operator_v1alpha1.IstioOperator{}
	unmarshal := yaml.UnmarshalStrict
	if allowUnknownField {
		unmarshal = yaml.Unmarshal
	}
	if err := unmarshal([]byte(iopYAML), iop); err != nil {
		return nil, fmt.Errorf("could not unmarshal: %v", err)
	}
	return iop, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"istio.io/api/operator/v1alpha1"
)
const (
	// globalKey is the top-level Helm values key holding global settings.
	globalKey = "global"
	// istioNamespaceKey is the key under global that holds the Istio namespace.
	istioNamespaceKey = "istioNamespace"
)
// Namespace returns the namespace of the containing CR. It prefers the
// explicit Namespace field and falls back to values.global.istioNamespace.
// Returns "" when neither is set or when the values are not shaped as expected.
func Namespace(iops *v1alpha1.IstioOperatorSpec) string {
	if iops.Namespace != "" {
		return iops.Namespace
	}
	if iops.Values == nil {
		return ""
	}
	v := iops.Values.AsMap()
	// Use comma-ok assertions: the values here are user supplied, and a bare
	// type assertion would panic on an unexpected type instead of falling
	// back to "".
	vg, ok := v[globalKey].(map[string]any)
	if !ok {
		return ""
	}
	n, ok := vg[istioNamespaceKey].(string)
	if !ok {
		return ""
	}
	return n
}
// SetNamespace sets the Namespace field of the containing CR.
// An empty namespace argument is ignored, leaving the current value in place.
func SetNamespace(iops *v1alpha1.IstioOperatorSpec, namespace string) {
	if namespace != "" {
		iops.Namespace = namespace
	}
	// TODO implement
	// NOTE(review): presumably this should also mirror the namespace into
	// values.global.istioNamespace, the fallback read by Namespace() above —
	// confirm the intent of this TODO before implementing.
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// NOTE: Boilerplate only. Ignore this file.
// Package v1alpha1 contains API Schema definitions for the istio v1alpha1 API group
// +k8s:deepcopy-gen=package,register
// +groupName=install.istio.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
	// IstioOperatorGVK is GVK for IstioOperator
	IstioOperatorGVK = schema.GroupVersionKind{
		Version: "v1alpha1",
		Group:   "install.istio.io",
		Kind:    "IstioOperator",
	}
	// SchemeGroupVersion is group version used to register these objects
	SchemeGroupVersion = IstioOperatorGVK.GroupVersion()
	// IstioOperatorGVR is the GroupVersionResource ("istiooperators") matching IstioOperatorGVK
	IstioOperatorGVR = schema.GroupVersionResource{
		Group:    SchemeGroupVersion.Group,
		Version:  SchemeGroupVersion.Version,
		Resource: "istiooperators",
	}
	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
)
// Register the IstioOperator and IstioOperatorList API kinds with the
// package's SchemeBuilder so schemes built from it can decode these types.
func init() {
	SchemeBuilder.Register(&IstioOperator{}, &IstioOperatorList{})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"errors"
"fmt"
"reflect"
"strings"
"unicode"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"k8s.io/apimachinery/pkg/util/intstr"
"istio.io/api/operator/v1alpha1"
valuesv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
)
const (
	// validationMethodName is the method name looked up via reflection by ValidateSubTypes.
	validationMethodName = "Validate"
)
// deprecatedSettings describes a deprecated configuration path and the
// replacement users should migrate to.
type deprecatedSettings struct {
	// old is the struct path of the deprecated setting; new describes its replacement.
	old string
	new string
	// In order to distinguish "unset" for non-pointer values, we need to specify the default value
	def any
}
// ValidateConfig calls validation func for every defined element in Values
func ValidateConfig(failOnMissingValidation bool, iopls *v1alpha1.IstioOperatorSpec) (util.Errors, string) {
	// Round-trip the untyped Values into the typed struct before validating.
	values := &valuesv1alpha1.Values{}
	if err := util.UnmarshalWithJSONPB(util.ToYAMLWithJSONPB(iopls.Values), values, true); err != nil {
		return util.NewErrs(err), ""
	}
	errs := ValidateSubTypes(reflect.ValueOf(values).Elem(), failOnMissingValidation, values, iopls)
	// Cross-field ("feature") checks produce both hard errors and warnings.
	featureErrs, warnings := validateFeatures(values, iopls)
	errs = util.AppendErrs(errs, featureErrs)
	// Deprecated settings contribute at most one combined error plus per-setting warnings.
	deprecatedErrs, deprecatedWarnings := checkDeprecatedSettings(iopls)
	if deprecatedErrs != nil {
		errs = util.AppendErr(errs, deprecatedErrs)
	}
	warnings = append(warnings, deprecatedWarnings...)
	return errs, strings.Join(warnings, "\n")
}
// firstCharsToLower converts struct paths to helm paths by lowercasing the
// first rune of every dot-separated segment, e.g.
// Global.Proxy.AccessLogFormat -> global.proxy.accessLogFormat.
func firstCharsToLower(s string) string {
	segments := strings.Split(s, ".")
	for i, segment := range segments {
		runes := []rune(segment)
		if len(runes) > 0 {
			runes[0] = unicode.ToLower(runes[0])
			segments[i] = string(runes)
		}
	}
	return strings.Join(segments, ".")
}
// checkDeprecatedSettings scans iop for known deprecated configuration paths.
// Paths in warningSettings only produce a warning message; paths in
// failHardSettings produce a hard error. Returns the collected errors and the
// warning messages.
func checkDeprecatedSettings(iop *v1alpha1.IstioOperatorSpec) (util.Errors, []string) {
	var errs util.Errors
	messages := []string{}
	warningSettings := []deprecatedSettings{
		{"Values.global.certificates", "meshConfig.certificates", nil},
		{"Values.global.outboundTrafficPolicy", "meshConfig.outboundTrafficPolicy", nil},
		{"Values.global.localityLbSetting", "meshConfig.localityLbSetting", nil},
		{"Values.global.policyCheckFailOpen", "meshConfig.policyCheckFailOpen", false},
		{"Values.global.enableTracing", "meshConfig.enableTracing", false},
		{"Values.global.proxy.accessLogFormat", "meshConfig.accessLogFormat", ""},
		{"Values.global.proxy.accessLogFile", "meshConfig.accessLogFile", ""},
		{"Values.global.proxy.concurrency", "meshConfig.defaultConfig.concurrency", uint32(0)},
		{"Values.global.proxy.envoyAccessLogService", "meshConfig.defaultConfig.envoyAccessLogService", nil},
		{"Values.global.proxy.envoyAccessLogService.enabled", "meshConfig.enableEnvoyAccessLogService", nil},
		{"Values.global.proxy.envoyMetricsService", "meshConfig.defaultConfig.envoyMetricsService", nil},
		{"Values.global.proxy.protocolDetectionTimeout", "meshConfig.protocolDetectionTimeout", ""},
		{"Values.global.proxy.holdApplicationUntilProxyStarts", "meshConfig.defaultConfig.holdApplicationUntilProxyStarts", false},
		{"Values.pilot.ingress", "meshConfig.ingressService, meshConfig.ingressControllerMode, and meshConfig.ingressClass", nil},
		{"Values.global.mtls.enabled", "the PeerAuthentication resource", nil},
		{"Values.global.mtls.auto", "meshConfig.enableAutoMtls", nil},
		{"Values.global.tracer.lightstep.address", "meshConfig.defaultConfig.tracing.lightstep.address", ""},
		{"Values.global.tracer.lightstep.accessToken", "meshConfig.defaultConfig.tracing.lightstep.accessToken", ""},
		{"Values.global.tracer.zipkin.address", "meshConfig.defaultConfig.tracing.zipkin.address", nil},
		{"Values.global.tracer.datadog.address", "meshConfig.defaultConfig.tracing.datadog.address", ""},
		{"Values.global.meshExpansion.enabled", "Gateway and other Istio networking resources, such as in samples/multicluster/", false},
		{"Values.gateways.istio-ingressgateway.meshExpansionPorts", "components.ingressGateways[name=istio-ingressgateway].k8s.service.ports", nil},
		{"AddonComponents.istiocoredns.Enabled", "the in-proxy DNS capturing (ISTIO_META_DNS_CAPTURE)", false},
		{"Values.istiocoredns.enabled", "the in-proxy DNS capturing (ISTIO_META_DNS_CAPTURE)", false},
		// nolint: lll
		{"Values.global.jwtPolicy", "Values.global.jwtPolicy=third-party-jwt. See https://istio.io/latest/docs/ops/best-practices/security/#configure-third-party-service-account-tokens for more information", "third-party-jwt"},
		{"Values.global.centralIstiod", "Values.global.externalIstiod", false},
		{"Values.global.arch", "the affinity of k8s settings", nil},
	}
	failHardSettings := []deprecatedSettings{
		{"Values.grafana.enabled", "the samples/addons/ deployments", false},
		{"Values.tracing.enabled", "the samples/addons/ deployments", false},
		{"Values.kiali.enabled", "the samples/addons/ deployments", false},
		{"Values.prometheus.enabled", "the samples/addons/ deployments", false},
		{"AddonComponents.grafana.Enabled", "the samples/addons/ deployments", false},
		{"AddonComponents.tracing.Enabled", "the samples/addons/ deployments", false},
		{"AddonComponents.kiali.Enabled", "the samples/addons/ deployments", false},
		{"AddonComponents.prometheus.Enabled", "the samples/addons/ deployments", false},
		{"Values.global.tracer.stackdriver.debug", "meshConfig.defaultConfig.tracing.stackdriver.debug", false},
		{"Values.global.tracer.stackdriver.maxNumberOfAttributes", "meshConfig.defaultConfig.tracing.stackdriver.maxNumberOfAttributes", 0},
		{"Values.global.tracer.stackdriver.maxNumberOfAnnotations", "meshConfig.defaultConfig.tracing.stackdriver.maxNumberOfAnnotations", 0},
		{"Values.global.tracer.stackdriver.maxNumberOfMessageEvents", "meshConfig.defaultConfig.tracing.stackdriver.maxNumberOfMessageEvents", 0},
		{"telemetry.v2.prometheus.configOverride", "custom configuration", nil},
		{"telemetry.v2.stackdriver.configOverride", "custom configuration", nil},
		{"telemetry.v2.stackdriver.disableOutbound", "custom configuration", nil},
		{"telemetry.v2.stackdriver.outboundAccessLogging", "custom configuration", nil},
	}
	// deprecationMessage returns the user-facing message for d and whether the
	// deprecated path is actually set to a non-default value in iop. Shared by
	// both loops below so the lookup/compare logic exists in one place.
	deprecationMessage := func(d deprecatedSettings) (string, bool) {
		v, f, _ := tpath.GetFromStructPath(iop, d.old)
		if !f {
			return "", false
		}
		switch t := v.(type) {
		// need to do conversion for bool value defined in IstioOperator component spec.
		case *wrappers.BoolValue:
			v = t.Value
		}
		if v == d.def {
			return "", false
		}
		return fmt.Sprintf("! %s is deprecated; use %s instead", firstCharsToLower(d.old), d.new), true
	}
	for _, d := range warningSettings {
		if m, ok := deprecationMessage(d); ok {
			messages = append(messages, m)
		}
	}
	for _, d := range failHardSettings {
		if m, ok := deprecationMessage(d); ok {
			errs = util.AppendErr(errs, errors.New(m+"\n"))
		}
	}
	return errs, messages
}
// FeatureValidator validates one aspect of the merged values/spec and returns
// any hard errors plus human-readable warnings.
type FeatureValidator func(*valuesv1alpha1.Values, *v1alpha1.IstioOperatorSpec) (util.Errors, []string)
// validateFeatures check whether the config semantically make sense. For example, feature X and feature Y can't be enabled together.
func validateFeatures(values *valuesv1alpha1.Values, spec *v1alpha1.IstioOperatorSpec) (util.Errors, []string) {
	var errs util.Errors
	var warnings []string
	// Run every registered feature check, accumulating errors and warnings.
	for _, check := range []FeatureValidator{
		CheckServicePorts,
		CheckAutoScaleAndReplicaCount,
	} {
		checkErrs, checkWarnings := check(values, spec)
		errs = util.AppendErrs(errs, checkErrs)
		warnings = append(warnings, checkWarnings...)
	}
	return errs, warnings
}
// CheckAutoScaleAndReplicaCount warns when autoscaleEnabled is true and k8s replicaCount is set.
func CheckAutoScaleAndReplicaCount(values *valuesv1alpha1.Values, spec *v1alpha1.IstioOperatorSpec) (errs util.Errors, warnings []string) {
	// A fixed replicaCount fights the autoscaler, so flag the combination.
	if values.GetPilot().GetAutoscaleEnabled().GetValue() &&
		spec.GetComponents().GetPilot().GetK8S().GetReplicaCount() > 1 {
		warnings = append(warnings,
			"components.pilot.k8s.replicaCount should not be set when values.pilot.autoscaleEnabled is true")
	}
	// warnGateways emits one warning per gateway of the given type that has an
	// explicit replicaCount while autoscaling is enabled for that type.
	warnGateways := func(gateways []*v1alpha1.GatewaySpec, gwType string) {
		const format = "components.%sGateways[name=%s].k8s.replicaCount should not be set when values.gateways.istio-%sgateway.autoscaleEnabled is true"
		for _, gateway := range gateways {
			if gateway.GetK8S().GetReplicaCount() != 0 {
				warnings = append(warnings, fmt.Sprintf(format, gwType, gateway.Name, gwType))
			}
		}
	}
	gatewayValues := values.GetGateways()
	if gatewayValues.GetIstioIngressgateway().GetAutoscaleEnabled().GetValue() {
		warnGateways(spec.GetComponents().GetIngressGateways(), "ingress")
	}
	if gatewayValues.GetIstioEgressgateway().GetAutoscaleEnabled().GetValue() {
		warnGateways(spec.GetComponents().GetEgressGateways(), "egress")
	}
	return
}
// CheckServicePorts validates Service ports. Specifically, this currently
// asserts that all ports will bind to a port number greater than 1024 when not
// running as root.
func CheckServicePorts(values *valuesv1alpha1.Values, spec *v1alpha1.IstioOperatorSpec) (errs util.Errors, warnings []string) {
	gatewayValues := values.GetGateways()
	if !gatewayValues.GetIstioIngressgateway().GetRunAsRoot().GetValue() {
		errs = util.AppendErrs(errs, validateGateways(spec.GetComponents().GetIngressGateways(), "istio-ingressgateway"))
	}
	if !gatewayValues.GetIstioEgressgateway().GetRunAsRoot().GetValue() {
		errs = util.AppendErrs(errs, validateGateways(spec.GetComponents().GetEgressGateways(), "istio-egressgateway"))
	}
	for _, raw := range gatewayValues.GetIstioIngressgateway().GetIngressPorts() {
		entry := raw.AsMap()
		// JSON numbers decode as float64; entries with non-numeric port values
		// are skipped rather than reported.
		targetPort := 0
		if entry["targetPort"] != nil {
			f, ok := entry["targetPort"].(float64)
			if !ok {
				continue
			}
			targetPort = int(f)
		}
		f, ok := entry["port"].(float64)
		if !ok {
			continue
		}
		port := int(f)
		if targetPort == 0 && port > 1024 {
			// Target port defaults to port. If its >1024, it is safe.
			continue
		}
		if targetPort < 1024 {
			// nolint: lll
			errs = util.AppendErr(errs, fmt.Errorf("port %v is invalid: targetPort is set to %v, which requires root. Set targetPort to be greater than 1024 or configure values.gateways.istio-ingressgateway.runAsRoot=true", port, targetPort))
		}
	}
	return
}
// validateGateways checks that every service port of the given gateways will
// bind above 1024, which is required when the gateway does not run as root.
// Named (string) target ports are not validated.
func validateGateways(gateways []*v1alpha1.GatewaySpec, name string) util.Errors {
	// nolint: lll
	format := "port %v/%v in gateway %v invalid: targetPort is set to %d, which requires root. Set targetPort to be greater than 1024 or configure values.gateways.%s.runAsRoot=true"
	var errs util.Errors
	for _, gateway := range gateways {
		for _, port := range gateway.GetK8S().GetService().GetPorts() {
			if port == nil {
				continue
			}
			targetPort := 0
			switch {
			case port.TargetPort != nil && port.TargetPort.Type == int64(intstr.String):
				// Do not validate named ports
				continue
			case port.TargetPort != nil && port.TargetPort.Type == int64(intstr.Int):
				targetPort = int(port.TargetPort.IntVal.GetValue())
			}
			if targetPort == 0 && port.Port > 1024 {
				// Target port defaults to port. If its >1024, it is safe.
				continue
			}
			if targetPort < 1024 {
				errs = util.AppendErr(errs, fmt.Errorf(format, port.Name, port.Port, gateway.Name, targetPort, name))
			}
		}
	}
	return errs
}
// ValidateSubTypes recursively walks e, calling any method named "Validate"
// found on the value (or its pointer) and descending into struct fields,
// slices and maps. The reflected Validate is called as
// Validate(failOnMissingValidation, values, iopls) and must return util.Errors.
// When failOnMissingValidation is true, a type without a Validate method is
// itself reported as an error. Returns all collected validation errors.
func ValidateSubTypes(e reflect.Value, failOnMissingValidation bool, values *valuesv1alpha1.Values, iopls *v1alpha1.IstioOperatorSpec) util.Errors {
	// Dealing with receiver pointer and receiver value
	ptr := e
	k := e.Kind()
	if k == reflect.Ptr || k == reflect.Interface {
		e = e.Elem()
	}
	if !e.IsValid() {
		return nil
	}
	// check for method on value, falling back to the method set of the pointer
	method := e.MethodByName(validationMethodName)
	if !method.IsValid() {
		method = ptr.MethodByName(validationMethodName)
	}
	var validationErrors util.Errors
	if util.IsNilOrInvalidValue(method) {
		if failOnMissingValidation {
			validationErrors = append(validationErrors, fmt.Errorf("type %s is missing Validation method", e.Type().String()))
		}
	} else {
		// NOTE(review): assumes every Validate method has the exact signature
		// described above; a mismatched signature would panic here.
		r := method.Call([]reflect.Value{reflect.ValueOf(failOnMissingValidation), reflect.ValueOf(values), reflect.ValueOf(iopls)})[0].Interface().(util.Errors)
		if len(r) != 0 {
			validationErrors = append(validationErrors, r...)
		}
	}
	// If it is not a struct nothing to do, returning previously collected validation errors
	if e.Kind() != reflect.Struct {
		return validationErrors
	}
	for i := 0; i < e.NumField(); i++ {
		// Corner case of a slice of something, if something is defined type, then process it recursively.
		if e.Field(i).Kind() == reflect.Slice {
			validationErrors = append(validationErrors, processSlice(e.Field(i), failOnMissingValidation, values, iopls)...)
			continue
		}
		if e.Field(i).Kind() == reflect.Map {
			validationErrors = append(validationErrors, processMap(e.Field(i), failOnMissingValidation, values, iopls)...)
			continue
		}
		// Validation is not required if it is not a defined type
		if e.Field(i).Kind() != reflect.Interface && e.Field(i).Kind() != reflect.Ptr {
			continue
		}
		// Skip nil pointers/interfaces; there is nothing to validate behind them.
		val := e.Field(i).Elem()
		if util.IsNilOrInvalidValue(val) {
			continue
		}
		validationErrors = append(validationErrors, ValidateSubTypes(e.Field(i), failOnMissingValidation, values, iopls)...)
	}
	return validationErrors
}
// processSlice validates every element of the slice value e.
func processSlice(e reflect.Value, failOnMissingValidation bool, values *valuesv1alpha1.Values, iopls *v1alpha1.IstioOperatorSpec) util.Errors {
	var errs util.Errors
	for i, n := 0, e.Len(); i < n; i++ {
		errs = append(errs, ValidateSubTypes(e.Index(i), failOnMissingValidation, values, iopls)...)
	}
	return errs
}
// processMap validates every value of the map value e (keys are not validated).
func processMap(e reflect.Value, failOnMissingValidation bool, values *valuesv1alpha1.Values, iopls *v1alpha1.IstioOperatorSpec) util.Errors {
	var errs util.Errors
	iter := e.MapRange()
	for iter.Next() {
		errs = append(errs, ValidateSubTypes(iter.Value(), failOnMissingValidation, values, iopls)...)
	}
	return errs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-golang. DO NOT EDIT.
// source: operator/v1alpha1/operator.proto
// Configuration affecting Istio control plane installation version and shape.
package v1alpha1
import (
"encoding/json"
github_com_golang_protobuf_jsonpb "github.com/golang/protobuf/jsonpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"k8s.io/apimachinery/pkg/util/intstr"
)
var _ github_com_golang_protobuf_jsonpb.JSONPBUnmarshaler = &IntOrString{}
// UnmarshalJSON implements the json.Unmarshaller interface.
// A payload starting with '"' is decoded as the string form; anything else is
// decoded as an int32.
func (this *IntOrString) UnmarshalJSON(value []byte) error {
	// Guard the index: an empty payload falls through to json.Unmarshal below,
	// which returns a proper "unexpected end of JSON input" error instead of
	// this method panicking on value[0].
	if len(value) > 0 && value[0] == '"' {
		this.Type = int64(intstr.String)
		var s string
		if err := json.Unmarshal(value, &s); err != nil {
			return err
		}
		this.StrVal = &wrappers.StringValue{Value: s}
		return nil
	}
	this.Type = int64(intstr.Int)
	var i int32
	if err := json.Unmarshal(value, &i); err != nil {
		return err
	}
	this.IntVal = &wrappers.Int32Value{Value: i}
	return nil
}
// MarshalJSONPB implements the jsonpb marshaler interface by delegating to the
// plain JSON marshaler; the jsonpb.Marshaler argument is unused.
func (this *IntOrString) MarshalJSONPB(_ *github_com_golang_protobuf_jsonpb.Marshaler) ([]byte, error) {
	return this.MarshalJSON()
}
// MarshalJSON emits the int form when IntVal is set; otherwise it emits the
// string form (an unset StrVal marshals as the empty string).
func (this *IntOrString) MarshalJSON() ([]byte, error) {
	if this.IntVal == nil {
		return json.Marshal(this.StrVal.GetValue())
	}
	return json.Marshal(this.IntVal.GetValue())
}
// UnmarshalJSONPB implements the jsonpb unmarshaler interface by delegating to
// the plain JSON unmarshaler; the jsonpb.Unmarshaler argument is unused.
func (this *IntOrString) UnmarshalJSONPB(_ *github_com_golang_protobuf_jsonpb.Unmarshaler, value []byte) error {
	return this.UnmarshalJSON(value)
}
// ToKubernetes converts to the canonical Kubernetes intstr.IntOrString,
// preferring the int form when IntVal is set.
func (this *IntOrString) ToKubernetes() intstr.IntOrString {
	if this.IntVal == nil {
		return intstr.FromString(this.GetStrVal().GetValue())
	}
	return intstr.FromInt32(this.GetIntVal().GetValue())
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc (unknown)
// source: pkg/apis/istio/v1alpha1/values_types.proto
package v1alpha1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
_ "google.golang.org/protobuf/types/known/anypb"
durationpb "google.golang.org/protobuf/types/known/durationpb"
structpb "google.golang.org/protobuf/types/known/structpb"
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Mode for the ingress controller.
type IngressControllerMode int32
const (
// Unspecified Istio ingress controller.
IngressControllerMode_UNSPECIFIED IngressControllerMode = 0
// Selects all Ingress resources, with or without Istio annotation.
IngressControllerMode_DEFAULT IngressControllerMode = 1
// Selects only resources with istio annotation.
IngressControllerMode_STRICT IngressControllerMode = 2
// No ingress or sync.
IngressControllerMode_OFF IngressControllerMode = 3
)
// Enum value maps for IngressControllerMode.
var (
IngressControllerMode_name = map[int32]string{
0: "UNSPECIFIED",
1: "DEFAULT",
2: "STRICT",
3: "OFF",
}
IngressControllerMode_value = map[string]int32{
"UNSPECIFIED": 0,
"DEFAULT": 1,
"STRICT": 2,
"OFF": 3,
}
)
func (x IngressControllerMode) Enum() *IngressControllerMode {
p := new(IngressControllerMode)
*p = x
return p
}
func (x IngressControllerMode) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (IngressControllerMode) Descriptor() protoreflect.EnumDescriptor {
return file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[0].Descriptor()
}
func (IngressControllerMode) Type() protoreflect.EnumType {
return &file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[0]
}
func (x IngressControllerMode) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use IngressControllerMode.Descriptor instead.
func (IngressControllerMode) EnumDescriptor() ([]byte, []int) {
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{0}
}
// Specifies which tracer to use.
type Tracer int32
const (
Tracer_zipkin Tracer = 0
Tracer_lightstep Tracer = 1
Tracer_datadog Tracer = 2
Tracer_stackdriver Tracer = 3
Tracer_openCensusAgent Tracer = 4
Tracer_none Tracer = 5
)
// Enum value maps for Tracer.
var (
Tracer_name = map[int32]string{
0: "zipkin",
1: "lightstep",
2: "datadog",
3: "stackdriver",
4: "openCensusAgent",
5: "none",
}
Tracer_value = map[string]int32{
"zipkin": 0,
"lightstep": 1,
"datadog": 2,
"stackdriver": 3,
"openCensusAgent": 4,
"none": 5,
}
)
func (x Tracer) Enum() *Tracer {
p := new(Tracer)
*p = x
return p
}
func (x Tracer) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (Tracer) Descriptor() protoreflect.EnumDescriptor {
return file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[1].Descriptor()
}
func (Tracer) Type() protoreflect.EnumType {
return &file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[1]
}
func (x Tracer) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use Tracer.Descriptor instead.
func (Tracer) EnumDescriptor() ([]byte, []int) {
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{1}
}
// Specifies the sidecar's default behavior when handling outbound traffic from the application.
type OutboundTrafficPolicyConfig_Mode int32
const (
// Outbound traffic to unknown destinations will be allowed, in case there are no services or ServiceEntries for the destination port
OutboundTrafficPolicyConfig_ALLOW_ANY OutboundTrafficPolicyConfig_Mode = 0
// Restrict outbound traffic to services defined in the service registry as well as those defined through ServiceEntries
OutboundTrafficPolicyConfig_REGISTRY_ONLY OutboundTrafficPolicyConfig_Mode = 1
)
// Enum value maps for OutboundTrafficPolicyConfig_Mode.
var (
OutboundTrafficPolicyConfig_Mode_name = map[int32]string{
0: "ALLOW_ANY",
1: "REGISTRY_ONLY",
}
OutboundTrafficPolicyConfig_Mode_value = map[string]int32{
"ALLOW_ANY": 0,
"REGISTRY_ONLY": 1,
}
)
func (x OutboundTrafficPolicyConfig_Mode) Enum() *OutboundTrafficPolicyConfig_Mode {
p := new(OutboundTrafficPolicyConfig_Mode)
*p = x
return p
}
func (x OutboundTrafficPolicyConfig_Mode) String() string {
return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (OutboundTrafficPolicyConfig_Mode) Descriptor() protoreflect.EnumDescriptor {
return file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[2].Descriptor()
}
func (OutboundTrafficPolicyConfig_Mode) Type() protoreflect.EnumType {
return &file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes[2]
}
func (x OutboundTrafficPolicyConfig_Mode) Number() protoreflect.EnumNumber {
return protoreflect.EnumNumber(x)
}
// Deprecated: Use OutboundTrafficPolicyConfig_Mode.Descriptor instead.
func (OutboundTrafficPolicyConfig_Mode) EnumDescriptor() ([]byte, []int) {
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{19, 0}
}
// ArchConfig specifies the pod scheduling target architecture(amd64, ppc64le, s390x, arm64)
// for all the Istio control plane components.
type ArchConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Sets pod scheduling weight for amd64 arch
Amd64 uint32 `protobuf:"varint,1,opt,name=amd64,proto3" json:"amd64,omitempty"`
// Sets pod scheduling weight for ppc64le arch.
Ppc64Le uint32 `protobuf:"varint,2,opt,name=ppc64le,proto3" json:"ppc64le,omitempty"`
// Sets pod scheduling weight for s390x arch.
S390X uint32 `protobuf:"varint,3,opt,name=s390x,proto3" json:"s390x,omitempty"`
// Sets pod scheduling weight for arm64 arch.
Arm64 uint32 `protobuf:"varint,4,opt,name=arm64,proto3" json:"arm64,omitempty"`
}
func (x *ArchConfig) Reset() {
*x = ArchConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *ArchConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*ArchConfig) ProtoMessage() {}
func (x *ArchConfig) ProtoReflect() protoreflect.Message {
mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use ArchConfig.ProtoReflect.Descriptor instead.
func (*ArchConfig) Descriptor() ([]byte, []int) {
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{0}
}
func (x *ArchConfig) GetAmd64() uint32 {
if x != nil {
return x.Amd64
}
return 0
}
func (x *ArchConfig) GetPpc64Le() uint32 {
if x != nil {
return x.Ppc64Le
}
return 0
}
func (x *ArchConfig) GetS390X() uint32 {
if x != nil {
return x.S390X
}
return 0
}
func (x *ArchConfig) GetArm64() uint32 {
if x != nil {
return x.Arm64
}
return 0
}
// Configuration for CNI.
type CNIConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Controls whether CNI is enabled.
Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
Hub string `protobuf:"bytes,2,opt,name=hub,proto3" json:"hub,omitempty"`
Tag *structpb.Value `protobuf:"bytes,3,opt,name=tag,proto3" json:"tag,omitempty"`
Variant string `protobuf:"bytes,29,opt,name=variant,proto3" json:"variant,omitempty"`
Image string `protobuf:"bytes,4,opt,name=image,proto3" json:"image,omitempty"`
PullPolicy string `protobuf:"bytes,5,opt,name=pullPolicy,proto3" json:"pullPolicy,omitempty"`
CniBinDir string `protobuf:"bytes,6,opt,name=cniBinDir,proto3" json:"cniBinDir,omitempty"`
CniConfDir string `protobuf:"bytes,7,opt,name=cniConfDir,proto3" json:"cniConfDir,omitempty"`
CniConfFileName string `protobuf:"bytes,8,opt,name=cniConfFileName,proto3" json:"cniConfFileName,omitempty"`
CniNetnsDir string `protobuf:"bytes,31,opt,name=cniNetnsDir,proto3" json:"cniNetnsDir,omitempty"`
ExcludeNamespaces []string `protobuf:"bytes,9,rep,name=excludeNamespaces,proto3" json:"excludeNamespaces,omitempty"`
Affinity *structpb.Struct `protobuf:"bytes,20,opt,name=affinity,proto3" json:"affinity,omitempty"`
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
PodAnnotations *structpb.Struct `protobuf:"bytes,10,opt,name=podAnnotations,proto3" json:"podAnnotations,omitempty"`
PspClusterRole string `protobuf:"bytes,11,opt,name=psp_cluster_role,json=pspClusterRole,proto3" json:"psp_cluster_role,omitempty"`
LogLevel string `protobuf:"bytes,12,opt,name=logLevel,proto3" json:"logLevel,omitempty"`
Repair *CNIRepairConfig `protobuf:"bytes,13,opt,name=repair,proto3" json:"repair,omitempty"`
Chained *wrapperspb.BoolValue `protobuf:"bytes,14,opt,name=chained,proto3" json:"chained,omitempty"`
ResourceQuotas *ResourceQuotas `protobuf:"bytes,16,opt,name=resource_quotas,json=resourceQuotas,proto3" json:"resource_quotas,omitempty"`
Resources *Resources `protobuf:"bytes,17,opt,name=resources,proto3" json:"resources,omitempty"`
Privileged *wrapperspb.BoolValue `protobuf:"bytes,18,opt,name=privileged,proto3" json:"privileged,omitempty"`
// The Container seccompProfile
//
// See: https://kubernetes.io/docs/tutorials/security/seccomp/
SeccompProfile *structpb.Struct `protobuf:"bytes,19,opt,name=seccompProfile,proto3" json:"seccompProfile,omitempty"`
Ambient *CNIAmbientConfig `protobuf:"bytes,21,opt,name=ambient,proto3" json:"ambient,omitempty"`
Provider string `protobuf:"bytes,22,opt,name=provider,proto3" json:"provider,omitempty"`
// K8s rolling update strategy
RollingMaxUnavailable *IntOrString `protobuf:"bytes,23,opt,name=rollingMaxUnavailable,proto3" json:"rollingMaxUnavailable,omitempty"`
}
func (x *CNIConfig) Reset() {
*x = CNIConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *CNIConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*CNIConfig) ProtoMessage() {}
// NOTE(review): this looks like protoc-gen-go generated output (protoimpl
// message-state pattern, rawDescGZIP descriptor accessors) — confirm, and do
// not hand-edit: change pkg/apis/istio/v1alpha1/values_types.proto and
// regenerate instead.

// ProtoReflect implements the protoreflect.Message interface for CNIConfig.
func (x *CNIConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		// Lazily cache the message type info on first use.
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	// Slow path: unsafe implementation disabled, or nil receiver.
	return mi.MessageOf(x)
}
// Deprecated: Use CNIConfig.ProtoReflect.Descriptor instead.
func (*CNIConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{1}
}
// The accessors below follow the standard generated pattern: each is nil-safe
// and returns the field's zero value ("" / nil) when the receiver is nil.
func (x *CNIConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *CNIConfig) GetHub() string {
	if x != nil {
		return x.Hub
	}
	return ""
}
func (x *CNIConfig) GetTag() *structpb.Value {
	if x != nil {
		return x.Tag
	}
	return nil
}
func (x *CNIConfig) GetVariant() string {
	if x != nil {
		return x.Variant
	}
	return ""
}
func (x *CNIConfig) GetImage() string {
	if x != nil {
		return x.Image
	}
	return ""
}
func (x *CNIConfig) GetPullPolicy() string {
	if x != nil {
		return x.PullPolicy
	}
	return ""
}
func (x *CNIConfig) GetCniBinDir() string {
	if x != nil {
		return x.CniBinDir
	}
	return ""
}
func (x *CNIConfig) GetCniConfDir() string {
	if x != nil {
		return x.CniConfDir
	}
	return ""
}
func (x *CNIConfig) GetCniConfFileName() string {
	if x != nil {
		return x.CniConfFileName
	}
	return ""
}
func (x *CNIConfig) GetCniNetnsDir() string {
	if x != nil {
		return x.CniNetnsDir
	}
	return ""
}
func (x *CNIConfig) GetExcludeNamespaces() []string {
	if x != nil {
		return x.ExcludeNamespaces
	}
	return nil
}
func (x *CNIConfig) GetAffinity() *structpb.Struct {
	if x != nil {
		return x.Affinity
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *CNIConfig) GetPodAnnotations() *structpb.Struct {
	if x != nil {
		return x.PodAnnotations
	}
	return nil
}
func (x *CNIConfig) GetPspClusterRole() string {
	if x != nil {
		return x.PspClusterRole
	}
	return ""
}
func (x *CNIConfig) GetLogLevel() string {
	if x != nil {
		return x.LogLevel
	}
	return ""
}
func (x *CNIConfig) GetRepair() *CNIRepairConfig {
	if x != nil {
		return x.Repair
	}
	return nil
}
func (x *CNIConfig) GetChained() *wrapperspb.BoolValue {
	if x != nil {
		return x.Chained
	}
	return nil
}
func (x *CNIConfig) GetResourceQuotas() *ResourceQuotas {
	if x != nil {
		return x.ResourceQuotas
	}
	return nil
}
func (x *CNIConfig) GetResources() *Resources {
	if x != nil {
		return x.Resources
	}
	return nil
}
func (x *CNIConfig) GetPrivileged() *wrapperspb.BoolValue {
	if x != nil {
		return x.Privileged
	}
	return nil
}
func (x *CNIConfig) GetSeccompProfile() *structpb.Struct {
	if x != nil {
		return x.SeccompProfile
	}
	return nil
}
func (x *CNIConfig) GetAmbient() *CNIAmbientConfig {
	if x != nil {
		return x.Ambient
	}
	return nil
}
func (x *CNIConfig) GetProvider() string {
	if x != nil {
		return x.Provider
	}
	return ""
}
func (x *CNIConfig) GetRollingMaxUnavailable() *IntOrString {
	if x != nil {
		return x.RollingMaxUnavailable
	}
	return nil
}
// CNIAmbientConfig holds the CNI plugin settings for ambient mode
// (generated from values_types.proto; do not hand-edit).
// Field numbers are non-contiguous (1, 3, 5) — presumably fields 2 and 4 were
// removed from the .proto; verify against the source proto before reusing them.
type CNIAmbientConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether ambient redirection is enabled
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	ConfigDir string `protobuf:"bytes,3,opt,name=configDir,proto3" json:"configDir,omitempty"`
	DnsCapture *wrapperspb.BoolValue `protobuf:"bytes,5,opt,name=dnsCapture,proto3" json:"dnsCapture,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info that the
// zeroing wiped (unsafe implementation only).
func (x *CNIAmbientConfig) Reset() {
	*x = CNIAmbientConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *CNIAmbientConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*CNIAmbientConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *CNIAmbientConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use CNIAmbientConfig.ProtoReflect.Descriptor instead.
func (*CNIAmbientConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{2}
}
// Nil-safe accessors: each returns the zero value when the receiver is nil.
func (x *CNIAmbientConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *CNIAmbientConfig) GetConfigDir() string {
	if x != nil {
		return x.ConfigDir
	}
	return ""
}
func (x *CNIAmbientConfig) GetDnsCapture() *wrapperspb.BoolValue {
	if x != nil {
		return x.DnsCapture
	}
	return nil
}
// CNIRepairConfig holds settings for the CNI "repair" controller
// (generated from values_types.proto; do not hand-edit).
type CNIRepairConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether repair behavior is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	Hub string `protobuf:"bytes,2,opt,name=hub,proto3" json:"hub,omitempty"`
	Tag *structpb.Value `protobuf:"bytes,3,opt,name=tag,proto3" json:"tag,omitempty"`
	Image string `protobuf:"bytes,4,opt,name=image,proto3" json:"image,omitempty"`
	// Controls whether various repair behaviors are enabled.
	LabelPods bool `protobuf:"varint,5,opt,name=labelPods,proto3" json:"labelPods,omitempty"`
	RepairPods bool `protobuf:"varint,11,opt,name=repairPods,proto3" json:"repairPods,omitempty"`
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	CreateEvents string `protobuf:"bytes,6,opt,name=createEvents,proto3" json:"createEvents,omitempty"`
	DeletePods bool `protobuf:"varint,7,opt,name=deletePods,proto3" json:"deletePods,omitempty"`
	BrokenPodLabelKey string `protobuf:"bytes,8,opt,name=brokenPodLabelKey,proto3" json:"brokenPodLabelKey,omitempty"`
	BrokenPodLabelValue string `protobuf:"bytes,9,opt,name=brokenPodLabelValue,proto3" json:"brokenPodLabelValue,omitempty"`
	InitContainerName string `protobuf:"bytes,10,opt,name=initContainerName,proto3" json:"initContainerName,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *CNIRepairConfig) Reset() {
	*x = CNIRepairConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *CNIRepairConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*CNIRepairConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *CNIRepairConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use CNIRepairConfig.ProtoReflect.Descriptor instead.
func (*CNIRepairConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{3}
}
// Nil-safe accessors: each returns the zero value ("" / false / nil) when the
// receiver is nil.
func (x *CNIRepairConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *CNIRepairConfig) GetHub() string {
	if x != nil {
		return x.Hub
	}
	return ""
}
func (x *CNIRepairConfig) GetTag() *structpb.Value {
	if x != nil {
		return x.Tag
	}
	return nil
}
func (x *CNIRepairConfig) GetImage() string {
	if x != nil {
		return x.Image
	}
	return ""
}
func (x *CNIRepairConfig) GetLabelPods() bool {
	if x != nil {
		return x.LabelPods
	}
	return false
}
func (x *CNIRepairConfig) GetRepairPods() bool {
	if x != nil {
		return x.RepairPods
	}
	return false
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *CNIRepairConfig) GetCreateEvents() string {
	if x != nil {
		return x.CreateEvents
	}
	return ""
}
func (x *CNIRepairConfig) GetDeletePods() bool {
	if x != nil {
		return x.DeletePods
	}
	return false
}
func (x *CNIRepairConfig) GetBrokenPodLabelKey() string {
	if x != nil {
		return x.BrokenPodLabelKey
	}
	return ""
}
func (x *CNIRepairConfig) GetBrokenPodLabelValue() string {
	if x != nil {
		return x.BrokenPodLabelValue
	}
	return ""
}
func (x *CNIRepairConfig) GetInitContainerName() string {
	if x != nil {
		return x.InitContainerName
	}
	return ""
}
// ResourceQuotas configures optional k8s ResourceQuota creation for the CNI
// DaemonSet (generated from values_types.proto; do not hand-edit).
type ResourceQuotas struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether to create resource quotas or not for the CNI DaemonSet.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	Pods int64 `protobuf:"varint,2,opt,name=pods,proto3" json:"pods,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *ResourceQuotas) Reset() {
	*x = ResourceQuotas{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *ResourceQuotas) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ResourceQuotas) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *ResourceQuotas) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ResourceQuotas.ProtoReflect.Descriptor instead.
func (*ResourceQuotas) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{4}
}
// Nil-safe accessors: each returns the zero value when the receiver is nil.
func (x *ResourceQuotas) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *ResourceQuotas) GetPods() int64 {
	if x != nil {
		return x.Pods
	}
	return 0
}
// Configuration for CPU or memory target utilization for HorizontalPodAutoscaler target.
type TargetUtilizationConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// K8s utilization setting for HorizontalPodAutoscaler target.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	TargetAverageUtilization int32 `protobuf:"varint,1,opt,name=targetAverageUtilization,proto3" json:"targetAverageUtilization,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *TargetUtilizationConfig) Reset() {
	*x = TargetUtilizationConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *TargetUtilizationConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*TargetUtilizationConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *TargetUtilizationConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use TargetUtilizationConfig.ProtoReflect.Descriptor instead.
func (*TargetUtilizationConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{5}
}
// GetTargetAverageUtilization is nil-safe; returns 0 for a nil receiver.
func (x *TargetUtilizationConfig) GetTargetAverageUtilization() int32 {
	if x != nil {
		return x.TargetAverageUtilization
	}
	return 0
}
// Mirrors Resources for unmarshaling.
// (Mirrors the k8s resources limits/requests shape as plain string maps,
// generated from values_types.proto; do not hand-edit.)
type Resources struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Limits map[string]string `protobuf:"bytes,1,rep,name=limits,proto3" json:"limits,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Requests map[string]string `protobuf:"bytes,2,rep,name=requests,proto3" json:"requests,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *Resources) Reset() {
	*x = Resources{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *Resources) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*Resources) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *Resources) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use Resources.ProtoReflect.Descriptor instead.
func (*Resources) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{6}
}
// Nil-safe accessors; note the returned maps are the live internal maps, not
// copies — mutating them mutates the message.
func (x *Resources) GetLimits() map[string]string {
	if x != nil {
		return x.Limits
	}
	return nil
}
func (x *Resources) GetRequests() map[string]string {
	if x != nil {
		return x.Requests
	}
	return nil
}
// Mirrors ServiceAccount for unmarshaling.
// (Generated from values_types.proto; do not hand-edit.)
type ServiceAccount struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Annotations *structpb.Struct `protobuf:"bytes,1,opt,name=annotations,proto3" json:"annotations,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *ServiceAccount) Reset() {
	*x = ServiceAccount{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *ServiceAccount) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*ServiceAccount) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
func (*ServiceAccount) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{7}
}
// GetAnnotations is nil-safe; returns nil for a nil receiver.
func (x *ServiceAccount) GetAnnotations() *structpb.Struct {
	if x != nil {
		return x.Annotations
	}
	return nil
}
// DefaultPodDisruptionBudgetConfig specifies the default pod disruption budget configuration.
//
// See https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
type DefaultPodDisruptionBudgetConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether a PodDisruptionBudget with a default minAvailable value of 1 is created for each deployment.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *DefaultPodDisruptionBudgetConfig) Reset() {
	*x = DefaultPodDisruptionBudgetConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[8]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *DefaultPodDisruptionBudgetConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DefaultPodDisruptionBudgetConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *DefaultPodDisruptionBudgetConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[8]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DefaultPodDisruptionBudgetConfig.ProtoReflect.Descriptor instead.
func (*DefaultPodDisruptionBudgetConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{8}
}
// GetEnabled is nil-safe; returns nil for a nil receiver.
func (x *DefaultPodDisruptionBudgetConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
// DefaultResourcesConfig specifies the default k8s resources settings for all Istio control plane components.
type DefaultResourcesConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// k8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	Requests *ResourcesRequestsConfig `protobuf:"bytes,1,opt,name=requests,proto3" json:"requests,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *DefaultResourcesConfig) Reset() {
	*x = DefaultResourcesConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[9]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *DefaultResourcesConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*DefaultResourcesConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *DefaultResourcesConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[9]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use DefaultResourcesConfig.ProtoReflect.Descriptor instead.
func (*DefaultResourcesConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{9}
}
// GetRequests is nil-safe; returns nil for a nil receiver.
func (x *DefaultResourcesConfig) GetRequests() *ResourcesRequestsConfig {
	if x != nil {
		return x.Requests
	}
	return nil
}
// Configuration for an egress gateway.
// (Generated from values_types.proto; do not hand-edit. Per the trailing
// comment on ipFamilyPolicy, the next available proto field number is 31.)
type EgressGatewayConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether auto scaling with a HorizontalPodAutoscaler is enabled.
	AutoscaleEnabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=autoscaleEnabled,proto3" json:"autoscaleEnabled,omitempty"`
	// maxReplicas setting for HorizontalPodAutoscaler.
	AutoscaleMax uint32 `protobuf:"varint,2,opt,name=autoscaleMax,proto3" json:"autoscaleMax,omitempty"`
	// minReplicas setting for HorizontalPodAutoscaler.
	AutoscaleMin uint32 `protobuf:"varint,3,opt,name=autoscaleMin,proto3" json:"autoscaleMin,omitempty"`
	// K8s memory utilization setting for HorizontalPodAutoscaler target.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Memory *TargetUtilizationConfig `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"`
	// K8s utilization setting for HorizontalPodAutoscaler target.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Cpu *TargetUtilizationConfig `protobuf:"bytes,5,opt,name=cpu,proto3" json:"cpu,omitempty"`
	CustomService *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=customService,proto3" json:"customService,omitempty"`
	// Controls whether an egress gateway is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,7,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Environment variables passed to the proxy container.
	Env *structpb.Struct `protobuf:"bytes,8,opt,name=env,proto3" json:"env,omitempty"`
	Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Name string `protobuf:"bytes,25,opt,name=name,proto3" json:"name,omitempty"`
	// K8s node selector.
	//
	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	NodeSelector *structpb.Struct `protobuf:"bytes,10,opt,name=nodeSelector,proto3" json:"nodeSelector,omitempty"`
	// K8s annotations for pods.
	//
	// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAnnotations *structpb.Struct `protobuf:"bytes,11,opt,name=podAnnotations,proto3" json:"podAnnotations,omitempty"`
	// Pod anti-affinity label selector.
	//
	// Specify the pod anti-affinity that allows you to constrain which nodes
	// your pod is eligible to be scheduled based on labels on pods that are
	// already running on the node rather than based on labels on nodes.
	// There are currently two types of anti-affinity:
	//
	// "requiredDuringSchedulingIgnoredDuringExecution"
	// "preferredDuringSchedulingIgnoredDuringExecution"
	//
	// which denote “hard” vs. “soft” requirements, you can define your values
	// in "podAntiAffinityLabelSelector" and "podAntiAffinityTermLabelSelector"
	// correspondingly.
	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
	//
	// Examples:
	// podAntiAffinityLabelSelector:
	// - key: security
	// operator: In
	// values: S1,S2
	// topologyKey: "kubernetes.io/hostname"
	// This pod anti-affinity rule says that the pod requires not to be scheduled
	// onto a node if that node is already running a pod with label having key
	// “security” and value “S1”.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAntiAffinityLabelSelector []*structpb.Struct `protobuf:"bytes,12,rep,name=podAntiAffinityLabelSelector,proto3" json:"podAntiAffinityLabelSelector,omitempty"`
	// See PodAntiAffinityLabelSelector.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAntiAffinityTermLabelSelector []*structpb.Struct `protobuf:"bytes,13,rep,name=podAntiAffinityTermLabelSelector,proto3" json:"podAntiAffinityTermLabelSelector,omitempty"`
	// Ports Configuration for the egress gateway service.
	Ports []*PortsConfig `protobuf:"bytes,14,rep,name=ports,proto3" json:"ports,omitempty"`
	// K8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Resources *Resources `protobuf:"bytes,15,opt,name=resources,proto3" json:"resources,omitempty"`
	// Config for secret volume mounts.
	SecretVolumes []*SecretVolume `protobuf:"bytes,16,rep,name=secretVolumes,proto3" json:"secretVolumes,omitempty"`
	// Annotations to add to the egress gateway service.
	ServiceAnnotations *structpb.Struct `protobuf:"bytes,17,opt,name=serviceAnnotations,proto3" json:"serviceAnnotations,omitempty"`
	// Service type.
	//
	// See https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
	Type string `protobuf:"bytes,18,opt,name=type,proto3" json:"type,omitempty"`
	// Enables cross-cluster access using SNI matching.
	Zvpn *ZeroVPNConfig `protobuf:"bytes,19,opt,name=zvpn,proto3" json:"zvpn,omitempty"`
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Tolerations []*structpb.Struct `protobuf:"bytes,20,rep,name=tolerations,proto3" json:"tolerations,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxSurge *IntOrString `protobuf:"bytes,21,opt,name=rollingMaxSurge,proto3" json:"rollingMaxSurge,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxUnavailable *IntOrString `protobuf:"bytes,22,opt,name=rollingMaxUnavailable,proto3" json:"rollingMaxUnavailable,omitempty"`
	ConfigVolumes []*structpb.Struct `protobuf:"bytes,23,rep,name=configVolumes,proto3" json:"configVolumes,omitempty"`
	AdditionalContainers []*structpb.Struct `protobuf:"bytes,24,rep,name=additionalContainers,proto3" json:"additionalContainers,omitempty"`
	RunAsRoot *wrapperspb.BoolValue `protobuf:"bytes,26,opt,name=runAsRoot,proto3" json:"runAsRoot,omitempty"`
	// The injection template to use for the gateway. If not set, no injection will be performed.
	InjectionTemplate string `protobuf:"bytes,27,opt,name=injectionTemplate,proto3" json:"injectionTemplate,omitempty"`
	ServiceAccount *ServiceAccount `protobuf:"bytes,28,opt,name=serviceAccount,proto3" json:"serviceAccount,omitempty"`
	IpFamilies []string `protobuf:"bytes,29,rep,name=ipFamilies,proto3" json:"ipFamilies,omitempty"`
	IpFamilyPolicy string `protobuf:"bytes,30,opt,name=ipFamilyPolicy,proto3" json:"ipFamilyPolicy,omitempty"` // Next available 31.
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *EgressGatewayConfig) Reset() {
	*x = EgressGatewayConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[10]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *EgressGatewayConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*EgressGatewayConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *EgressGatewayConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[10]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		// Lazily cache the message type info on first use.
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use EgressGatewayConfig.ProtoReflect.Descriptor instead.
func (*EgressGatewayConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{10}
}
// The accessors below follow the standard generated pattern: each is nil-safe
// and returns the field's zero value ("" / 0 / nil) when the receiver is nil.
func (x *EgressGatewayConfig) GetAutoscaleEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.AutoscaleEnabled
	}
	return nil
}
func (x *EgressGatewayConfig) GetAutoscaleMax() uint32 {
	if x != nil {
		return x.AutoscaleMax
	}
	return 0
}
func (x *EgressGatewayConfig) GetAutoscaleMin() uint32 {
	if x != nil {
		return x.AutoscaleMin
	}
	return 0
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetMemory() *TargetUtilizationConfig {
	if x != nil {
		return x.Memory
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetCpu() *TargetUtilizationConfig {
	if x != nil {
		return x.Cpu
	}
	return nil
}
func (x *EgressGatewayConfig) GetCustomService() *wrapperspb.BoolValue {
	if x != nil {
		return x.CustomService
	}
	return nil
}
func (x *EgressGatewayConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *EgressGatewayConfig) GetEnv() *structpb.Struct {
	if x != nil {
		return x.Env
	}
	return nil
}
func (x *EgressGatewayConfig) GetLabels() map[string]string {
	if x != nil {
		return x.Labels
	}
	return nil
}
func (x *EgressGatewayConfig) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetNodeSelector() *structpb.Struct {
	if x != nil {
		return x.NodeSelector
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetPodAnnotations() *structpb.Struct {
	if x != nil {
		return x.PodAnnotations
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetPodAntiAffinityLabelSelector() []*structpb.Struct {
	if x != nil {
		return x.PodAntiAffinityLabelSelector
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetPodAntiAffinityTermLabelSelector() []*structpb.Struct {
	if x != nil {
		return x.PodAntiAffinityTermLabelSelector
	}
	return nil
}
func (x *EgressGatewayConfig) GetPorts() []*PortsConfig {
	if x != nil {
		return x.Ports
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetResources() *Resources {
	if x != nil {
		return x.Resources
	}
	return nil
}
func (x *EgressGatewayConfig) GetSecretVolumes() []*SecretVolume {
	if x != nil {
		return x.SecretVolumes
	}
	return nil
}
func (x *EgressGatewayConfig) GetServiceAnnotations() *structpb.Struct {
	if x != nil {
		return x.ServiceAnnotations
	}
	return nil
}
func (x *EgressGatewayConfig) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}
func (x *EgressGatewayConfig) GetZvpn() *ZeroVPNConfig {
	if x != nil {
		return x.Zvpn
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetTolerations() []*structpb.Struct {
	if x != nil {
		return x.Tolerations
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetRollingMaxSurge() *IntOrString {
	if x != nil {
		return x.RollingMaxSurge
	}
	return nil
}
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *EgressGatewayConfig) GetRollingMaxUnavailable() *IntOrString {
	if x != nil {
		return x.RollingMaxUnavailable
	}
	return nil
}
func (x *EgressGatewayConfig) GetConfigVolumes() []*structpb.Struct {
	if x != nil {
		return x.ConfigVolumes
	}
	return nil
}
func (x *EgressGatewayConfig) GetAdditionalContainers() []*structpb.Struct {
	if x != nil {
		return x.AdditionalContainers
	}
	return nil
}
func (x *EgressGatewayConfig) GetRunAsRoot() *wrapperspb.BoolValue {
	if x != nil {
		return x.RunAsRoot
	}
	return nil
}
func (x *EgressGatewayConfig) GetInjectionTemplate() string {
	if x != nil {
		return x.InjectionTemplate
	}
	return ""
}
func (x *EgressGatewayConfig) GetServiceAccount() *ServiceAccount {
	if x != nil {
		return x.ServiceAccount
	}
	return nil
}
func (x *EgressGatewayConfig) GetIpFamilies() []string {
	if x != nil {
		return x.IpFamilies
	}
	return nil
}
func (x *EgressGatewayConfig) GetIpFamilyPolicy() string {
	if x != nil {
		return x.IpFamilyPolicy
	}
	return ""
}
// Configuration for gateways.
// (Generated from values_types.proto; do not hand-edit. The `json` names use
// the hyphenated forms "istio-egressgateway"/"istio-ingressgateway" via the
// proto json_name option.)
type GatewaysConfig struct {
	state protoimpl.MessageState
	sizeCache protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Configuration for an egress gateway.
	IstioEgressgateway *EgressGatewayConfig `protobuf:"bytes,1,opt,name=istio_egressgateway,json=istio-egressgateway,proto3" json:"istio_egressgateway,omitempty"`
	// Controls whether any gateways are enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Configuration for an ingress gateway.
	IstioIngressgateway *IngressGatewayConfig `protobuf:"bytes,4,opt,name=istio_ingressgateway,json=istio-ingressgateway,proto3" json:"istio_ingressgateway,omitempty"`
}
// Reset zeroes the message, then re-installs the cached type info (unsafe
// implementation only).
func (x *GatewaysConfig) Reset() {
	*x = GatewaysConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[11]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String returns the standard generated textual form of the message.
func (x *GatewaysConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks the type as a protobuf message.
func (*GatewaysConfig) ProtoMessage() {}
// ProtoReflect implements the protoreflect.Message interface.
func (x *GatewaysConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[11]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use GatewaysConfig.ProtoReflect.Descriptor instead.
func (*GatewaysConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{11}
}
// Nil-safe accessors: each returns nil when the receiver is nil.
func (x *GatewaysConfig) GetIstioEgressgateway() *EgressGatewayConfig {
	if x != nil {
		return x.IstioEgressgateway
	}
	return nil
}
func (x *GatewaysConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
func (x *GatewaysConfig) GetIstioIngressgateway() *IngressGatewayConfig {
	if x != nil {
		return x.IstioIngressgateway
	}
	return nil
}
// Global Configuration for Istio components.
type GlobalConfig struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Specifies pod scheduling arch(amd64, ppc64le, s390x, arm64) and weight as follows:
//
// 0 - Never scheduled
// 1 - Least preferred
// 2 - No preference
// 3 - Most preferred
//
// Deprecated: replaced by the affinity k8s settings which allows architecture nodeAffinity configuration of this behavior.
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
Arch *ArchConfig `protobuf:"bytes,1,opt,name=arch,proto3" json:"arch,omitempty"`
// List of certSigners to allow "approve" action in the ClusterRole
CertSigners []string `protobuf:"bytes,68,rep,name=certSigners,proto3" json:"certSigners,omitempty"`
ConfigRootNamespace string `protobuf:"bytes,50,opt,name=configRootNamespace,proto3" json:"configRootNamespace,omitempty"`
// Controls whether the server-side validation is enabled.
ConfigValidation *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=configValidation,proto3" json:"configValidation,omitempty"`
DefaultConfigVisibilitySettings []string `protobuf:"bytes,52,rep,name=defaultConfigVisibilitySettings,proto3" json:"defaultConfigVisibilitySettings,omitempty"`
// Default k8s node selector for all the Istio control plane components
//
// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
DefaultNodeSelector *structpb.Struct `protobuf:"bytes,6,opt,name=defaultNodeSelector,proto3" json:"defaultNodeSelector,omitempty"`
// Specifies the default pod disruption budget configuration.
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
DefaultPodDisruptionBudget *DefaultPodDisruptionBudgetConfig `protobuf:"bytes,7,opt,name=defaultPodDisruptionBudget,proto3" json:"defaultPodDisruptionBudget,omitempty"`
// Default k8s resources settings for all Istio control plane components.
//
// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
DefaultResources *DefaultResourcesConfig `protobuf:"bytes,9,opt,name=defaultResources,proto3" json:"defaultResources,omitempty"`
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
DefaultTolerations []*structpb.Struct `protobuf:"bytes,55,rep,name=defaultTolerations,proto3" json:"defaultTolerations,omitempty"`
// Specifies the docker hub for Istio images.
Hub string `protobuf:"bytes,12,opt,name=hub,proto3" json:"hub,omitempty"`
// Specifies the image pull policy for the Istio images. one of Always, Never, IfNotPresent.
// Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated.
//
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
ImagePullPolicy string `protobuf:"bytes,13,opt,name=imagePullPolicy,proto3" json:"imagePullPolicy,omitempty"` // ImagePullPolicy v1.PullPolicy `json:"imagePullPolicy,omitempty"`
ImagePullSecrets []string `protobuf:"bytes,37,rep,name=imagePullSecrets,proto3" json:"imagePullSecrets,omitempty"`
// Specifies the default namespace for the Istio control plane components.
IstioNamespace string `protobuf:"bytes,14,opt,name=istioNamespace,proto3" json:"istioNamespace,omitempty"`
LogAsJson *wrapperspb.BoolValue `protobuf:"bytes,36,opt,name=logAsJson,proto3" json:"logAsJson,omitempty"`
// Specifies the global logging level settings for the Istio control plane components.
Logging *GlobalLoggingConfig `protobuf:"bytes,17,opt,name=logging,proto3" json:"logging,omitempty"`
MeshID string `protobuf:"bytes,53,opt,name=meshID,proto3" json:"meshID,omitempty"`
// Configure the mesh networks to be used by the Split Horizon EDS.
//
// The following example defines two networks with different endpoints association methods.
// For `network1`, all endpoints whose IP belongs to the provided CIDR range will be
// mapped to network1. The gateway for this network example is specified by its public IP
// address and port.
// The second network, `network2`, in this example is defined differently with all endpoints
// retrieved through the specified Multi-Cluster registry being mapped to network2. The
// gateway is also defined differently with the name of the gateway service on the remote
// cluster. The public IP for the gateway will be determined from that remote service (only
// LoadBalancer gateway service type is currently supported, for a NodePort type gateway service,
// it still needs to be configured manually).
//
// meshNetworks:
//
// network1:
// endpoints:
// - fromCidr: "192.168.0.1/24"
// gateways:
// - address: 1.1.1.1
// port: 80
// network2:
// endpoints:
// - fromRegistry: reg1
// gateways:
// - registryServiceName: istio-ingressgateway.istio-system.svc.cluster.local
// port: 443
MeshNetworks *structpb.Struct `protobuf:"bytes,19,opt,name=meshNetworks,proto3" json:"meshNetworks,omitempty"`
// Specifies the Configuration for Istio mesh across multiple clusters through Istio gateways.
MultiCluster *MultiClusterConfig `protobuf:"bytes,22,opt,name=multiCluster,proto3" json:"multiCluster,omitempty"`
Network string `protobuf:"bytes,39,opt,name=network,proto3" json:"network,omitempty"`
// Custom DNS config for the pod to resolve names of services in other
// clusters. Use this to add additional search domains, and other settings.
// see https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#dns-config
// This does not apply to gateway pods as they typically need a different
// set of DNS settings than the normal application pods (e.g. in multicluster scenarios).
PodDNSSearchNamespaces []string `protobuf:"bytes,43,rep,name=podDNSSearchNamespaces,proto3" json:"podDNSSearchNamespaces,omitempty"`
OmitSidecarInjectorConfigMap *wrapperspb.BoolValue `protobuf:"bytes,38,opt,name=omitSidecarInjectorConfigMap,proto3" json:"omitSidecarInjectorConfigMap,omitempty"`
// Controls whether to restrict the applications namespace the controller manages;
// If set it to false, the controller watches all namespaces.
OneNamespace *wrapperspb.BoolValue `protobuf:"bytes,23,opt,name=oneNamespace,proto3" json:"oneNamespace,omitempty"`
OperatorManageWebhooks *wrapperspb.BoolValue `protobuf:"bytes,41,opt,name=operatorManageWebhooks,proto3" json:"operatorManageWebhooks,omitempty"`
// Specifies the k8s priorityClassName for the istio control plane components.
//
// See https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
PriorityClassName string `protobuf:"bytes,27,opt,name=priorityClassName,proto3" json:"priorityClassName,omitempty"`
// Specifies how proxies are configured within Istio.
Proxy *ProxyConfig `protobuf:"bytes,28,opt,name=proxy,proto3" json:"proxy,omitempty"`
// Specifies the Configuration for proxy_init container which sets the pods' networking to intercept the inbound/outbound traffic.
ProxyInit *ProxyInitConfig `protobuf:"bytes,29,opt,name=proxy_init,proto3" json:"proxy_init,omitempty"`
// Specifies the Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates.
Sds *SDSConfig `protobuf:"bytes,30,opt,name=sds,proto3" json:"sds,omitempty"`
// Specifies the tag for the Istio docker images.
Tag *structpb.Value `protobuf:"bytes,31,opt,name=tag,proto3" json:"tag,omitempty"`
Variant string `protobuf:"bytes,67,opt,name=variant,proto3" json:"variant,omitempty"`
// Specifies the Configuration for each of the supported tracers.
Tracer *TracerConfig `protobuf:"bytes,33,opt,name=tracer,proto3" json:"tracer,omitempty"`
// Controls whether to use the Mesh Configuration Protocol to distribute configuration.
UseMCP *wrapperspb.BoolValue `protobuf:"bytes,35,opt,name=useMCP,proto3" json:"useMCP,omitempty"`
// Specifies the Istio control plane’s pilot Pod IP address or remote cluster DNS resolvable hostname.
RemotePilotAddress string `protobuf:"bytes,48,opt,name=remotePilotAddress,proto3" json:"remotePilotAddress,omitempty"`
// Specifies the configuration of istiod
Istiod *IstiodConfig `protobuf:"bytes,54,opt,name=istiod,proto3" json:"istiod,omitempty"`
// Configure the Pilot certificate provider.
// Currently, four providers are supported: "kubernetes", "istiod", "custom" and "none".
PilotCertProvider string `protobuf:"bytes,56,opt,name=pilotCertProvider,proto3" json:"pilotCertProvider,omitempty"`
// Configure the policy for validating JWT.
// Currently, two options are supported: "third-party-jwt" and "first-party-jwt".
JwtPolicy string `protobuf:"bytes,57,opt,name=jwtPolicy,proto3" json:"jwtPolicy,omitempty"`
// Specifies the configuration for Security Token Service.
Sts *STSConfig `protobuf:"bytes,58,opt,name=sts,proto3" json:"sts,omitempty"`
// Configures the revision this control plane is a part of
Revision string `protobuf:"bytes,59,opt,name=revision,proto3" json:"revision,omitempty"`
// Controls whether the in-cluster MTLS key and certs are loaded from the secret volume mounts.
MountMtlsCerts *wrapperspb.BoolValue `protobuf:"bytes,60,opt,name=mountMtlsCerts,proto3" json:"mountMtlsCerts,omitempty"`
// The address of the CA for CSR.
CaAddress string `protobuf:"bytes,61,opt,name=caAddress,proto3" json:"caAddress,omitempty"`
// Controls whether one external istiod is enabled.
ExternalIstiod *wrapperspb.BoolValue `protobuf:"bytes,62,opt,name=externalIstiod,proto3" json:"externalIstiod,omitempty"`
// Controls whether a remote cluster is the config cluster for an external istiod
ConfigCluster *wrapperspb.BoolValue `protobuf:"bytes,64,opt,name=configCluster,proto3" json:"configCluster,omitempty"`
// The name of the CA for workloads.
// For example, when caName=GkeWorkloadCertificate, GKE workload certificates
// will be used as the certificates for workloads.
// The default value is "" and when caName="", the CA will be configured by other
// mechanisms (e.g., environmental variable CA_PROVIDER).
CaName string `protobuf:"bytes,65,opt,name=caName,proto3" json:"caName,omitempty"`
Autoscalingv2API *wrapperspb.BoolValue `protobuf:"bytes,66,opt,name=autoscalingv2API,proto3" json:"autoscalingv2API,omitempty"`
// Platform in which Istio is deployed. Possible values are: "openshift" and "gcp"
// An empty value means it is a vanilla Kubernetes distribution, therefore no special
// treatment will be considered.
Platform string `protobuf:"bytes,69,opt,name=platform,proto3" json:"platform,omitempty"`
IpFamilies []string `protobuf:"bytes,70,rep,name=ipFamilies,proto3" json:"ipFamilies,omitempty"`
IpFamilyPolicy string `protobuf:"bytes,71,opt,name=ipFamilyPolicy,proto3" json:"ipFamilyPolicy,omitempty"` // The next available key is 72
}
// Reset clears x to its zero value and, when the protoimpl unsafe fast
// path is enabled, re-attaches the generated message info so later
// reflection calls remain cheap.
func (x *GlobalConfig) Reset() {
	*x = GlobalConfig{}
	if protoimpl.UnsafeEnabled {
		// msgTypes[12] is this message's slot in the generated type table.
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[12]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *GlobalConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GlobalConfig as implementing proto.Message.
func (*GlobalConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use when the unsafe fast path is enabled.
func (x *GlobalConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[12]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GlobalConfig.ProtoReflect.Descriptor instead.
func (*GlobalConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{12}
}
// Nil-safe accessors for GlobalConfig. Each getter tolerates a nil
// receiver and yields the field's zero value in that case, preserving
// the standard semantics of protobuf-generated getters.

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetArch() *ArchConfig {
	if x == nil {
		return nil
	}
	return x.Arch
}

func (x *GlobalConfig) GetCertSigners() []string {
	if x == nil {
		return nil
	}
	return x.CertSigners
}

func (x *GlobalConfig) GetConfigRootNamespace() string {
	if x == nil {
		return ""
	}
	return x.ConfigRootNamespace
}

func (x *GlobalConfig) GetConfigValidation() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.ConfigValidation
}

func (x *GlobalConfig) GetDefaultConfigVisibilitySettings() []string {
	if x == nil {
		return nil
	}
	return x.DefaultConfigVisibilitySettings
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetDefaultNodeSelector() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.DefaultNodeSelector
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetDefaultPodDisruptionBudget() *DefaultPodDisruptionBudgetConfig {
	if x == nil {
		return nil
	}
	return x.DefaultPodDisruptionBudget
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetDefaultResources() *DefaultResourcesConfig {
	if x == nil {
		return nil
	}
	return x.DefaultResources
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetDefaultTolerations() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.DefaultTolerations
}

func (x *GlobalConfig) GetHub() string {
	if x == nil {
		return ""
	}
	return x.Hub
}

func (x *GlobalConfig) GetImagePullPolicy() string {
	if x == nil {
		return ""
	}
	return x.ImagePullPolicy
}

func (x *GlobalConfig) GetImagePullSecrets() []string {
	if x == nil {
		return nil
	}
	return x.ImagePullSecrets
}

func (x *GlobalConfig) GetIstioNamespace() string {
	if x == nil {
		return ""
	}
	return x.IstioNamespace
}

func (x *GlobalConfig) GetLogAsJson() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.LogAsJson
}

func (x *GlobalConfig) GetLogging() *GlobalLoggingConfig {
	if x == nil {
		return nil
	}
	return x.Logging
}

func (x *GlobalConfig) GetMeshID() string {
	if x == nil {
		return ""
	}
	return x.MeshID
}

func (x *GlobalConfig) GetMeshNetworks() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.MeshNetworks
}

func (x *GlobalConfig) GetMultiCluster() *MultiClusterConfig {
	if x == nil {
		return nil
	}
	return x.MultiCluster
}

func (x *GlobalConfig) GetNetwork() string {
	if x == nil {
		return ""
	}
	return x.Network
}

func (x *GlobalConfig) GetPodDNSSearchNamespaces() []string {
	if x == nil {
		return nil
	}
	return x.PodDNSSearchNamespaces
}

func (x *GlobalConfig) GetOmitSidecarInjectorConfigMap() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.OmitSidecarInjectorConfigMap
}

func (x *GlobalConfig) GetOneNamespace() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.OneNamespace
}

func (x *GlobalConfig) GetOperatorManageWebhooks() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.OperatorManageWebhooks
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *GlobalConfig) GetPriorityClassName() string {
	if x == nil {
		return ""
	}
	return x.PriorityClassName
}

func (x *GlobalConfig) GetProxy() *ProxyConfig {
	if x == nil {
		return nil
	}
	return x.Proxy
}

func (x *GlobalConfig) GetProxyInit() *ProxyInitConfig {
	if x == nil {
		return nil
	}
	return x.ProxyInit
}

func (x *GlobalConfig) GetSds() *SDSConfig {
	if x == nil {
		return nil
	}
	return x.Sds
}

func (x *GlobalConfig) GetTag() *structpb.Value {
	if x == nil {
		return nil
	}
	return x.Tag
}

func (x *GlobalConfig) GetVariant() string {
	if x == nil {
		return ""
	}
	return x.Variant
}

func (x *GlobalConfig) GetTracer() *TracerConfig {
	if x == nil {
		return nil
	}
	return x.Tracer
}

func (x *GlobalConfig) GetUseMCP() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.UseMCP
}

func (x *GlobalConfig) GetRemotePilotAddress() string {
	if x == nil {
		return ""
	}
	return x.RemotePilotAddress
}

func (x *GlobalConfig) GetIstiod() *IstiodConfig {
	if x == nil {
		return nil
	}
	return x.Istiod
}

func (x *GlobalConfig) GetPilotCertProvider() string {
	if x == nil {
		return ""
	}
	return x.PilotCertProvider
}

func (x *GlobalConfig) GetJwtPolicy() string {
	if x == nil {
		return ""
	}
	return x.JwtPolicy
}

func (x *GlobalConfig) GetSts() *STSConfig {
	if x == nil {
		return nil
	}
	return x.Sts
}

func (x *GlobalConfig) GetRevision() string {
	if x == nil {
		return ""
	}
	return x.Revision
}

func (x *GlobalConfig) GetMountMtlsCerts() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.MountMtlsCerts
}

func (x *GlobalConfig) GetCaAddress() string {
	if x == nil {
		return ""
	}
	return x.CaAddress
}

func (x *GlobalConfig) GetExternalIstiod() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.ExternalIstiod
}

func (x *GlobalConfig) GetConfigCluster() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.ConfigCluster
}

func (x *GlobalConfig) GetCaName() string {
	if x == nil {
		return ""
	}
	return x.CaName
}

func (x *GlobalConfig) GetAutoscalingv2API() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.Autoscalingv2API
}

func (x *GlobalConfig) GetPlatform() string {
	if x == nil {
		return ""
	}
	return x.Platform
}

func (x *GlobalConfig) GetIpFamilies() []string {
	if x == nil {
		return nil
	}
	return x.IpFamilies
}

func (x *GlobalConfig) GetIpFamilyPolicy() string {
	if x == nil {
		return ""
	}
	return x.IpFamilyPolicy
}
// Configuration for Security Token Service (STS) server.
//
// See https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16
type STSConfig struct {
	// protobuf runtime bookkeeping; managed by the generated code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Port for the STS service (proto field 1). Presumably the listen
	// port of the STS server — semantics are not documented in the proto.
	ServicePort uint32 `protobuf:"varint,1,opt,name=servicePort,proto3" json:"servicePort,omitempty"`
}
// Reset clears x to its zero value and, when the protoimpl unsafe fast
// path is enabled, re-attaches the generated message info.
func (x *STSConfig) Reset() {
	*x = STSConfig{}
	if protoimpl.UnsafeEnabled {
		// msgTypes[13] is this message's slot in the generated type table.
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[13]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *STSConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *STSConfig as implementing proto.Message.
func (*STSConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use when the unsafe fast path is enabled.
func (x *STSConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[13]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use STSConfig.ProtoReflect.Descriptor instead.
func (*STSConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{13}
}
// GetServicePort returns the configured STS service port; 0 for a nil
// receiver, matching standard protobuf getter semantics.
func (x *STSConfig) GetServicePort() uint32 {
	if x == nil {
		return 0
	}
	return x.ServicePort
}
// IstiodConfig holds istiod-specific configuration values.
type IstiodConfig struct {
	// protobuf runtime bookkeeping; managed by the generated code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// If enabled, istiod will perform config analysis
	EnableAnalysis *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enableAnalysis,proto3" json:"enableAnalysis,omitempty"`
}
// Reset clears x to its zero value and, when the protoimpl unsafe fast
// path is enabled, re-attaches the generated message info.
func (x *IstiodConfig) Reset() {
	*x = IstiodConfig{}
	if protoimpl.UnsafeEnabled {
		// msgTypes[14] is this message's slot in the generated type table.
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[14]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *IstiodConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IstiodConfig as implementing proto.Message.
func (*IstiodConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use when the unsafe fast path is enabled.
func (x *IstiodConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[14]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IstiodConfig.ProtoReflect.Descriptor instead.
func (*IstiodConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{14}
}
// GetEnableAnalysis returns the enableAnalysis flag; nil for a nil
// receiver, matching standard protobuf getter semantics.
func (x *IstiodConfig) GetEnableAnalysis() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.EnableAnalysis
}
// GlobalLoggingConfig specifies the global logging level settings for the Istio control plane components.
type GlobalLoggingConfig struct {
	// protobuf runtime bookkeeping; managed by the generated code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Comma-separated minimum per-scope logging level of messages to output, in the form of <scope>:<level>,<scope>:<level>
	// The control plane has different scopes depending on component, but can configure default log level across all components
	// If empty, default scope and level will be used as configured in code
	Level string `protobuf:"bytes,1,opt,name=level,proto3" json:"level,omitempty"`
}
// Reset clears x to its zero value and, when the protoimpl unsafe fast
// path is enabled, re-attaches the generated message info.
func (x *GlobalLoggingConfig) Reset() {
	*x = GlobalLoggingConfig{}
	if protoimpl.UnsafeEnabled {
		// msgTypes[15] is this message's slot in the generated type table.
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[15]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *GlobalLoggingConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *GlobalLoggingConfig as implementing proto.Message.
func (*GlobalLoggingConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use when the unsafe fast path is enabled.
func (x *GlobalLoggingConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[15]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use GlobalLoggingConfig.ProtoReflect.Descriptor instead.
func (*GlobalLoggingConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{15}
}
// GetLevel returns the configured log-level string; "" for a nil
// receiver, matching standard protobuf getter semantics.
func (x *GlobalLoggingConfig) GetLevel() string {
	if x == nil {
		return ""
	}
	return x.Level
}
// Configuration for an ingress gateway.
type IngressGatewayConfig struct {
	// protobuf runtime bookkeeping; managed by the generated code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Controls whether auto scaling with a HorizontalPodAutoscaler is enabled.
	AutoscaleEnabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=autoscaleEnabled,proto3" json:"autoscaleEnabled,omitempty"`
	// maxReplicas setting for HorizontalPodAutoscaler.
	AutoscaleMax uint32 `protobuf:"varint,2,opt,name=autoscaleMax,proto3" json:"autoscaleMax,omitempty"`
	// minReplicas setting for HorizontalPodAutoscaler.
	AutoscaleMin uint32 `protobuf:"varint,3,opt,name=autoscaleMin,proto3" json:"autoscaleMin,omitempty"`
	// K8s memory utilization setting for HorizontalPodAutoscaler target.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Memory *TargetUtilizationConfig `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"`
	// K8s cpu utilization setting for HorizontalPodAutoscaler target.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Cpu *TargetUtilizationConfig `protobuf:"bytes,5,opt,name=cpu,proto3" json:"cpu,omitempty"`
	// Semantics not documented in the proto; presumably toggles use of a
	// user-supplied Service for the gateway — verify against the charts.
	CustomService *wrapperspb.BoolValue `protobuf:"bytes,6,opt,name=customService,proto3" json:"customService,omitempty"`
	// Controls whether an ingress gateway is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,10,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Environment variables passed to the proxy container.
	Env *structpb.Struct `protobuf:"bytes,11,opt,name=env,proto3" json:"env,omitempty"`
	// K8s labels to apply (target object not documented in the proto).
	Labels map[string]string `protobuf:"bytes,15,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Static IP requested for a LoadBalancer-type service.
	LoadBalancerIP string `protobuf:"bytes,16,opt,name=loadBalancerIP,proto3" json:"loadBalancerIP,omitempty"`
	// CIDR ranges allowed to reach a LoadBalancer-type service.
	LoadBalancerSourceRanges []string `protobuf:"bytes,17,rep,name=loadBalancerSourceRanges,proto3" json:"loadBalancerSourceRanges,omitempty"`
	// Name of this gateway instance.
	Name string `protobuf:"bytes,44,opt,name=name,proto3" json:"name,omitempty"`
	// K8s node selector.
	//
	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	NodeSelector *structpb.Struct `protobuf:"bytes,19,opt,name=nodeSelector,proto3" json:"nodeSelector,omitempty"`
	// K8s annotations for pods.
	//
	// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAnnotations *structpb.Struct `protobuf:"bytes,20,opt,name=podAnnotations,proto3" json:"podAnnotations,omitempty"`
	// See EgressGatewayConfig.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAntiAffinityLabelSelector []*structpb.Struct `protobuf:"bytes,21,rep,name=podAntiAffinityLabelSelector,proto3" json:"podAntiAffinityLabelSelector,omitempty"`
	// See EgressGatewayConfig.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAntiAffinityTermLabelSelector []*structpb.Struct `protobuf:"bytes,22,rep,name=podAntiAffinityTermLabelSelector,proto3" json:"podAntiAffinityTermLabelSelector,omitempty"`
	// Port Configuration for the ingress gateway.
	Ports []*PortsConfig `protobuf:"bytes,23,rep,name=ports,proto3" json:"ports,omitempty"`
	// Number of replicas for the ingress gateway Deployment.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	ReplicaCount uint32 `protobuf:"varint,24,opt,name=replicaCount,proto3" json:"replicaCount,omitempty"`
	// K8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Resources *structpb.Struct `protobuf:"bytes,25,opt,name=resources,proto3" json:"resources,omitempty"`
	// Config for secret volume mounts.
	SecretVolumes []*SecretVolume `protobuf:"bytes,27,rep,name=secretVolumes,proto3" json:"secretVolumes,omitempty"`
	// Annotations to add to the ingress gateway service.
	ServiceAnnotations *structpb.Struct `protobuf:"bytes,28,opt,name=serviceAnnotations,proto3" json:"serviceAnnotations,omitempty"`
	// Service type.
	//
	// See https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
	Type string `protobuf:"bytes,29,opt,name=type,proto3" json:"type,omitempty"`
	// Enables cross-cluster access using SNI matching.
	Zvpn *IngressGatewayZvpnConfig `protobuf:"bytes,30,opt,name=zvpn,proto3" json:"zvpn,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxSurge *IntOrString `protobuf:"bytes,31,opt,name=rollingMaxSurge,proto3" json:"rollingMaxSurge,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxUnavailable *IntOrString `protobuf:"bytes,32,opt,name=rollingMaxUnavailable,proto3" json:"rollingMaxUnavailable,omitempty"`
	// externalTrafficPolicy for the gateway service (Cluster/Local per K8s
	// convention — not documented in the proto).
	ExternalTrafficPolicy string `protobuf:"bytes,34,opt,name=externalTrafficPolicy,proto3" json:"externalTrafficPolicy,omitempty"`
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Tolerations []*structpb.Struct `protobuf:"bytes,35,rep,name=tolerations,proto3" json:"tolerations,omitempty"`
	// Additional ingress port specs (schema not documented in the proto).
	IngressPorts []*structpb.Struct `protobuf:"bytes,36,rep,name=ingressPorts,proto3" json:"ingressPorts,omitempty"`
	// Extra containers to add to the gateway pod spec.
	AdditionalContainers []*structpb.Struct `protobuf:"bytes,37,rep,name=additionalContainers,proto3" json:"additionalContainers,omitempty"`
	// Extra config volumes to mount (schema not documented in the proto).
	ConfigVolumes []*structpb.Struct `protobuf:"bytes,38,rep,name=configVolumes,proto3" json:"configVolumes,omitempty"`
	// Whether the gateway container runs as root (semantics defined by the
	// chart templates, not visible here).
	RunAsRoot *wrapperspb.BoolValue `protobuf:"bytes,45,opt,name=runAsRoot,proto3" json:"runAsRoot,omitempty"`
	// The injection template to use for the gateway. If not set, no injection will be performed.
	InjectionTemplate string `protobuf:"bytes,46,opt,name=injectionTemplate,proto3" json:"injectionTemplate,omitempty"`
	// ServiceAccount configuration for the gateway workload.
	ServiceAccount *ServiceAccount `protobuf:"bytes,47,opt,name=serviceAccount,proto3" json:"serviceAccount,omitempty"`
	// K8s service ipFamilies setting (e.g. IPv4/IPv6 — see K8s dual-stack docs).
	IpFamilies []string `protobuf:"bytes,48,rep,name=ipFamilies,proto3" json:"ipFamilies,omitempty"`
	// K8s service ipFamilyPolicy setting.
	IpFamilyPolicy string `protobuf:"bytes,49,opt,name=ipFamilyPolicy,proto3" json:"ipFamilyPolicy,omitempty"` // Next available 50.
}
// Reset clears x to its zero value and, when the protoimpl unsafe fast
// path is enabled, re-attaches the generated message info.
func (x *IngressGatewayConfig) Reset() {
	*x = IngressGatewayConfig{}
	if protoimpl.UnsafeEnabled {
		// msgTypes[16] is this message's slot in the generated type table.
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[16]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format.
func (x *IngressGatewayConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *IngressGatewayConfig as implementing proto.Message.
func (*IngressGatewayConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, caching the
// message info on first use when the unsafe fast path is enabled.
func (x *IngressGatewayConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[16]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IngressGatewayConfig.ProtoReflect.Descriptor instead.
func (*IngressGatewayConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{16}
}
// Nil-safe accessors for IngressGatewayConfig. Each getter tolerates a
// nil receiver and yields the field's zero value in that case, preserving
// the standard semantics of protobuf-generated getters.

func (x *IngressGatewayConfig) GetAutoscaleEnabled() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.AutoscaleEnabled
}

func (x *IngressGatewayConfig) GetAutoscaleMax() uint32 {
	if x == nil {
		return 0
	}
	return x.AutoscaleMax
}

func (x *IngressGatewayConfig) GetAutoscaleMin() uint32 {
	if x == nil {
		return 0
	}
	return x.AutoscaleMin
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetMemory() *TargetUtilizationConfig {
	if x == nil {
		return nil
	}
	return x.Memory
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetCpu() *TargetUtilizationConfig {
	if x == nil {
		return nil
	}
	return x.Cpu
}

func (x *IngressGatewayConfig) GetCustomService() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.CustomService
}

func (x *IngressGatewayConfig) GetEnabled() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.Enabled
}

func (x *IngressGatewayConfig) GetEnv() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.Env
}

func (x *IngressGatewayConfig) GetLabels() map[string]string {
	if x == nil {
		return nil
	}
	return x.Labels
}

func (x *IngressGatewayConfig) GetLoadBalancerIP() string {
	if x == nil {
		return ""
	}
	return x.LoadBalancerIP
}

func (x *IngressGatewayConfig) GetLoadBalancerSourceRanges() []string {
	if x == nil {
		return nil
	}
	return x.LoadBalancerSourceRanges
}

func (x *IngressGatewayConfig) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetNodeSelector() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.NodeSelector
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetPodAnnotations() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.PodAnnotations
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetPodAntiAffinityLabelSelector() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.PodAntiAffinityLabelSelector
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetPodAntiAffinityTermLabelSelector() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.PodAntiAffinityTermLabelSelector
}

func (x *IngressGatewayConfig) GetPorts() []*PortsConfig {
	if x == nil {
		return nil
	}
	return x.Ports
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetReplicaCount() uint32 {
	if x == nil {
		return 0
	}
	return x.ReplicaCount
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetResources() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.Resources
}

func (x *IngressGatewayConfig) GetSecretVolumes() []*SecretVolume {
	if x == nil {
		return nil
	}
	return x.SecretVolumes
}

func (x *IngressGatewayConfig) GetServiceAnnotations() *structpb.Struct {
	if x == nil {
		return nil
	}
	return x.ServiceAnnotations
}

func (x *IngressGatewayConfig) GetType() string {
	if x == nil {
		return ""
	}
	return x.Type
}

func (x *IngressGatewayConfig) GetZvpn() *IngressGatewayZvpnConfig {
	if x == nil {
		return nil
	}
	return x.Zvpn
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetRollingMaxSurge() *IntOrString {
	if x == nil {
		return nil
	}
	return x.RollingMaxSurge
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetRollingMaxUnavailable() *IntOrString {
	if x == nil {
		return nil
	}
	return x.RollingMaxUnavailable
}

func (x *IngressGatewayConfig) GetExternalTrafficPolicy() string {
	if x == nil {
		return ""
	}
	return x.ExternalTrafficPolicy
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *IngressGatewayConfig) GetTolerations() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.Tolerations
}

func (x *IngressGatewayConfig) GetIngressPorts() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.IngressPorts
}

func (x *IngressGatewayConfig) GetAdditionalContainers() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.AdditionalContainers
}

func (x *IngressGatewayConfig) GetConfigVolumes() []*structpb.Struct {
	if x == nil {
		return nil
	}
	return x.ConfigVolumes
}

func (x *IngressGatewayConfig) GetRunAsRoot() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.RunAsRoot
}

func (x *IngressGatewayConfig) GetInjectionTemplate() string {
	if x == nil {
		return ""
	}
	return x.InjectionTemplate
}

func (x *IngressGatewayConfig) GetServiceAccount() *ServiceAccount {
	if x == nil {
		return nil
	}
	return x.ServiceAccount
}

func (x *IngressGatewayConfig) GetIpFamilies() []string {
	if x == nil {
		return nil
	}
	return x.IpFamilies
}

func (x *IngressGatewayConfig) GetIpFamilyPolicy() string {
	if x == nil {
		return ""
	}
	return x.IpFamilyPolicy
}
// IngressGatewayZvpnConfig enables cross-cluster access using SNI matching.
type IngressGatewayZvpnConfig struct {
	// protobuf runtime bookkeeping; managed by the generated code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Controls whether ZeroVPN is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Suffix used by ZeroVPN — presumably a DNS suffix for SNI matching;
	// not documented in the proto, verify against the chart templates.
	Suffix string `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
}
func (x *IngressGatewayZvpnConfig) Reset() {
*x = IngressGatewayZvpnConfig{}
if protoimpl.UnsafeEnabled {
mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[17]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *IngressGatewayZvpnConfig) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*IngressGatewayZvpnConfig) ProtoMessage() {}
func (x *IngressGatewayZvpnConfig) ProtoReflect() protoreflect.Message {
mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[17]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use IngressGatewayZvpnConfig.ProtoReflect.Descriptor instead.
func (*IngressGatewayZvpnConfig) Descriptor() ([]byte, []int) {
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{17}
}
func (x *IngressGatewayZvpnConfig) GetEnabled() *wrapperspb.BoolValue {
if x != nil {
return x.Enabled
}
return nil
}
func (x *IngressGatewayZvpnConfig) GetSuffix() string {
if x != nil {
return x.Suffix
}
return ""
}
// MultiClusterConfig specifies the Configuration for Istio mesh across multiple clusters through the istio gateways.
type MultiClusterConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Enables the connection between two kubernetes clusters via their respective ingressgateway services.
	// Use if the pods in each cluster cannot directly talk to one another.
	Enabled            *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	ClusterName        string                `protobuf:"bytes,2,opt,name=clusterName,proto3" json:"clusterName,omitempty"`
	GlobalDomainSuffix string                `protobuf:"bytes,3,opt,name=globalDomainSuffix,proto3" json:"globalDomainSuffix,omitempty"`
	IncludeEnvoyFilter *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=includeEnvoyFilter,proto3" json:"includeEnvoyFilter,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *MultiClusterConfig) Reset() {
	*x = MultiClusterConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[18]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *MultiClusterConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*MultiClusterConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *MultiClusterConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[18]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MultiClusterConfig.ProtoReflect.Descriptor instead.
func (*MultiClusterConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{18}
}

// Nil-safe getters: each returns the field, or its zero value when the
// receiver is nil.

func (x *MultiClusterConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}

func (x *MultiClusterConfig) GetClusterName() string {
	if x != nil {
		return x.ClusterName
	}
	return ""
}

func (x *MultiClusterConfig) GetGlobalDomainSuffix() string {
	if x != nil {
		return x.GlobalDomainSuffix
	}
	return ""
}

func (x *MultiClusterConfig) GetIncludeEnvoyFilter() *wrapperspb.BoolValue {
	if x != nil {
		return x.IncludeEnvoyFilter
	}
	return nil
}
// OutboundTrafficPolicyConfig controls the default behavior of the sidecar for handling outbound traffic from the application.
type OutboundTrafficPolicyConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Outbound traffic policy mode (enum; field number 2 in the proto).
	Mode OutboundTrafficPolicyConfig_Mode `protobuf:"varint,2,opt,name=mode,proto3,enum=v1alpha1.OutboundTrafficPolicyConfig_Mode" json:"mode,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *OutboundTrafficPolicyConfig) Reset() {
	*x = OutboundTrafficPolicyConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[19]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *OutboundTrafficPolicyConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*OutboundTrafficPolicyConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *OutboundTrafficPolicyConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[19]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use OutboundTrafficPolicyConfig.ProtoReflect.Descriptor instead.
func (*OutboundTrafficPolicyConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{19}
}

// GetMode returns Mode; when the receiver is nil it returns the enum's
// zero value, ALLOW_ANY.
func (x *OutboundTrafficPolicyConfig) GetMode() OutboundTrafficPolicyConfig_Mode {
	if x != nil {
		return x.Mode
	}
	return OutboundTrafficPolicyConfig_ALLOW_ANY
}
// Configuration for Pilot.
type PilotConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether Pilot is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Controls whether a HorizontalPodAutoscaler is installed for Pilot.
	AutoscaleEnabled *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=autoscaleEnabled,proto3" json:"autoscaleEnabled,omitempty"`
	// Minimum number of replicas in the HorizontalPodAutoscaler for Pilot.
	AutoscaleMin uint32 `protobuf:"varint,3,opt,name=autoscaleMin,proto3" json:"autoscaleMin,omitempty"`
	// Maximum number of replicas in the HorizontalPodAutoscaler for Pilot.
	AutoscaleMax uint32 `protobuf:"varint,4,opt,name=autoscaleMax,proto3" json:"autoscaleMax,omitempty"`
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#configurable-scaling-behavior
	AutoscaleBehavior *structpb.Struct `protobuf:"bytes,40,opt,name=autoscaleBehavior,proto3" json:"autoscaleBehavior,omitempty"`
	// Number of replicas in the Pilot Deployment.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	ReplicaCount uint32 `protobuf:"varint,5,opt,name=replicaCount,proto3" json:"replicaCount,omitempty"`
	// Image name used for Pilot.
	//
	// This can be set either to image name if hub is also set, or can be set to the full hub:name string.
	//
	// Examples: custom-pilot, docker.io/someuser:custom-pilot
	Image string `protobuf:"bytes,6,opt,name=image,proto3" json:"image,omitempty"`
	// Trace sampling fraction.
	//
	// Used to set the fraction of time that traces are sampled. Higher values are more accurate but add CPU overhead.
	//
	// Allowed values: 0.0 to 1.0
	TraceSampling float64 `protobuf:"fixed64,8,opt,name=traceSampling,proto3" json:"traceSampling,omitempty"`
	// K8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Resources *Resources `protobuf:"bytes,9,opt,name=resources,proto3" json:"resources,omitempty"`
	// Namespace that the configuration management feature is installed into, if different from Pilot namespace.
	ConfigNamespace string `protobuf:"bytes,10,opt,name=configNamespace,proto3" json:"configNamespace,omitempty"`
	// Target CPU utilization used in HorizontalPodAutoscaler.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Cpu *TargetUtilizationConfig `protobuf:"bytes,11,opt,name=cpu,proto3" json:"cpu,omitempty"`
	// K8s node selector.
	//
	// See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	NodeSelector *structpb.Struct `protobuf:"bytes,12,opt,name=nodeSelector,proto3" json:"nodeSelector,omitempty"`
	// Maximum duration that a sidecar can be connected to a pilot.
	//
	// This setting balances out load across pilot instances, but adds some resource overhead.
	//
	// Examples: 300s, 30m, 1h
	KeepaliveMaxServerConnectionAge *durationpb.Duration `protobuf:"bytes,13,opt,name=keepaliveMaxServerConnectionAge,proto3" json:"keepaliveMaxServerConnectionAge,omitempty"`
	// Labels that are added to Pilot deployment and pods.
	//
	// See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
	DeploymentLabels *structpb.Struct `protobuf:"bytes,14,opt,name=deploymentLabels,proto3" json:"deploymentLabels,omitempty"`
	PodLabels        *structpb.Struct `protobuf:"bytes,36,opt,name=podLabels,proto3" json:"podLabels,omitempty"`
	// Configuration settings passed to Pilot as a ConfigMap.
	//
	// This controls whether the mesh config map, generated from values.yaml is generated.
	// If false, pilot wil use default values or user-supplied values, in that order of preference.
	ConfigMap *wrapperspb.BoolValue `protobuf:"bytes,18,opt,name=configMap,proto3" json:"configMap,omitempty"`
	// Controls whether Pilot is configured through the Mesh Control Protocol (MCP).
	//
	// If set to true, Pilot requires an MCP server (like Galley) to be installed.
	UseMCP *wrapperspb.BoolValue `protobuf:"bytes,20,opt,name=useMCP,proto3" json:"useMCP,omitempty"`
	// Environment variables passed to the Pilot container.
	//
	// Examples:
	// env:
	//
	//	ENV_VAR_1: value1
	//	ENV_VAR_2: value2
	Env      *structpb.Struct `protobuf:"bytes,21,opt,name=env,proto3" json:"env,omitempty"`
	Affinity *structpb.Struct `protobuf:"bytes,22,opt,name=affinity,proto3" json:"affinity,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxSurge *IntOrString `protobuf:"bytes,24,opt,name=rollingMaxSurge,proto3" json:"rollingMaxSurge,omitempty"`
	// K8s rolling update strategy
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	RollingMaxUnavailable *IntOrString `protobuf:"bytes,25,opt,name=rollingMaxUnavailable,proto3" json:"rollingMaxUnavailable,omitempty"`
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Tolerations []*structpb.Struct `protobuf:"bytes,26,rep,name=tolerations,proto3" json:"tolerations,omitempty"`
	// if protocol sniffing is enabled for outbound
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	EnableProtocolSniffingForOutbound *wrapperspb.BoolValue `protobuf:"bytes,28,opt,name=enableProtocolSniffingForOutbound,proto3" json:"enableProtocolSniffingForOutbound,omitempty"`
	// if protocol sniffing is enabled for inbound
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	EnableProtocolSniffingForInbound *wrapperspb.BoolValue `protobuf:"bytes,29,opt,name=enableProtocolSniffingForInbound,proto3" json:"enableProtocolSniffingForInbound,omitempty"`
	// K8s annotations for pods.
	//
	// See: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	PodAnnotations     *structpb.Struct `protobuf:"bytes,30,opt,name=podAnnotations,proto3" json:"podAnnotations,omitempty"`
	ServiceAnnotations *structpb.Struct `protobuf:"bytes,37,opt,name=serviceAnnotations,proto3" json:"serviceAnnotations,omitempty"`
	// ConfigSource describes a source of configuration data for networking
	// rules, and other Istio configuration artifacts. Multiple data sources
	// can be configured for a single control plane.
	ConfigSource            *PilotConfigSource `protobuf:"bytes,31,opt,name=configSource,proto3" json:"configSource,omitempty"`
	JwksResolverExtraRootCA string             `protobuf:"bytes,32,opt,name=jwksResolverExtraRootCA,proto3" json:"jwksResolverExtraRootCA,omitempty"`
	Plugins                 []string           `protobuf:"bytes,33,rep,name=plugins,proto3" json:"plugins,omitempty"`
	Hub                     string             `protobuf:"bytes,34,opt,name=hub,proto3" json:"hub,omitempty"`
	Tag                     *structpb.Value    `protobuf:"bytes,35,opt,name=tag,proto3" json:"tag,omitempty"`
	Variant                 string             `protobuf:"bytes,39,opt,name=variant,proto3" json:"variant,omitempty"`
	// The Container seccompProfile
	//
	// See: https://kubernetes.io/docs/tutorials/security/seccomp/
	SeccompProfile            *structpb.Struct   `protobuf:"bytes,38,opt,name=seccompProfile,proto3" json:"seccompProfile,omitempty"`
	TopologySpreadConstraints []*structpb.Struct `protobuf:"bytes,41,rep,name=topologySpreadConstraints,proto3" json:"topologySpreadConstraints,omitempty"`
	ExtraContainerArgs        []*structpb.Struct `protobuf:"bytes,42,rep,name=extraContainerArgs,proto3" json:"extraContainerArgs,omitempty"`
	VolumeMounts              []*structpb.Struct `protobuf:"bytes,49,rep,name=volumeMounts,proto3" json:"volumeMounts,omitempty"`
	Volumes                   []*structpb.Struct `protobuf:"bytes,51,rep,name=volumes,proto3" json:"volumes,omitempty"`
	IpFamilies                []string           `protobuf:"bytes,52,rep,name=ipFamilies,proto3" json:"ipFamilies,omitempty"`
	IpFamilyPolicy            string             `protobuf:"bytes,53,opt,name=ipFamilyPolicy,proto3" json:"ipFamilyPolicy,omitempty"`
	// Target memory utilization used in HorizontalPodAutoscaler.
	//
	// See https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Memory *TargetUtilizationConfig `protobuf:"bytes,54,opt,name=memory,proto3" json:"memory,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *PilotConfig) Reset() {
	*x = PilotConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[20]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *PilotConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*PilotConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *PilotConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[20]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PilotConfig.ProtoReflect.Descriptor instead.
func (*PilotConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{20}
}

// Nil-safe getters: each returns the field, or its zero value when the
// receiver is nil.

func (x *PilotConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}

func (x *PilotConfig) GetAutoscaleEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.AutoscaleEnabled
	}
	return nil
}

func (x *PilotConfig) GetAutoscaleMin() uint32 {
	if x != nil {
		return x.AutoscaleMin
	}
	return 0
}

func (x *PilotConfig) GetAutoscaleMax() uint32 {
	if x != nil {
		return x.AutoscaleMax
	}
	return 0
}

func (x *PilotConfig) GetAutoscaleBehavior() *structpb.Struct {
	if x != nil {
		return x.AutoscaleBehavior
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetReplicaCount() uint32 {
	if x != nil {
		return x.ReplicaCount
	}
	return 0
}

func (x *PilotConfig) GetImage() string {
	if x != nil {
		return x.Image
	}
	return ""
}

func (x *PilotConfig) GetTraceSampling() float64 {
	if x != nil {
		return x.TraceSampling
	}
	return 0
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetResources() *Resources {
	if x != nil {
		return x.Resources
	}
	return nil
}

func (x *PilotConfig) GetConfigNamespace() string {
	if x != nil {
		return x.ConfigNamespace
	}
	return ""
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetCpu() *TargetUtilizationConfig {
	if x != nil {
		return x.Cpu
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetNodeSelector() *structpb.Struct {
	if x != nil {
		return x.NodeSelector
	}
	return nil
}

func (x *PilotConfig) GetKeepaliveMaxServerConnectionAge() *durationpb.Duration {
	if x != nil {
		return x.KeepaliveMaxServerConnectionAge
	}
	return nil
}

func (x *PilotConfig) GetDeploymentLabels() *structpb.Struct {
	if x != nil {
		return x.DeploymentLabels
	}
	return nil
}

func (x *PilotConfig) GetPodLabels() *structpb.Struct {
	if x != nil {
		return x.PodLabels
	}
	return nil
}

func (x *PilotConfig) GetConfigMap() *wrapperspb.BoolValue {
	if x != nil {
		return x.ConfigMap
	}
	return nil
}

func (x *PilotConfig) GetUseMCP() *wrapperspb.BoolValue {
	if x != nil {
		return x.UseMCP
	}
	return nil
}

func (x *PilotConfig) GetEnv() *structpb.Struct {
	if x != nil {
		return x.Env
	}
	return nil
}

func (x *PilotConfig) GetAffinity() *structpb.Struct {
	if x != nil {
		return x.Affinity
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetRollingMaxSurge() *IntOrString {
	if x != nil {
		return x.RollingMaxSurge
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetRollingMaxUnavailable() *IntOrString {
	if x != nil {
		return x.RollingMaxUnavailable
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetTolerations() []*structpb.Struct {
	if x != nil {
		return x.Tolerations
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetEnableProtocolSniffingForOutbound() *wrapperspb.BoolValue {
	if x != nil {
		return x.EnableProtocolSniffingForOutbound
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetEnableProtocolSniffingForInbound() *wrapperspb.BoolValue {
	if x != nil {
		return x.EnableProtocolSniffingForInbound
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetPodAnnotations() *structpb.Struct {
	if x != nil {
		return x.PodAnnotations
	}
	return nil
}

func (x *PilotConfig) GetServiceAnnotations() *structpb.Struct {
	if x != nil {
		return x.ServiceAnnotations
	}
	return nil
}

func (x *PilotConfig) GetConfigSource() *PilotConfigSource {
	if x != nil {
		return x.ConfigSource
	}
	return nil
}

func (x *PilotConfig) GetJwksResolverExtraRootCA() string {
	if x != nil {
		return x.JwksResolverExtraRootCA
	}
	return ""
}

func (x *PilotConfig) GetPlugins() []string {
	if x != nil {
		return x.Plugins
	}
	return nil
}

func (x *PilotConfig) GetHub() string {
	if x != nil {
		return x.Hub
	}
	return ""
}

func (x *PilotConfig) GetTag() *structpb.Value {
	if x != nil {
		return x.Tag
	}
	return nil
}

func (x *PilotConfig) GetVariant() string {
	if x != nil {
		return x.Variant
	}
	return ""
}

func (x *PilotConfig) GetSeccompProfile() *structpb.Struct {
	if x != nil {
		return x.SeccompProfile
	}
	return nil
}

func (x *PilotConfig) GetTopologySpreadConstraints() []*structpb.Struct {
	if x != nil {
		return x.TopologySpreadConstraints
	}
	return nil
}

func (x *PilotConfig) GetExtraContainerArgs() []*structpb.Struct {
	if x != nil {
		return x.ExtraContainerArgs
	}
	return nil
}

func (x *PilotConfig) GetVolumeMounts() []*structpb.Struct {
	if x != nil {
		return x.VolumeMounts
	}
	return nil
}

func (x *PilotConfig) GetVolumes() []*structpb.Struct {
	if x != nil {
		return x.Volumes
	}
	return nil
}

func (x *PilotConfig) GetIpFamilies() []string {
	if x != nil {
		return x.IpFamilies
	}
	return nil
}

func (x *PilotConfig) GetIpFamilyPolicy() string {
	if x != nil {
		return x.IpFamilyPolicy
	}
	return ""
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *PilotConfig) GetMemory() *TargetUtilizationConfig {
	if x != nil {
		return x.Memory
	}
	return nil
}
// Controls legacy k8s ingress. Only one pilot profile should enable ingress support.
type PilotIngressConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Sets the type ingress service for Pilot.
	//
	// If empty, node-port is assumed.
	//
	// Allowed values: node-port, istio-ingressgateway, ingress
	IngressService        string                `protobuf:"bytes,1,opt,name=ingressService,proto3" json:"ingressService,omitempty"`
	IngressControllerMode IngressControllerMode `protobuf:"varint,2,opt,name=ingressControllerMode,proto3,enum=v1alpha1.IngressControllerMode" json:"ingressControllerMode,omitempty"`
	// If mode is STRICT, this value must be set on "kubernetes.io/ingress.class" annotation to activate.
	IngressClass string `protobuf:"bytes,3,opt,name=ingressClass,proto3" json:"ingressClass,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *PilotIngressConfig) Reset() {
	*x = PilotIngressConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[21]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *PilotIngressConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*PilotIngressConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *PilotIngressConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[21]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PilotIngressConfig.ProtoReflect.Descriptor instead.
func (*PilotIngressConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{21}
}

// Nil-safe getters: each returns the field, or its zero value when the
// receiver is nil.

func (x *PilotIngressConfig) GetIngressService() string {
	if x != nil {
		return x.IngressService
	}
	return ""
}

func (x *PilotIngressConfig) GetIngressControllerMode() IngressControllerMode {
	if x != nil {
		return x.IngressControllerMode
	}
	return IngressControllerMode_UNSPECIFIED
}

func (x *PilotIngressConfig) GetIngressClass() string {
	if x != nil {
		return x.IngressClass
	}
	return ""
}
// Controls whether Istio policy is applied to Pilot.
type PilotPolicyConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether Istio policy is applied to Pilot.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *PilotPolicyConfig) Reset() {
	*x = PilotPolicyConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[22]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *PilotPolicyConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*PilotPolicyConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *PilotPolicyConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[22]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PilotPolicyConfig.ProtoReflect.Descriptor instead.
func (*PilotPolicyConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{22}
}

// GetEnabled returns Enabled, or nil when the receiver is nil.
func (x *PilotPolicyConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
// Controls telemetry configuration
type TelemetryConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether telemetry is exported for Pilot.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Use telemetry v2.
	V2 *TelemetryV2Config `protobuf:"bytes,3,opt,name=v2,proto3" json:"v2,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *TelemetryConfig) Reset() {
	*x = TelemetryConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[23]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *TelemetryConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*TelemetryConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *TelemetryConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[23]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryConfig.ProtoReflect.Descriptor instead.
func (*TelemetryConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{23}
}

// GetEnabled returns Enabled, or nil when the receiver is nil.
func (x *TelemetryConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}

// GetV2 returns V2, or nil when the receiver is nil.
func (x *TelemetryConfig) GetV2() *TelemetryV2Config {
	if x != nil {
		return x.V2
	}
	return nil
}
// Controls whether pilot will configure telemetry v2.
type TelemetryV2Config struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether pilot will configure telemetry v2.
	Enabled     *wrapperspb.BoolValue         `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	Prometheus  *TelemetryV2PrometheusConfig  `protobuf:"bytes,2,opt,name=prometheus,proto3" json:"prometheus,omitempty"`
	Stackdriver *TelemetryV2StackDriverConfig `protobuf:"bytes,3,opt,name=stackdriver,proto3" json:"stackdriver,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *TelemetryV2Config) Reset() {
	*x = TelemetryV2Config{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[24]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *TelemetryV2Config) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*TelemetryV2Config) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *TelemetryV2Config) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[24]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryV2Config.ProtoReflect.Descriptor instead.
func (*TelemetryV2Config) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{24}
}

// Nil-safe getters: each returns the field, or nil when the receiver is nil.

func (x *TelemetryV2Config) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}

func (x *TelemetryV2Config) GetPrometheus() *TelemetryV2PrometheusConfig {
	if x != nil {
		return x.Prometheus
	}
	return nil
}

func (x *TelemetryV2Config) GetStackdriver() *TelemetryV2StackDriverConfig {
	if x != nil {
		return x.Stackdriver
	}
	return nil
}
// Controls telemetry v2 prometheus settings.
type TelemetryV2PrometheusConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether stats envoyfilter would be enabled or not.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *TelemetryV2PrometheusConfig) Reset() {
	*x = TelemetryV2PrometheusConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[25]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *TelemetryV2PrometheusConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*TelemetryV2PrometheusConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *TelemetryV2PrometheusConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[25]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryV2PrometheusConfig.ProtoReflect.Descriptor instead.
func (*TelemetryV2PrometheusConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{25}
}

// GetEnabled returns Enabled, or nil when the receiver is nil.
func (x *TelemetryV2PrometheusConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
// TelemetryV2StackDriverConfig controls telemetry v2 stackdriver settings.
type TelemetryV2StackDriverConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls whether the stackdriver telemetry integration is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *TelemetryV2StackDriverConfig) Reset() {
	*x = TelemetryV2StackDriverConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[26]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *TelemetryV2StackDriverConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*TelemetryV2StackDriverConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *TelemetryV2StackDriverConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[26]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryV2StackDriverConfig.ProtoReflect.Descriptor instead.
func (*TelemetryV2StackDriverConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{26}
}

// GetEnabled returns Enabled, or nil when the receiver is nil.
func (x *TelemetryV2StackDriverConfig) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}
// PilotConfigSource describes information about a configuration store inside a
// mesh. A single control plane instance can interact with one or more data
// sources.
type PilotConfigSource struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Describes the source of configuration, if nothing is specified default is MCP.
	SubscribedResources []string `protobuf:"bytes,1,rep,name=subscribedResources,proto3" json:"subscribedResources,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *PilotConfigSource) Reset() {
	*x = PilotConfigSource{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[27]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *PilotConfigSource) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*PilotConfigSource) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *PilotConfigSource) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[27]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PilotConfigSource.ProtoReflect.Descriptor instead.
func (*PilotConfigSource) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{27}
}

// GetSubscribedResources returns SubscribedResources, or nil when the
// receiver is nil.
func (x *PilotConfigSource) GetSubscribedResources() []string {
	if x != nil {
		return x.SubscribedResources
	}
	return nil
}
// Configuration for a port.
type PortsConfig struct {
	// Internal protobuf runtime state; managed by the protobuf library.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Port name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Port number.
	Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
	// NodePort number.
	NodePort int32 `protobuf:"varint,3,opt,name=nodePort,proto3" json:"nodePort,omitempty"`
	// Target port number.
	TargetPort int32 `protobuf:"varint,4,opt,name=targetPort,proto3" json:"targetPort,omitempty"`
	// Protocol name.
	Protocol string `protobuf:"bytes,5,opt,name=protocol,proto3" json:"protocol,omitempty"`
}

// Reset clears the message to its zero value and, on the protoimpl unsafe
// fast path, re-registers the cached message info.
func (x *PortsConfig) Reset() {
	*x = PortsConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[28]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns the protobuf text representation of the message.
func (x *PortsConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage identifies the type as a protobuf message.
func (*PortsConfig) ProtoMessage() {}

// ProtoReflect returns a reflective view of the message, lazily
// initializing the cached message info when the unsafe fast path is enabled.
func (x *PortsConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[28]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use PortsConfig.ProtoReflect.Descriptor instead.
func (*PortsConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{28}
}

// Nil-safe getters: each returns the field, or its zero value when the
// receiver is nil.

func (x *PortsConfig) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *PortsConfig) GetPort() int32 {
	if x != nil {
		return x.Port
	}
	return 0
}

func (x *PortsConfig) GetNodePort() int32 {
	if x != nil {
		return x.NodePort
	}
	return 0
}

func (x *PortsConfig) GetTargetPort() int32 {
	if x != nil {
		return x.TargetPort
	}
	return 0
}

func (x *PortsConfig) GetProtocol() string {
	if x != nil {
		return x.Protocol
	}
	return ""
}
// Configuration for Proxy.
type ProxyConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Controls sidecar auto-injection (stored as a string value).
	AutoInject string `protobuf:"bytes,4,opt,name=autoInject,proto3" json:"autoInject,omitempty"`
	// Domain for the cluster, default: "cluster.local".
	//
	// K8s allows this to be customized, see https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/
	ClusterDomain string `protobuf:"bytes,5,opt,name=clusterDomain,proto3" json:"clusterDomain,omitempty"`
	// Per Component log level for proxy, applies to gateways and sidecars.
	//
	// If a component level is not set, then the global "logLevel" will be used. If left empty, "misc:error" is used.
	ComponentLogLevel string `protobuf:"bytes,6,opt,name=componentLogLevel,proto3" json:"componentLogLevel,omitempty"`
	// Enables core dumps for newly injected sidecars.
	//
	// If set, newly injected sidecars will have core dumps enabled.
	EnableCoreDump *wrapperspb.BoolValue `protobuf:"bytes,9,opt,name=enableCoreDump,proto3" json:"enableCoreDump,omitempty"`
	// Specifies the Istio ingress ports not to capture.
	ExcludeInboundPorts string `protobuf:"bytes,12,opt,name=excludeInboundPorts,proto3" json:"excludeInboundPorts,omitempty"`
	// Lists the excluded IP ranges of Istio egress traffic that the sidecar captures.
	ExcludeIPRanges string `protobuf:"bytes,13,opt,name=excludeIPRanges,proto3" json:"excludeIPRanges,omitempty"`
	// Image name or path for the proxy, default: "proxyv2".
	//
	// If registry or tag are not specified, global.hub and global.tag are used.
	//
	// Examples: my-proxy (uses global.hub/tag), docker.io/myrepo/my-proxy:v1.0.0
	Image string `protobuf:"bytes,14,opt,name=image,proto3" json:"image,omitempty"`
	// Lists the IP ranges of Istio egress traffic that the sidecar captures.
	//
	// Example: "172.30.0.0/16,172.20.0.0/16"
	// This would only capture egress traffic on those two IP Ranges, all other outbound traffic would # be allowed by the sidecar."
	IncludeIPRanges string `protobuf:"bytes,16,opt,name=includeIPRanges,proto3" json:"includeIPRanges,omitempty"`
	// Log level for proxy, applies to gateways and sidecars. If left empty, "warning" is used. Expected values are: trace\|debug\|info\|warning\|error\|critical\|off
	LogLevel string `protobuf:"bytes,18,opt,name=logLevel,proto3" json:"logLevel,omitempty"`
	// Enables privileged securityContext for the istio-proxy container.
	//
	// See https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
	Privileged *wrapperspb.BoolValue `protobuf:"bytes,19,opt,name=privileged,proto3" json:"privileged,omitempty"`
	// Sets the initial delay for readiness probes in seconds.
	ReadinessInitialDelaySeconds uint32 `protobuf:"varint,20,opt,name=readinessInitialDelaySeconds,proto3" json:"readinessInitialDelaySeconds,omitempty"`
	// Sets the interval between readiness probes in seconds.
	ReadinessPeriodSeconds uint32 `protobuf:"varint,21,opt,name=readinessPeriodSeconds,proto3" json:"readinessPeriodSeconds,omitempty"`
	// Sets the number of successive failed probes before indicating readiness failure.
	ReadinessFailureThreshold uint32 `protobuf:"varint,22,opt,name=readinessFailureThreshold,proto3" json:"readinessFailureThreshold,omitempty"`
	// Startup probe settings for the proxy container.
	StartupProbe *StartupProbe `protobuf:"bytes,41,opt,name=startupProbe,proto3" json:"startupProbe,omitempty"`
	// Default port used for the Pilot agent's health checks.
	StatusPort uint32 `protobuf:"varint,23,opt,name=statusPort,proto3" json:"statusPort,omitempty"`
	// K8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Resources *Resources `protobuf:"bytes,24,opt,name=resources,proto3" json:"resources,omitempty"`
	// Tracer backend selection (enum v1alpha1.Tracer).
	Tracer Tracer `protobuf:"varint,25,opt,name=tracer,proto3,enum=v1alpha1.Tracer" json:"tracer,omitempty"`
	// Outbound ports excluded from traffic capture.
	ExcludeOutboundPorts string `protobuf:"bytes,28,opt,name=excludeOutboundPorts,proto3" json:"excludeOutboundPorts,omitempty"`
	// Container lifecycle hooks, as an arbitrary K8s-style struct.
	Lifecycle *structpb.Struct `protobuf:"bytes,36,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"`
	// Controls if sidecar is injected at the front of the container list and blocks the start of the other containers until the proxy is ready
	//
	// Deprecated: replaced by ProxyConfig setting which allows per-pod configuration of this behavior.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	HoldApplicationUntilProxyStarts *wrapperspb.BoolValue `protobuf:"bytes,37,opt,name=holdApplicationUntilProxyStarts,proto3" json:"holdApplicationUntilProxyStarts,omitempty"`
	// Inbound ports included in traffic capture.
	IncludeInboundPorts string `protobuf:"bytes,38,opt,name=includeInboundPorts,proto3" json:"includeInboundPorts,omitempty"`
	// Outbound ports included in traffic capture.
	IncludeOutboundPorts string `protobuf:"bytes,39,opt,name=includeOutboundPorts,proto3" json:"includeOutboundPorts,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *ProxyConfig) Reset() {
	*x = ProxyConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[29]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *ProxyConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ProxyConfig as implementing proto.Message.
func (*ProxyConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *ProxyConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[29]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 29) within it.
//
// Deprecated: Use ProxyConfig.ProtoReflect.Descriptor instead.
func (*ProxyConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{29}
}

// The getters below all follow the generated nil-receiver-safe pattern:
// they return the field when x is non-nil, and the type's zero value otherwise.

func (x *ProxyConfig) GetAutoInject() string {
	if x != nil {
		return x.AutoInject
	}
	return ""
}

func (x *ProxyConfig) GetClusterDomain() string {
	if x != nil {
		return x.ClusterDomain
	}
	return ""
}

func (x *ProxyConfig) GetComponentLogLevel() string {
	if x != nil {
		return x.ComponentLogLevel
	}
	return ""
}

func (x *ProxyConfig) GetEnableCoreDump() *wrapperspb.BoolValue {
	if x != nil {
		return x.EnableCoreDump
	}
	return nil
}

func (x *ProxyConfig) GetExcludeInboundPorts() string {
	if x != nil {
		return x.ExcludeInboundPorts
	}
	return ""
}

func (x *ProxyConfig) GetExcludeIPRanges() string {
	if x != nil {
		return x.ExcludeIPRanges
	}
	return ""
}

func (x *ProxyConfig) GetImage() string {
	if x != nil {
		return x.Image
	}
	return ""
}

func (x *ProxyConfig) GetIncludeIPRanges() string {
	if x != nil {
		return x.IncludeIPRanges
	}
	return ""
}

func (x *ProxyConfig) GetLogLevel() string {
	if x != nil {
		return x.LogLevel
	}
	return ""
}

func (x *ProxyConfig) GetPrivileged() *wrapperspb.BoolValue {
	if x != nil {
		return x.Privileged
	}
	return nil
}

func (x *ProxyConfig) GetReadinessInitialDelaySeconds() uint32 {
	if x != nil {
		return x.ReadinessInitialDelaySeconds
	}
	return 0
}

func (x *ProxyConfig) GetReadinessPeriodSeconds() uint32 {
	if x != nil {
		return x.ReadinessPeriodSeconds
	}
	return 0
}

func (x *ProxyConfig) GetReadinessFailureThreshold() uint32 {
	if x != nil {
		return x.ReadinessFailureThreshold
	}
	return 0
}

func (x *ProxyConfig) GetStartupProbe() *StartupProbe {
	if x != nil {
		return x.StartupProbe
	}
	return nil
}

func (x *ProxyConfig) GetStatusPort() uint32 {
	if x != nil {
		return x.StatusPort
	}
	return 0
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *ProxyConfig) GetResources() *Resources {
	if x != nil {
		return x.Resources
	}
	return nil
}

// GetTracer returns the tracer enum; the zero value is Tracer_zipkin.
func (x *ProxyConfig) GetTracer() Tracer {
	if x != nil {
		return x.Tracer
	}
	return Tracer_zipkin
}

func (x *ProxyConfig) GetExcludeOutboundPorts() string {
	if x != nil {
		return x.ExcludeOutboundPorts
	}
	return ""
}

func (x *ProxyConfig) GetLifecycle() *structpb.Struct {
	if x != nil {
		return x.Lifecycle
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *ProxyConfig) GetHoldApplicationUntilProxyStarts() *wrapperspb.BoolValue {
	if x != nil {
		return x.HoldApplicationUntilProxyStarts
	}
	return nil
}

func (x *ProxyConfig) GetIncludeInboundPorts() string {
	if x != nil {
		return x.IncludeInboundPorts
	}
	return ""
}

func (x *ProxyConfig) GetIncludeOutboundPorts() string {
	if x != nil {
		return x.IncludeOutboundPorts
	}
	return ""
}
// StartupProbe configures the proxy container's startup probe.
type StartupProbe struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Whether the startup probe is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Number of failed probes before the startup probe is considered failed.
	FailureThreshold uint32 `protobuf:"varint,2,opt,name=failureThreshold,proto3" json:"failureThreshold,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *StartupProbe) Reset() {
	*x = StartupProbe{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[30]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *StartupProbe) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks StartupProbe as implementing proto.Message.
func (*StartupProbe) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *StartupProbe) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[30]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 30) within it.
//
// Deprecated: Use StartupProbe.ProtoReflect.Descriptor instead.
func (*StartupProbe) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{30}
}

// GetEnabled returns the enabled field; nil-receiver safe (returns nil).
func (x *StartupProbe) GetEnabled() *wrapperspb.BoolValue {
	if x != nil {
		return x.Enabled
	}
	return nil
}

// GetFailureThreshold returns the failureThreshold field; nil-receiver safe
// (returns 0).
func (x *StartupProbe) GetFailureThreshold() uint32 {
	if x != nil {
		return x.FailureThreshold
	}
	return 0
}
// Configuration for proxy_init container which sets the pods' networking to intercept the inbound/outbound traffic.
type ProxyInitConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Specifies the image for the proxy_init container.
	Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
	// K8s resources settings.
	//
	// See https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#resource-requests-and-limits-of-pod-and-container
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Resources *Resources `protobuf:"bytes,5,opt,name=resources,proto3" json:"resources,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *ProxyInitConfig) Reset() {
	*x = ProxyInitConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[31]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *ProxyInitConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ProxyInitConfig as implementing proto.Message.
func (*ProxyInitConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *ProxyInitConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[31]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 31) within it.
//
// Deprecated: Use ProxyInitConfig.ProtoReflect.Descriptor instead.
func (*ProxyInitConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{31}
}

// GetImage returns the image field; nil-receiver safe (returns "").
func (x *ProxyInitConfig) GetImage() string {
	if x != nil {
		return x.Image
	}
	return ""
}

// GetResources returns the resources field; nil-receiver safe (returns nil).
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *ProxyInitConfig) GetResources() *Resources {
	if x != nil {
		return x.Resources
	}
	return nil
}
// Configuration for K8s resource requests.
type ResourcesRequestsConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// CPU request quantity (string form, e.g. as used by K8s).
	Cpu string `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty"`
	// Memory request quantity (string form).
	Memory string `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *ResourcesRequestsConfig) Reset() {
	*x = ResourcesRequestsConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[32]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *ResourcesRequestsConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ResourcesRequestsConfig as implementing proto.Message.
func (*ResourcesRequestsConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *ResourcesRequestsConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[32]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 32) within it.
//
// Deprecated: Use ResourcesRequestsConfig.ProtoReflect.Descriptor instead.
func (*ResourcesRequestsConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{32}
}

// GetCpu returns the cpu field; nil-receiver safe (returns "").
func (x *ResourcesRequestsConfig) GetCpu() string {
	if x != nil {
		return x.Cpu
	}
	return ""
}

// GetMemory returns the memory field; nil-receiver safe (returns "").
func (x *ResourcesRequestsConfig) GetMemory() string {
	if x != nil {
		return x.Memory
	}
	return ""
}
// Configuration for the SecretDiscoveryService instead of using K8S secrets to mount the certificates.
type SDSConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	Token *structpb.Struct `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *SDSConfig) Reset() {
	*x = SDSConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[33]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *SDSConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SDSConfig as implementing proto.Message.
func (*SDSConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *SDSConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[33]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 33) within it.
//
// Deprecated: Use SDSConfig.ProtoReflect.Descriptor instead.
func (*SDSConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{33}
}

// GetToken returns the token field; nil-receiver safe (returns nil).
//
// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *SDSConfig) GetToken() *structpb.Struct {
	if x != nil {
		return x.Token
	}
	return nil
}
// Configuration for secret volume mounts.
//
// See https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets.
type SecretVolume struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Path where the secret volume is mounted inside the container.
	MountPath string `protobuf:"bytes,1,opt,name=mountPath,proto3" json:"mountPath,omitempty"`
	// Volume name.
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Name of the K8s Secret backing the volume.
	SecretName string `protobuf:"bytes,3,opt,name=secretName,proto3" json:"secretName,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *SecretVolume) Reset() {
	*x = SecretVolume{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[34]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *SecretVolume) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SecretVolume as implementing proto.Message.
func (*SecretVolume) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *SecretVolume) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[34]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 34) within it.
//
// Deprecated: Use SecretVolume.ProtoReflect.Descriptor instead.
func (*SecretVolume) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{34}
}

// GetMountPath returns the mountPath field; nil-receiver safe (returns "").
func (x *SecretVolume) GetMountPath() string {
	if x != nil {
		return x.MountPath
	}
	return ""
}

// GetName returns the name field; nil-receiver safe (returns "").
func (x *SecretVolume) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

// GetSecretName returns the secretName field; nil-receiver safe (returns "").
func (x *SecretVolume) GetSecretName() string {
	if x != nil {
		return x.SecretName
	}
	return ""
}
// SidecarInjectorConfig is described in istio.io documentation.
type SidecarInjectorConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Enables sidecar auto-injection in namespaces by default.
	EnableNamespacesByDefault *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=enableNamespacesByDefault,proto3" json:"enableNamespacesByDefault,omitempty"`
	// Setting this to `IfNeeded` will result in the sidecar injector being run again if additional mutations occur. Default: Never
	ReinvocationPolicy string `protobuf:"bytes,3,opt,name=reinvocationPolicy,proto3" json:"reinvocationPolicy,omitempty"`
	// Instructs Istio to not inject the sidecar on those pods, based on labels that are present in those pods.
	//
	// Annotations in the pods have higher precedence than the label selectors.
	// Order of evaluation: Pod Annotations → NeverInjectSelector → AlwaysInjectSelector → Default Policy.
	// See https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#more-control-adding-exceptions
	NeverInjectSelector []*structpb.Struct `protobuf:"bytes,11,rep,name=neverInjectSelector,proto3" json:"neverInjectSelector,omitempty"`
	// See NeverInjectSelector.
	AlwaysInjectSelector []*structpb.Struct `protobuf:"bytes,12,rep,name=alwaysInjectSelector,proto3" json:"alwaysInjectSelector,omitempty"`
	// If true, webhook or istioctl injector will rewrite PodSpec for liveness health check to redirect request to sidecar. This makes liveness check work even when mTLS is enabled.
	RewriteAppHTTPProbe *wrapperspb.BoolValue `protobuf:"bytes,16,opt,name=rewriteAppHTTPProbe,proto3" json:"rewriteAppHTTPProbe,omitempty"`
	// injectedAnnotations are additional annotations that will be added to the pod spec after injection
	// This is primarily to support PSP annotations.
	InjectedAnnotations *structpb.Struct `protobuf:"bytes,19,opt,name=injectedAnnotations,proto3" json:"injectedAnnotations,omitempty"`
	// Enable objectSelector to filter out pods with no need for sidecar before calling istio-sidecar-injector.
	ObjectSelector *structpb.Struct `protobuf:"bytes,21,opt,name=objectSelector,proto3" json:"objectSelector,omitempty"`
	// Configure the injection url for sidecar injector webhook
	InjectionURL string `protobuf:"bytes,22,opt,name=injectionURL,proto3" json:"injectionURL,omitempty"`
	// Templates defines a set of custom injection templates that can be used. For example, defining:
	//
	// templates:
	//
	//	hello: |
	//	  metadata:
	//	    labels:
	//	      hello: world
	//
	// Then starting a pod with the `inject.istio.io/templates: hello` annotation, will result in the pod
	// being injected with the hello=world labels.
	// This is intended for advanced configuration only; most users should use the built in template
	Templates *structpb.Struct `protobuf:"bytes,23,opt,name=templates,proto3" json:"templates,omitempty"`
	// defaultTemplates: ["sidecar", "hello"]
	DefaultTemplates []string `protobuf:"bytes,24,rep,name=defaultTemplates,proto3" json:"defaultTemplates,omitempty"`
	// If enabled, the legacy webhook selection logic will be used. This relies on filtering of webhook
	// requests in Istiod, rather than at the webhook selection level.
	// This is option is intended for migration purposes only and will be removed in Istio 1.10.
	//
	// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
	UseLegacySelectors *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=useLegacySelectors,proto3" json:"useLegacySelectors,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *SidecarInjectorConfig) Reset() {
	*x = SidecarInjectorConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[35]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *SidecarInjectorConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks SidecarInjectorConfig as implementing proto.Message.
func (*SidecarInjectorConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *SidecarInjectorConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[35]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 35) within it.
//
// Deprecated: Use SidecarInjectorConfig.ProtoReflect.Descriptor instead.
func (*SidecarInjectorConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{35}
}

// The getters below all follow the generated nil-receiver-safe pattern:
// they return the field when x is non-nil, and the type's zero value otherwise.

func (x *SidecarInjectorConfig) GetEnableNamespacesByDefault() *wrapperspb.BoolValue {
	if x != nil {
		return x.EnableNamespacesByDefault
	}
	return nil
}

func (x *SidecarInjectorConfig) GetReinvocationPolicy() string {
	if x != nil {
		return x.ReinvocationPolicy
	}
	return ""
}

func (x *SidecarInjectorConfig) GetNeverInjectSelector() []*structpb.Struct {
	if x != nil {
		return x.NeverInjectSelector
	}
	return nil
}

func (x *SidecarInjectorConfig) GetAlwaysInjectSelector() []*structpb.Struct {
	if x != nil {
		return x.AlwaysInjectSelector
	}
	return nil
}

func (x *SidecarInjectorConfig) GetRewriteAppHTTPProbe() *wrapperspb.BoolValue {
	if x != nil {
		return x.RewriteAppHTTPProbe
	}
	return nil
}

func (x *SidecarInjectorConfig) GetInjectedAnnotations() *structpb.Struct {
	if x != nil {
		return x.InjectedAnnotations
	}
	return nil
}

func (x *SidecarInjectorConfig) GetObjectSelector() *structpb.Struct {
	if x != nil {
		return x.ObjectSelector
	}
	return nil
}

func (x *SidecarInjectorConfig) GetInjectionURL() string {
	if x != nil {
		return x.InjectionURL
	}
	return ""
}

func (x *SidecarInjectorConfig) GetTemplates() *structpb.Struct {
	if x != nil {
		return x.Templates
	}
	return nil
}

func (x *SidecarInjectorConfig) GetDefaultTemplates() []string {
	if x != nil {
		return x.DefaultTemplates
	}
	return nil
}

// Deprecated: Marked as deprecated in pkg/apis/istio/v1alpha1/values_types.proto.
func (x *SidecarInjectorConfig) GetUseLegacySelectors() *wrapperspb.BoolValue {
	if x != nil {
		return x.UseLegacySelectors
	}
	return nil
}
// Configuration for each of the supported tracers.
type TracerConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Configuration for the datadog tracing service.
	Datadog *TracerDatadogConfig `protobuf:"bytes,1,opt,name=datadog,proto3" json:"datadog,omitempty"`
	// Configuration for the lightstep tracing service.
	Lightstep *TracerLightStepConfig `protobuf:"bytes,2,opt,name=lightstep,proto3" json:"lightstep,omitempty"`
	// Configuration for the zipkin tracing service.
	Zipkin *TracerZipkinConfig `protobuf:"bytes,3,opt,name=zipkin,proto3" json:"zipkin,omitempty"`
	// Configuration for the stackdriver tracing service.
	Stackdriver *TracerStackdriverConfig `protobuf:"bytes,4,opt,name=stackdriver,proto3" json:"stackdriver,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *TracerConfig) Reset() {
	*x = TracerConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[36]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *TracerConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerConfig as implementing proto.Message.
func (*TracerConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *TracerConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[36]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 36) within it.
//
// Deprecated: Use TracerConfig.ProtoReflect.Descriptor instead.
func (*TracerConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{36}
}

// GetDatadog returns the datadog field; nil-receiver safe (returns nil).
func (x *TracerConfig) GetDatadog() *TracerDatadogConfig {
	if x != nil {
		return x.Datadog
	}
	return nil
}

// GetLightstep returns the lightstep field; nil-receiver safe (returns nil).
func (x *TracerConfig) GetLightstep() *TracerLightStepConfig {
	if x != nil {
		return x.Lightstep
	}
	return nil
}

// GetZipkin returns the zipkin field; nil-receiver safe (returns nil).
func (x *TracerConfig) GetZipkin() *TracerZipkinConfig {
	if x != nil {
		return x.Zipkin
	}
	return nil
}

// GetStackdriver returns the stackdriver field; nil-receiver safe (returns nil).
func (x *TracerConfig) GetStackdriver() *TracerStackdriverConfig {
	if x != nil {
		return x.Stackdriver
	}
	return nil
}
// Configuration for the datadog tracing service.
type TracerDatadogConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Address in host:port format for reporting trace data to the Datadog agent.
	Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *TracerDatadogConfig) Reset() {
	*x = TracerDatadogConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[37]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *TracerDatadogConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerDatadogConfig as implementing proto.Message.
func (*TracerDatadogConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *TracerDatadogConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[37]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 37) within it.
//
// Deprecated: Use TracerDatadogConfig.ProtoReflect.Descriptor instead.
func (*TracerDatadogConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{37}
}

// GetAddress returns the address field; nil-receiver safe (returns "").
func (x *TracerDatadogConfig) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}
// Configuration for the lightstep tracing service.
type TracerLightStepConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Sets the lightstep satellite pool address in host:port format for reporting trace data.
	Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
	// Sets the lightstep access token.
	AccessToken string `protobuf:"bytes,2,opt,name=accessToken,proto3" json:"accessToken,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *TracerLightStepConfig) Reset() {
	*x = TracerLightStepConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[38]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *TracerLightStepConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerLightStepConfig as implementing proto.Message.
func (*TracerLightStepConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *TracerLightStepConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[38]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 38) within it.
//
// Deprecated: Use TracerLightStepConfig.ProtoReflect.Descriptor instead.
func (*TracerLightStepConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{38}
}

// GetAddress returns the address field; nil-receiver safe (returns "").
func (x *TracerLightStepConfig) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}

// GetAccessToken returns the accessToken field; nil-receiver safe (returns "").
func (x *TracerLightStepConfig) GetAccessToken() string {
	if x != nil {
		return x.AccessToken
	}
	return ""
}
// Configuration for the zipkin tracing service.
type TracerZipkinConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Address of zipkin instance in host:port format for reporting trace data.
	//
	// Example: <zipkin-collector-service>.<zipkin-collector-namespace>:941
	Address string `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *TracerZipkinConfig) Reset() {
	*x = TracerZipkinConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[39]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *TracerZipkinConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerZipkinConfig as implementing proto.Message.
func (*TracerZipkinConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *TracerZipkinConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[39]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 39) within it.
//
// Deprecated: Use TracerZipkinConfig.ProtoReflect.Descriptor instead.
func (*TracerZipkinConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{39}
}

// GetAddress returns the address field; nil-receiver safe (returns "").
func (x *TracerZipkinConfig) GetAddress() string {
	if x != nil {
		return x.Address
	}
	return ""
}
// Configuration for the stackdriver tracing service.
type TracerStackdriverConfig struct {
	// Internal protobuf runtime bookkeeping; not for direct use.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// enables trace output to stdout.
	Debug *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=debug,proto3" json:"debug,omitempty"`
	// The global default max number of attributes per span.
	MaxNumberOfAttributes uint32 `protobuf:"varint,2,opt,name=maxNumberOfAttributes,proto3" json:"maxNumberOfAttributes,omitempty"`
	// The global default max number of annotation events per span.
	MaxNumberOfAnnotations uint32 `protobuf:"varint,3,opt,name=maxNumberOfAnnotations,proto3" json:"maxNumberOfAnnotations,omitempty"`
	// The global default max number of message events per span.
	MaxNumberOfMessageEvents uint32 `protobuf:"varint,4,opt,name=maxNumberOfMessageEvents,proto3" json:"maxNumberOfMessageEvents,omitempty"`
}

// Reset clears the message to its zero value and, on the unsafe fast path,
// re-registers the cached message info with the protobuf runtime.
func (x *TracerStackdriverConfig) Reset() {
	*x = TracerStackdriverConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[40]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the protobuf text format via the runtime.
func (x *TracerStackdriverConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerStackdriverConfig as implementing proto.Message.
func (*TracerStackdriverConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily caching
// message info when the unsafe fast path is enabled.
func (x *TracerStackdriverConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[40]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Descriptor returns the gzipped raw file descriptor and this message's path
// (index 40) within it.
//
// Deprecated: Use TracerStackdriverConfig.ProtoReflect.Descriptor instead.
func (*TracerStackdriverConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{40}
}

// GetDebug returns the debug field; nil-receiver safe (returns nil).
func (x *TracerStackdriverConfig) GetDebug() *wrapperspb.BoolValue {
	if x != nil {
		return x.Debug
	}
	return nil
}

// GetMaxNumberOfAttributes returns the maxNumberOfAttributes field;
// nil-receiver safe (returns 0).
func (x *TracerStackdriverConfig) GetMaxNumberOfAttributes() uint32 {
	if x != nil {
		return x.MaxNumberOfAttributes
	}
	return 0
}

// GetMaxNumberOfAnnotations returns the maxNumberOfAnnotations field;
// nil-receiver safe (returns 0).
func (x *TracerStackdriverConfig) GetMaxNumberOfAnnotations() uint32 {
	if x != nil {
		return x.MaxNumberOfAnnotations
	}
	return 0
}

// GetMaxNumberOfMessageEvents returns the maxNumberOfMessageEvents field;
// nil-receiver safe (returns 0).
func (x *TracerStackdriverConfig) GetMaxNumberOfMessageEvents() uint32 {
	if x != nil {
		return x.MaxNumberOfMessageEvents
	}
	return 0
}
// BaseConfig is the generated message carrying options for the "base"
// install component: CRD templating, istio config CRDs, and the
// validation webhook endpoint.
type BaseConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// For Helm2 use, adds the CRDs to templates.
	EnableCRDTemplates *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enableCRDTemplates,proto3" json:"enableCRDTemplates,omitempty"`
	// URL to use for validating webhook.
	ValidationURL string `protobuf:"bytes,2,opt,name=validationURL,proto3" json:"validationURL,omitempty"`
	// For istioctl usage to disable istio config crds in base
	EnableIstioConfigCRDs *wrapperspb.BoolValue `protobuf:"bytes,3,opt,name=enableIstioConfigCRDs,proto3" json:"enableIstioConfigCRDs,omitempty"`
	// Controls gateway validation; semantics not shown here — see the
	// corresponding chart templates.
	ValidateGateway *wrapperspb.BoolValue `protobuf:"bytes,4,opt,name=validateGateway,proto3" json:"validateGateway,omitempty"`
}
// Reset restores x to its zero value, re-storing the cached message
// info when the protoimpl unsafe fast path is enabled.
func (x *BaseConfig) Reset() {
	*x = BaseConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[41]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protobuf runtime.
func (x *BaseConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks BaseConfig as a protobuf message.
func (*BaseConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching message info (index 41 in this file's type table) on first use.
func (x *BaseConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[41]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use BaseConfig.ProtoReflect.Descriptor instead.
func (*BaseConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{41}
}
// GetEnableCRDTemplates returns the Helm2 CRD-templating flag, or nil
// for a nil receiver.
func (x *BaseConfig) GetEnableCRDTemplates() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.EnableCRDTemplates
}

// GetValidationURL returns the validating-webhook URL, or "" for a nil
// receiver.
func (x *BaseConfig) GetValidationURL() string {
	if x == nil {
		return ""
	}
	return x.ValidationURL
}

// GetEnableIstioConfigCRDs returns the istio-config-CRD toggle, or nil
// for a nil receiver.
func (x *BaseConfig) GetEnableIstioConfigCRDs() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.EnableIstioConfigCRDs
}

// GetValidateGateway returns the gateway-validation toggle, or nil for
// a nil receiver.
func (x *BaseConfig) GetValidateGateway() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.ValidateGateway
}
// IstiodRemoteConfig is the generated message configuring how a remote
// cluster reaches an external istiod's sidecar-injector webhook.
type IstiodRemoteConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// URL to use for sidecar injector webhook.
	InjectionURL string `protobuf:"bytes,1,opt,name=injectionURL,proto3" json:"injectionURL,omitempty"`
	// Path to use for the sidecar injector webhook service.
	InjectionPath string `protobuf:"bytes,2,opt,name=injectionPath,proto3" json:"injectionPath,omitempty"`
}
// Reset restores x to its zero value, re-storing the cached message
// info when the protoimpl unsafe fast path is enabled.
func (x *IstiodRemoteConfig) Reset() {
	*x = IstiodRemoteConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[42]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protobuf runtime.
func (x *IstiodRemoteConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks IstiodRemoteConfig as a protobuf message.
func (*IstiodRemoteConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching message info (index 42 in this file's type table) on first use.
func (x *IstiodRemoteConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[42]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IstiodRemoteConfig.ProtoReflect.Descriptor instead.
func (*IstiodRemoteConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{42}
}
// GetInjectionURL returns the sidecar-injector webhook URL, or "" for a
// nil receiver.
func (x *IstiodRemoteConfig) GetInjectionURL() string {
	if x == nil {
		return ""
	}
	return x.InjectionURL
}

// GetInjectionPath returns the sidecar-injector webhook service path,
// or "" for a nil receiver.
func (x *IstiodRemoteConfig) GetInjectionPath() string {
	if x == nil {
		return ""
	}
	return x.InjectionPath
}
// Values is the generated root message for the chart values API: it
// aggregates the per-component configs (CNI, gateways, global, pilot,
// telemetry, sidecar injector, base, istiod-remote) plus revision and
// profile metadata. Field numbers are non-contiguous; several slots are
// presumably reserved/removed in the .proto — confirm against the
// source values_types.proto.
type Values struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Cni      *CNIConfig      `protobuf:"bytes,2,opt,name=cni,proto3" json:"cni,omitempty"`
	Gateways *GatewaysConfig `protobuf:"bytes,5,opt,name=gateways,proto3" json:"gateways,omitempty"`
	Global   *GlobalConfig   `protobuf:"bytes,6,opt,name=global,proto3" json:"global,omitempty"`
	Pilot    *PilotConfig    `protobuf:"bytes,10,opt,name=pilot,proto3" json:"pilot,omitempty"`
	// Free-form ztunnel settings (untyped protobuf Value).
	Ztunnel *structpb.Value `protobuf:"bytes,41,opt,name=ztunnel,proto3" json:"ztunnel,omitempty"`
	// Controls whether telemetry is exported for Pilot.
	Telemetry              *TelemetryConfig       `protobuf:"bytes,23,opt,name=telemetry,proto3" json:"telemetry,omitempty"`
	SidecarInjectorWebhook *SidecarInjectorConfig `protobuf:"bytes,13,opt,name=sidecarInjectorWebhook,proto3" json:"sidecarInjectorWebhook,omitempty"`
	IstioCni               *CNIConfig             `protobuf:"bytes,19,opt,name=istio_cni,json=istioCni,proto3" json:"istio_cni,omitempty"`
	Revision               string                 `protobuf:"bytes,21,opt,name=revision,proto3" json:"revision,omitempty"`
	OwnerName              string                 `protobuf:"bytes,22,opt,name=ownerName,proto3" json:"ownerName,omitempty"`
	// TODO can this import the real mesh config API?
	MeshConfig           *structpb.Value     `protobuf:"bytes,36,opt,name=meshConfig,proto3" json:"meshConfig,omitempty"`
	Base                 *BaseConfig         `protobuf:"bytes,37,opt,name=base,proto3" json:"base,omitempty"`
	IstiodRemote         *IstiodRemoteConfig `protobuf:"bytes,38,opt,name=istiodRemote,proto3" json:"istiodRemote,omitempty"`
	RevisionTags         []string            `protobuf:"bytes,39,rep,name=revisionTags,proto3" json:"revisionTags,omitempty"`
	DefaultRevision      string              `protobuf:"bytes,40,opt,name=defaultRevision,proto3" json:"defaultRevision,omitempty"`
	Profile              string              `protobuf:"bytes,42,opt,name=profile,proto3" json:"profile,omitempty"`
	CompatibilityVersion string              `protobuf:"bytes,43,opt,name=compatibilityVersion,proto3" json:"compatibilityVersion,omitempty"`
}
// Reset restores x to its zero value, re-storing the cached message
// info when the protoimpl unsafe fast path is enabled.
func (x *Values) Reset() {
	*x = Values{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[43]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protobuf runtime.
func (x *Values) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks Values as a protobuf message.
func (*Values) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching message info (index 43 in this file's type table) on first use.
func (x *Values) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[43]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Values.ProtoReflect.Descriptor instead.
func (*Values) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{43}
}
// GetCni returns the CNI component config, or nil for a nil receiver.
func (x *Values) GetCni() *CNIConfig {
	if x == nil {
		return nil
	}
	return x.Cni
}

// GetGateways returns the gateways config, or nil for a nil receiver.
func (x *Values) GetGateways() *GatewaysConfig {
	if x == nil {
		return nil
	}
	return x.Gateways
}

// GetGlobal returns the global config, or nil for a nil receiver.
func (x *Values) GetGlobal() *GlobalConfig {
	if x == nil {
		return nil
	}
	return x.Global
}

// GetPilot returns the pilot config, or nil for a nil receiver.
func (x *Values) GetPilot() *PilotConfig {
	if x == nil {
		return nil
	}
	return x.Pilot
}

// GetZtunnel returns the untyped ztunnel settings, or nil for a nil
// receiver.
func (x *Values) GetZtunnel() *structpb.Value {
	if x == nil {
		return nil
	}
	return x.Ztunnel
}

// GetTelemetry returns the telemetry config, or nil for a nil receiver.
func (x *Values) GetTelemetry() *TelemetryConfig {
	if x == nil {
		return nil
	}
	return x.Telemetry
}

// GetSidecarInjectorWebhook returns the sidecar injector config, or nil
// for a nil receiver.
func (x *Values) GetSidecarInjectorWebhook() *SidecarInjectorConfig {
	if x == nil {
		return nil
	}
	return x.SidecarInjectorWebhook
}

// GetIstioCni returns the istio_cni config, or nil for a nil receiver.
func (x *Values) GetIstioCni() *CNIConfig {
	if x == nil {
		return nil
	}
	return x.IstioCni
}

// GetRevision returns the revision name, or "" for a nil receiver.
func (x *Values) GetRevision() string {
	if x == nil {
		return ""
	}
	return x.Revision
}

// GetOwnerName returns the owner name, or "" for a nil receiver.
func (x *Values) GetOwnerName() string {
	if x == nil {
		return ""
	}
	return x.OwnerName
}

// GetMeshConfig returns the untyped mesh config, or nil for a nil
// receiver.
func (x *Values) GetMeshConfig() *structpb.Value {
	if x == nil {
		return nil
	}
	return x.MeshConfig
}

// GetBase returns the base component config, or nil for a nil receiver.
func (x *Values) GetBase() *BaseConfig {
	if x == nil {
		return nil
	}
	return x.Base
}

// GetIstiodRemote returns the istiod-remote config, or nil for a nil
// receiver.
func (x *Values) GetIstiodRemote() *IstiodRemoteConfig {
	if x == nil {
		return nil
	}
	return x.IstiodRemote
}

// GetRevisionTags returns the revision tags, or nil for a nil receiver.
func (x *Values) GetRevisionTags() []string {
	if x == nil {
		return nil
	}
	return x.RevisionTags
}

// GetDefaultRevision returns the default revision, or "" for a nil
// receiver.
func (x *Values) GetDefaultRevision() string {
	if x == nil {
		return ""
	}
	return x.DefaultRevision
}

// GetProfile returns the profile name, or "" for a nil receiver.
func (x *Values) GetProfile() string {
	if x == nil {
		return ""
	}
	return x.Profile
}

// GetCompatibilityVersion returns the compatibility version, or "" for
// a nil receiver.
func (x *Values) GetCompatibilityVersion() string {
	if x == nil {
		return ""
	}
	return x.CompatibilityVersion
}
// ZeroVPNConfig enables cross-cluster access using SNI matching.
type ZeroVPNConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Controls whether ZeroVPN is enabled.
	Enabled *wrapperspb.BoolValue `protobuf:"bytes,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
	// Suffix used for matching; presumably a DNS/SNI suffix — confirm
	// against the .proto source.
	Suffix string `protobuf:"bytes,2,opt,name=suffix,proto3" json:"suffix,omitempty"`
}
// Reset restores x to its zero value, re-storing the cached message
// info when the protoimpl unsafe fast path is enabled.
func (x *ZeroVPNConfig) Reset() {
	*x = ZeroVPNConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[44]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protobuf runtime.
func (x *ZeroVPNConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks ZeroVPNConfig as a protobuf message.
func (*ZeroVPNConfig) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching message info (index 44 in this file's type table) on first use.
func (x *ZeroVPNConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[44]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ZeroVPNConfig.ProtoReflect.Descriptor instead.
func (*ZeroVPNConfig) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{44}
}
// GetEnabled returns the ZeroVPN enable flag, or nil for a nil
// receiver.
func (x *ZeroVPNConfig) GetEnabled() *wrapperspb.BoolValue {
	if x == nil {
		return nil
	}
	return x.Enabled
}

// GetSuffix returns the matching suffix, or "" for a nil receiver.
func (x *ZeroVPNConfig) GetSuffix() string {
	if x == nil {
		return ""
	}
	return x.Suffix
}
// IntOrString is a type that can hold an int32 or a string. When used in
// JSON or YAML marshalling and unmarshalling, it produces or consumes the
// inner type. This allows you to have, for example, a JSON field that can
// accept a name or number.
// TODO: Rename to Int32OrString
//
// +protobuf=true
// +protobuf.options.(gogoproto.goproto_stringer)=false
// +k8s:openapi-gen=true
type IntOrString struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Type discriminates which of IntVal/StrVal is populated; the exact
	// encoding mirrors k8s.io/apimachinery's IntOrString — confirm
	// against the .proto source.
	Type   int64                   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
	IntVal *wrapperspb.Int32Value  `protobuf:"bytes,2,opt,name=intVal,proto3" json:"intVal,omitempty"`
	StrVal *wrapperspb.StringValue `protobuf:"bytes,3,opt,name=strVal,proto3" json:"strVal,omitempty"`
}
// Reset restores x to its zero value, re-storing the cached message
// info when the protoimpl unsafe fast path is enabled.
func (x *IntOrString) Reset() {
	*x = IntOrString{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[45]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a human-readable rendering of the message via the
// protobuf runtime.
func (x *IntOrString) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks IntOrString as a protobuf message.
func (*IntOrString) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching message info (index 45 in this file's type table) on first use.
func (x *IntOrString) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[45]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use IntOrString.ProtoReflect.Descriptor instead.
func (*IntOrString) Descriptor() ([]byte, []int) {
	return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP(), []int{45}
}
// GetType returns the discriminator, or 0 for a nil receiver.
func (x *IntOrString) GetType() int64 {
	if x == nil {
		return 0
	}
	return x.Type
}

// GetIntVal returns the integer variant, or nil for a nil receiver.
func (x *IntOrString) GetIntVal() *wrapperspb.Int32Value {
	if x == nil {
		return nil
	}
	return x.IntVal
}

// GetStrVal returns the string variant, or nil for a nil receiver.
func (x *IntOrString) GetStrVal() *wrapperspb.StringValue {
	if x == nil {
		return nil
	}
	return x.StrVal
}
// File_pkg_apis_istio_v1alpha1_values_types_proto is the file descriptor
// for this generated file; presumably populated by generated init code
// outside this chunk — confirm in the rest of the file.
var File_pkg_apis_istio_v1alpha1_values_types_proto protoreflect.FileDescriptor
var file_pkg_apis_istio_v1alpha1_values_types_proto_rawDesc = []byte{
0x0a, 0x2a, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x73, 0x74, 0x69, 0x6f,
0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73,
0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
0x68, 0x0a, 0x0a, 0x41, 0x72, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x0a,
0x05, 0x61, 0x6d, 0x64, 0x36, 0x34, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x61, 0x6d,
0x64, 0x36, 0x34, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x70, 0x63, 0x36, 0x34, 0x6c, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x70, 0x70, 0x63, 0x36, 0x34, 0x6c, 0x65, 0x12, 0x14, 0x0a,
0x05, 0x73, 0x33, 0x39, 0x30, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x73, 0x33,
0x39, 0x30, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x6d, 0x36, 0x34, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x05, 0x61, 0x72, 0x6d, 0x36, 0x34, 0x22, 0xc0, 0x08, 0x0a, 0x09, 0x43, 0x4e,
0x49, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a,
0x03, 0x68, 0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x68, 0x75, 0x62, 0x12,
0x28, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x61, 0x72,
0x69, 0x61, 0x6e, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x61, 0x72, 0x69,
0x61, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x75, 0x6c,
0x6c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70,
0x75, 0x6c, 0x6c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6e, 0x69,
0x42, 0x69, 0x6e, 0x44, 0x69, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6e,
0x69, 0x42, 0x69, 0x6e, 0x44, 0x69, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6e, 0x69, 0x43, 0x6f,
0x6e, 0x66, 0x44, 0x69, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6e, 0x69,
0x43, 0x6f, 0x6e, 0x66, 0x44, 0x69, 0x72, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6e, 0x69, 0x43, 0x6f,
0x6e, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0f, 0x63, 0x6e, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x61, 0x6d,
0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6e, 0x69, 0x4e, 0x65, 0x74, 0x6e, 0x73, 0x44, 0x69, 0x72,
0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6e, 0x69, 0x4e, 0x65, 0x74, 0x6e, 0x73,
0x44, 0x69, 0x72, 0x12, 0x2c, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11,
0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
0x73, 0x12, 0x33, 0x0a, 0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x18, 0x14, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x61, 0x66,
0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0e, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x6e,
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x70, 0x6f, 0x64,
0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x70,
0x73, 0x70, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x6f, 0x6c, 0x65, 0x18,
0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x73, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x52, 0x6f, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
0x6c, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
0x6c, 0x12, 0x31, 0x0a, 0x06, 0x72, 0x65, 0x70, 0x61, 0x69, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x19, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x4e, 0x49,
0x52, 0x65, 0x70, 0x61, 0x69, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x72, 0x65,
0x70, 0x61, 0x69, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x18,
0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x41, 0x0a, 0x0f, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x73, 0x18, 0x10, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x73, 0x52, 0x0e, 0x72,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x73, 0x12, 0x31, 0x0a,
0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x13, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f,
0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x64, 0x18, 0x12,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x0e,
0x73, 0x65, 0x63, 0x63, 0x6f, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x13,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0e, 0x73,
0x65, 0x63, 0x63, 0x6f, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x34, 0x0a,
0x07, 0x61, 0x6d, 0x62, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x4e, 0x49, 0x41, 0x6d, 0x62,
0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x61, 0x6d, 0x62, 0x69,
0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18,
0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12,
0x4b, 0x0a, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61,
0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x4f, 0x72, 0x53,
0x74, 0x72, 0x69, 0x6e, 0x67, 0x52, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61,
0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xa2, 0x01, 0x0a,
0x10, 0x43, 0x4e, 0x49, 0x41, 0x6d, 0x62, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x44, 0x69, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x44, 0x69, 0x72, 0x12, 0x3a, 0x0a, 0x0a, 0x64, 0x6e, 0x73, 0x43, 0x61, 0x70, 0x74,
0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x64, 0x6e, 0x73, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72,
0x65, 0x22, 0xad, 0x03, 0x0a, 0x0f, 0x43, 0x4e, 0x49, 0x52, 0x65, 0x70, 0x61, 0x69, 0x72, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x68,
0x75, 0x62, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x68, 0x75, 0x62, 0x12, 0x28, 0x0a,
0x03, 0x74, 0x61, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65,
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a,
0x09, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x6f, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
0x52, 0x09, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x6f, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x72,
0x65, 0x70, 0x61, 0x69, 0x72, 0x50, 0x6f, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52,
0x0a, 0x72, 0x65, 0x70, 0x61, 0x69, 0x72, 0x50, 0x6f, 0x64, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x63,
0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x76, 0x65,
0x6e, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x64,
0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50,
0x6f, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x64,
0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4b, 0x65, 0x79, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
0x62, 0x72, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x4b, 0x65,
0x79, 0x12, 0x30, 0x0a, 0x13, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x64, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
0x62, 0x72, 0x6f, 0x6b, 0x65, 0x6e, 0x50, 0x6f, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61,
0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
0x69, 0x6e, 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d,
0x65, 0x22, 0x5a, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x51, 0x75, 0x6f,
0x74, 0x61, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x64,
0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x22, 0x55, 0x0a,
0x17, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3a, 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67,
0x65, 0x74, 0x41, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x18, 0x74, 0x61, 0x72, 0x67,
0x65, 0x74, 0x41, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x73, 0x12, 0x37, 0x0a, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2e, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x08, 0x72,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x69,
0x6d, 0x69, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
0x38, 0x01, 0x22, 0x4b, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
0x63, 0x74, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
0x58, 0x0a, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x64, 0x44, 0x69, 0x73,
0x72, 0x75, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x57, 0x0a, 0x16, 0x44, 0x65, 0x66,
0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x73, 0x22, 0xa1, 0x0e, 0x0a, 0x13, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74,
0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x10, 0x61, 0x75,
0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x10, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d,
0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x4d, 0x61, 0x78, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x4d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75,
0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x06, 0x6d, 0x65,
0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x74, 0x69, 0x6c,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18,
0x01, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x03, 0x63, 0x70, 0x75,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x63,
0x70, 0x75, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18,
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x03, 0x65, 0x6e,
0x76, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0c,
0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52,
0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x43, 0x0a,
0x0e, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02,
0x18, 0x01, 0x52, 0x0e, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x5f, 0x0a, 0x1c, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66, 0x66,
0x69, 0x6e, 0x69, 0x74, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63,
0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1c, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66,
0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63,
0x74, 0x6f, 0x72, 0x12, 0x67, 0x0a, 0x20, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66,
0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53,
0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x20, 0x70, 0x6f, 0x64, 0x41,
0x6e, 0x74, 0x69, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x4c,
0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x05,
0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x12, 0x3c, 0x0a, 0x0d, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52,
0x0d, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x47,
0x0a, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
0x75, 0x63, 0x74, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f,
0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x04, 0x7a,
0x76, 0x70, 0x6e, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x5a, 0x65, 0x72, 0x6f, 0x56, 0x50, 0x4e, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x52, 0x04, 0x7a, 0x76, 0x70, 0x6e, 0x12, 0x3d, 0x0a, 0x0b, 0x74, 0x6f, 0x6c, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, 0x74, 0x6f, 0x6c, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x72, 0x6f, 0x6c, 0x6c, 0x69,
0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x4f,
0x72, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x72, 0x6f, 0x6c,
0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67, 0x65, 0x12, 0x4f, 0x0a, 0x15,
0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69,
0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x4f, 0x72, 0x53, 0x74, 0x72, 0x69,
0x6e, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d,
0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x3d, 0x0a,
0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x17,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0d, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x14,
0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69,
0x6e, 0x65, 0x72, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
0x75, 0x63, 0x74, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43,
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x75, 0x6e,
0x41, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x41, 0x73, 0x52,
0x6f, 0x6f, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
0x65, 0x12, 0x40, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65,
0x73, 0x18, 0x1d, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c,
0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x70, 0x46,
0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65, 0x77,
0x61, 0x79, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4f, 0x0a, 0x13, 0x69, 0x73, 0x74,
0x69, 0x6f, 0x5f, 0x65, 0x67, 0x72, 0x65, 0x73, 0x73, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x45, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2d, 0x65, 0x67, 0x72,
0x65, 0x73, 0x73, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e,
0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f,
0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64,
0x12, 0x52, 0x0a, 0x14, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73,
0x73, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73,
0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14,
0x69, 0x73, 0x74, 0x69, 0x6f, 0x2d, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x67, 0x61, 0x74,
0x65, 0x77, 0x61, 0x79, 0x22, 0xe4, 0x12, 0x0a, 0x0c, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x0a, 0x04, 0x61, 0x72, 0x63, 0x68, 0x18, 0x01, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x41,
0x72, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x61,
0x72, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x53, 0x69, 0x67, 0x6e, 0x65,
0x72, 0x73, 0x18, 0x44, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x65, 0x72, 0x74, 0x53, 0x69,
0x67, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
0x6f, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x32, 0x20, 0x01,
0x28, 0x09, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x4e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
0x48, 0x0a, 0x1f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
0x67, 0x73, 0x18, 0x34, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x69, 0x73, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74,
0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x4d, 0x0a, 0x13, 0x64, 0x65, 0x66,
0x61, 0x75, 0x6c, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42,
0x02, 0x18, 0x01, 0x52, 0x13, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4e, 0x6f, 0x64, 0x65,
0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x6e, 0x0a, 0x1a, 0x64, 0x65, 0x66, 0x61,
0x75, 0x6c, 0x74, 0x50, 0x6f, 0x64, 0x44, 0x69, 0x73, 0x72, 0x75, 0x70, 0x74, 0x69, 0x6f, 0x6e,
0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x50,
0x6f, 0x64, 0x44, 0x69, 0x73, 0x72, 0x75, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x64, 0x67,
0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1a, 0x64, 0x65,
0x66, 0x61, 0x75, 0x6c, 0x74, 0x50, 0x6f, 0x64, 0x44, 0x69, 0x73, 0x72, 0x75, 0x70, 0x74, 0x69,
0x6f, 0x6e, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x64, 0x65, 0x66, 0x61,
0x75, 0x6c, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65,
0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x12, 0x64, 0x65,
0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
0x18, 0x37, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42,
0x02, 0x18, 0x01, 0x52, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x6f, 0x6c, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x68, 0x75, 0x62, 0x18, 0x0c,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x68, 0x75, 0x62, 0x12, 0x28, 0x0a, 0x0f, 0x69, 0x6d, 0x61,
0x67, 0x65, 0x50, 0x75, 0x6c, 0x6c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x01,
0x28, 0x09, 0x52, 0x0f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x75, 0x6c, 0x6c, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x12, 0x2a, 0x0a, 0x10, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x75, 0x6c, 0x6c,
0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x25, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69,
0x6d, 0x61, 0x67, 0x65, 0x50, 0x75, 0x6c, 0x6c, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12,
0x26, 0x0a, 0x0e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x4e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x41, 0x73,
0x4a, 0x73, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f,
0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x41, 0x73, 0x4a, 0x73, 0x6f,
0x6e, 0x12, 0x37, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x6c,
0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65,
0x73, 0x68, 0x49, 0x44, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x73, 0x68,
0x49, 0x44, 0x12, 0x3b, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72,
0x6b, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63,
0x74, 0x52, 0x0c, 0x6d, 0x65, 0x73, 0x68, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x12,
0x40, 0x0a, 0x0c, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31,
0x2e, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x0c, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x27, 0x20, 0x01,
0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x36, 0x0a, 0x16, 0x70,
0x6f, 0x64, 0x44, 0x4e, 0x53, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x73,
0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x70, 0x6f, 0x64,
0x44, 0x4e, 0x53, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
0x63, 0x65, 0x73, 0x12, 0x5e, 0x0a, 0x1c, 0x6f, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x64, 0x65, 0x63,
0x61, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x4d, 0x61, 0x70, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x1c, 0x6f, 0x6d, 0x69, 0x74, 0x53, 0x69, 0x64, 0x65, 0x63,
0x61, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x4d, 0x61, 0x70, 0x12, 0x3e, 0x0a, 0x0c, 0x6f, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x6f, 0x6e, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x12, 0x52, 0x0a, 0x16, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4d,
0x61, 0x6e, 0x61, 0x67, 0x65, 0x57, 0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x73, 0x18, 0x29, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
0x16, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x57,
0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x73, 0x12, 0x30, 0x0a, 0x11, 0x70, 0x72, 0x69, 0x6f, 0x72,
0x69, 0x74, 0x79, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x1b, 0x20, 0x01,
0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x11, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79,
0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x70, 0x72, 0x6f,
0x78, 0x79, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
0x05, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x39, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f,
0x69, 0x6e, 0x69, 0x74, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x6e, 0x69, 0x74, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x6e, 0x69,
0x74, 0x12, 0x25, 0x0a, 0x03, 0x73, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x44, 0x53, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x03, 0x73, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18,
0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03, 0x74,
0x61, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x43, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x12, 0x2e, 0x0a, 0x06,
0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x18, 0x21, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06,
0x75, 0x73, 0x65, 0x4d, 0x43, 0x50, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x75, 0x73, 0x65, 0x4d, 0x43, 0x50,
0x12, 0x2e, 0x0a, 0x12, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x41,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x30, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65,
0x6d, 0x6f, 0x74, 0x65, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x12, 0x2e, 0x0a, 0x06, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x64, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x16, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x73, 0x74, 0x69,
0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x64,
0x12, 0x2c, 0x0a, 0x11, 0x70, 0x69, 0x6c, 0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f,
0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x38, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x69, 0x6c,
0x6f, 0x74, 0x43, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1c,
0x0a, 0x09, 0x6a, 0x77, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x39, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x6a, 0x77, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x25, 0x0a, 0x03,
0x73, 0x74, 0x73, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x54, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x03,
0x73, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18,
0x3b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12,
0x42, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x4d, 0x74, 0x6c, 0x73, 0x43, 0x65, 0x72, 0x74,
0x73, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x4d, 0x74, 0x6c, 0x73, 0x43, 0x65,
0x72, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x18, 0x3d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x61, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x73, 0x74,
0x69, 0x6f, 0x64, 0x18, 0x3e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x49,
0x73, 0x74, 0x69, 0x6f, 0x64, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x43,
0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x40, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x4e, 0x61, 0x6d,
0x65, 0x18, 0x41, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x12,
0x46, 0x0a, 0x10, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x76, 0x32,
0x41, 0x50, 0x49, 0x18, 0x42, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69,
0x6e, 0x67, 0x76, 0x32, 0x41, 0x50, 0x49, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66,
0x6f, 0x72, 0x6d, 0x18, 0x45, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66,
0x6f, 0x72, 0x6d, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65,
0x73, 0x18, 0x46, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c,
0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50,
0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x47, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x70, 0x46,
0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2d, 0x0a, 0x09, 0x53,
0x54, 0x53, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x73,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x22, 0x52, 0x0a, 0x0c, 0x49, 0x73,
0x74, 0x69, 0x6f, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x6e,
0x61, 0x62, 0x6c, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x73, 0x69, 0x73, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x6e, 0x61, 0x6c, 0x79, 0x73, 0x69, 0x73, 0x22, 0x2b,
0x0a, 0x13, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x22, 0xb1, 0x10, 0x0a, 0x14,
0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x46, 0x0a, 0x10, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x75, 0x74, 0x6f,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c,
0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x61, 0x78,
0x12, 0x22, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x69, 0x6e,
0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x4d, 0x69, 0x6e, 0x12, 0x3d, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x6d, 0x65, 0x6d,
0x6f, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x21, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67,
0x65, 0x74, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x40, 0x0a, 0x0d,
0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x06, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x34,
0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61,
0x62, 0x6c, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x0b, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12,
0x42, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x2a, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x67, 0x72, 0x65,
0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62,
0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e,
0x63, 0x65, 0x72, 0x49, 0x50, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x6f, 0x61,
0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x49, 0x50, 0x12, 0x3a, 0x0a, 0x18, 0x6c,
0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x11, 0x20, 0x03, 0x28, 0x09, 0x52, 0x18, 0x6c,
0x6f, 0x61, 0x64, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x72, 0x53, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x0c, 0x6e,
0x6f, 0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c,
0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x43, 0x0a, 0x0e,
0x70, 0x6f, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x14,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18,
0x01, 0x52, 0x0e, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x73, 0x12, 0x5f, 0x0a, 0x1c, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66, 0x66, 0x69,
0x6e, 0x69, 0x74, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x42, 0x02, 0x18, 0x01, 0x52, 0x1c, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66, 0x66,
0x69, 0x6e, 0x69, 0x74, 0x79, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74,
0x6f, 0x72, 0x12, 0x67, 0x0a, 0x20, 0x70, 0x6f, 0x64, 0x41, 0x6e, 0x74, 0x69, 0x41, 0x66, 0x66,
0x69, 0x6e, 0x69, 0x74, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x53, 0x65,
0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53,
0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x20, 0x70, 0x6f, 0x64, 0x41, 0x6e,
0x74, 0x69, 0x41, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x54, 0x65, 0x72, 0x6d, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2b, 0x0a, 0x05, 0x70,
0x6f, 0x72, 0x74, 0x73, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0c, 0x72, 0x65, 0x70, 0x6c,
0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x02,
0x18, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x39, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x19, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01,
0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x3c, 0x0a, 0x0d, 0x73,
0x65, 0x63, 0x72, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x1b, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65,
0x63, 0x72, 0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x0d, 0x73, 0x65, 0x63, 0x72,
0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x12, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x12,
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x7a, 0x76, 0x70, 0x6e, 0x18, 0x1e,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x5a, 0x76,
0x70, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x7a, 0x76, 0x70, 0x6e, 0x12, 0x43,
0x0a, 0x0f, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67,
0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x4f, 0x72, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x02,
0x18, 0x01, 0x52, 0x0f, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75,
0x72, 0x67, 0x65, 0x12, 0x4f, 0x0a, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61,
0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x20, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e,
0x74, 0x4f, 0x72, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x72,
0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c,
0x61, 0x62, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x22, 0x20,
0x01, 0x28, 0x09, 0x52, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x72, 0x61,
0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x74, 0x6f,
0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0b, 0x74, 0x6f,
0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3b, 0x0a, 0x0c, 0x69, 0x6e, 0x67,
0x72, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0c, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73,
0x73, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x4b, 0x0a, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69,
0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x25,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x14, 0x61,
0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e,
0x65, 0x72, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x6f, 0x6c,
0x75, 0x6d, 0x65, 0x73, 0x18, 0x26, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
0x75, 0x63, 0x74, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x41, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x18,
0x2d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x41, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x2c, 0x0a, 0x11,
0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74,
0x65, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x2f, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x0e, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a,
0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x09,
0x52, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e,
0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x31,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f,
0x6c, 0x69, 0x63, 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
0x68, 0x0a, 0x18, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61,
0x79, 0x5a, 0x76, 0x70, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0xe8, 0x01, 0x0a, 0x12, 0x4d, 0x75,
0x6c, 0x74, 0x69, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75,
0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x67, 0x6c, 0x6f, 0x62,
0x61, 0x6c, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03,
0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x44, 0x6f, 0x6d, 0x61,
0x69, 0x6e, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4a, 0x0a, 0x12, 0x69, 0x6e, 0x63, 0x6c,
0x75, 0x64, 0x65, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x12, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x45, 0x6e, 0x76, 0x6f, 0x79, 0x46, 0x69,
0x6c, 0x74, 0x65, 0x72, 0x22, 0x87, 0x01, 0x0a, 0x1b, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e,
0x64, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4f, 0x75,
0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x72, 0x61, 0x66, 0x66, 0x69, 0x63, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04,
0x6d, 0x6f, 0x64, 0x65, 0x22, 0x28, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0d, 0x0a, 0x09,
0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x52,
0x45, 0x47, 0x49, 0x53, 0x54, 0x52, 0x59, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x22, 0x88,
0x12, 0x0a, 0x0b, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34,
0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61,
0x62, 0x6c, 0x65, 0x64, 0x12, 0x46, 0x0a, 0x10, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x61, 0x75, 0x74, 0x6f,
0x73, 0x63, 0x61, 0x6c, 0x65, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c,
0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x69, 0x6e,
0x12, 0x22, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x65, 0x4d, 0x61, 0x78,
0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x4d, 0x61, 0x78, 0x12, 0x45, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c,
0x65, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x18, 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x11, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63,
0x61, 0x6c, 0x65, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x0c, 0x72,
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28,
0x0d, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01,
0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61,
0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01,
0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x12,
0x35, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
0x12, 0x37, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55,
0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x42, 0x02, 0x18, 0x01, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x3f, 0x0a, 0x0c, 0x6e, 0x6f, 0x64,
0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0c, 0x6e, 0x6f,
0x64, 0x65, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x63, 0x0a, 0x1f, 0x6b, 0x65,
0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x18, 0x0d, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x1f,
0x6b, 0x65, 0x65, 0x70, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x53, 0x65, 0x72, 0x76,
0x65, 0x72, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x12,
0x43, 0x0a, 0x10, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x62,
0x65, 0x6c, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
0x63, 0x74, 0x52, 0x10, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x4c, 0x61,
0x62, 0x65, 0x6c, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x70, 0x6f, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c,
0x73, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x52, 0x09, 0x70, 0x6f, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x63,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x61, 0x70, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x4d, 0x61, 0x70, 0x12, 0x32, 0x0a, 0x06, 0x75, 0x73, 0x65, 0x4d, 0x43, 0x50, 0x18,
0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x06, 0x75, 0x73, 0x65, 0x4d, 0x43, 0x50, 0x12, 0x29, 0x0a, 0x03, 0x65, 0x6e, 0x76,
0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52,
0x03, 0x65, 0x6e, 0x76, 0x12, 0x33, 0x0a, 0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79,
0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52,
0x08, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0f, 0x72, 0x6f, 0x6c,
0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67, 0x65, 0x18, 0x18, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e,
0x74, 0x4f, 0x72, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x72,
0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x53, 0x75, 0x72, 0x67, 0x65, 0x12, 0x4f,
0x0a, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76,
0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x6e, 0x74, 0x4f, 0x72, 0x53, 0x74,
0x72, 0x69, 0x6e, 0x67, 0x42, 0x02, 0x18, 0x01, 0x52, 0x15, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e,
0x67, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12,
0x3d, 0x0a, 0x0b, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1a,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18,
0x01, 0x52, 0x0b, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x6c,
0x0a, 0x21, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c,
0x53, 0x6e, 0x69, 0x66, 0x66, 0x69, 0x6e, 0x67, 0x46, 0x6f, 0x72, 0x4f, 0x75, 0x74, 0x62, 0x6f,
0x75, 0x6e, 0x64, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x21, 0x65, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x6e, 0x69, 0x66, 0x66, 0x69, 0x6e,
0x67, 0x46, 0x6f, 0x72, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x6a, 0x0a, 0x20,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x6e,
0x69, 0x66, 0x66, 0x69, 0x6e, 0x67, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64,
0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x50, 0x72,
0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x53, 0x6e, 0x69, 0x66, 0x66, 0x69, 0x6e, 0x67, 0x46, 0x6f,
0x72, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x43, 0x0a, 0x0e, 0x70, 0x6f, 0x64, 0x41,
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x70,
0x6f, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x47, 0x0a,
0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
0x63, 0x74, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x38, 0x0a, 0x17, 0x6a, 0x77, 0x6b, 0x73, 0x52,
0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x6f, 0x74,
0x43, 0x41, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x6a, 0x77, 0x6b, 0x73, 0x52, 0x65,
0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61, 0x52, 0x6f, 0x6f, 0x74, 0x43,
0x41, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x21, 0x20, 0x03,
0x28, 0x09, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x68,
0x75, 0x62, 0x18, 0x22, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x68, 0x75, 0x62, 0x12, 0x28, 0x0a,
0x03, 0x74, 0x61, 0x67, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c,
0x75, 0x65, 0x52, 0x03, 0x74, 0x61, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x61, 0x72, 0x69, 0x61,
0x6e, 0x74, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e,
0x74, 0x12, 0x3f, 0x0a, 0x0e, 0x73, 0x65, 0x63, 0x63, 0x6f, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
0x63, 0x74, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x63, 0x6f, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69,
0x6c, 0x65, 0x12, 0x55, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x53, 0x70,
0x72, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18,
0x29, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x19,
0x74, 0x6f, 0x70, 0x6f, 0x6c, 0x6f, 0x67, 0x79, 0x53, 0x70, 0x72, 0x65, 0x61, 0x64, 0x43, 0x6f,
0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x78, 0x74,
0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x41, 0x72, 0x67, 0x73, 0x18,
0x2a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x12,
0x65, 0x78, 0x74, 0x72, 0x61, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x41, 0x72,
0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e,
0x74, 0x73, 0x18, 0x31, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63,
0x74, 0x52, 0x0c, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12,
0x31, 0x0a, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x33, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73,
0x18, 0x34, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69,
0x65, 0x73, 0x12, 0x26, 0x0a, 0x0e, 0x69, 0x70, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f,
0x6c, 0x69, 0x63, 0x79, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x70, 0x46, 0x61,
0x6d, 0x69, 0x6c, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3d, 0x0a, 0x06, 0x6d, 0x65,
0x6d, 0x6f, 0x72, 0x79, 0x18, 0x36, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x55, 0x74, 0x69, 0x6c,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x02, 0x18,
0x01, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x22, 0xb7, 0x01, 0x0a, 0x12, 0x50, 0x69,
0x6c, 0x6f, 0x74, 0x49, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x12, 0x26, 0x0a, 0x0e, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73,
0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x15, 0x69, 0x6e, 0x67, 0x72,
0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68,
0x61, 0x31, 0x2e, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x15, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73,
0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x12,
0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73, 0x43, 0x6c,
0x61, 0x73, 0x73, 0x22, 0x49, 0x0a, 0x11, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x74,
0x0a, 0x0f, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69,
0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x02, 0x76, 0x32, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54,
0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x56, 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x02, 0x76, 0x32, 0x22, 0xda, 0x01, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
0x72, 0x79, 0x56, 0x32, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e,
0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f,
0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64,
0x12, 0x45, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x56, 0x32, 0x50, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x12, 0x48, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b,
0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
0x79, 0x56, 0x32, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x44, 0x72, 0x69, 0x76, 0x65, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65,
0x72, 0x22, 0x53, 0x0a, 0x1b, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x56, 0x32,
0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65,
0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x54, 0x0a, 0x1c, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65,
0x74, 0x72, 0x79, 0x56, 0x32, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x44, 0x72, 0x69, 0x76, 0x65, 0x72,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x45, 0x0a, 0x11,
0x50, 0x69, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x12, 0x30, 0x0a, 0x13, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x52,
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13,
0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x73, 0x22, 0x8d, 0x01, 0x0a, 0x0b, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18,
0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x6e,
0x6f, 0x64, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65,
0x74, 0x50, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x74, 0x61, 0x72,
0x67, 0x65, 0x74, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x63, 0x6f, 0x6c, 0x22, 0xeb, 0x08, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x6a, 0x65, 0x63,
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x49, 0x6e, 0x6a,
0x65, 0x63, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x44, 0x6f,
0x6d, 0x61, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x75, 0x73,
0x74, 0x65, 0x72, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x63, 0x6f, 0x6d,
0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x06,
0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c,
0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c,
0x65, 0x43, 0x6f, 0x72, 0x65, 0x44, 0x75, 0x6d, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x65, 0x6e, 0x61,
0x62, 0x6c, 0x65, 0x43, 0x6f, 0x72, 0x65, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x30, 0x0a, 0x13, 0x65,
0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72,
0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x28, 0x0a,
0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73,
0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49,
0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65,
0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x28, 0x0a,
0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73,
0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49,
0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65,
0x76, 0x65, 0x6c, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65,
0x76, 0x65, 0x6c, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65,
0x64, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x64, 0x12,
0x42, 0x0a, 0x1c, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x69, 0x74,
0x69, 0x61, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18,
0x14, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x1c, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73,
0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x53, 0x65, 0x63, 0x6f,
0x6e, 0x64, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73,
0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x15, 0x20,
0x01, 0x28, 0x0d, 0x52, 0x16, 0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x50, 0x65,
0x72, 0x69, 0x6f, 0x64, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x3c, 0x0a, 0x19, 0x72,
0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54,
0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19,
0x72, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x65, 0x73, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65,
0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3a, 0x0a, 0x0c, 0x73, 0x74, 0x61,
0x72, 0x74, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x16, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
0x75, 0x70, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70,
0x50, 0x72, 0x6f, 0x62, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50,
0x6f, 0x72, 0x74, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x75,
0x73, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70,
0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18,
0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x06,
0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x52, 0x06,
0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x14, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x1c,
0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x75, 0x74,
0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x35, 0x0a, 0x09, 0x6c, 0x69,
0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c,
0x65, 0x12, 0x68, 0x0a, 0x1f, 0x68, 0x6f, 0x6c, 0x64, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74,
0x61, 0x72, 0x74, 0x73, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f,
0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x1f, 0x68, 0x6f, 0x6c, 0x64,
0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x6e, 0x74, 0x69, 0x6c,
0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x13, 0x69,
0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72,
0x74, 0x73, 0x18, 0x26, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64,
0x65, 0x49, 0x6e, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x32, 0x0a,
0x14, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64,
0x50, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x69, 0x6e, 0x63,
0x6c, 0x75, 0x64, 0x65, 0x4f, 0x75, 0x74, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x6f, 0x72, 0x74,
0x73, 0x22, 0x70, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x62,
0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07,
0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2a, 0x0a, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x75,
0x72, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
0x0d, 0x52, 0x10, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68,
0x6f, 0x6c, 0x64, 0x22, 0x5e, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x6e, 0x69, 0x74,
0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x09,
0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x13, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75,
0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x17, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10,
0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x70, 0x75,
0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x22, 0x3e, 0x0a, 0x09, 0x53, 0x44, 0x53, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x31, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x42, 0x02, 0x18,
0x01, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x60, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x72,
0x65, 0x74, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x6f, 0x75, 0x6e,
0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75,
0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x73, 0x65,
0x63, 0x72, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x15, 0x53,
0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f,
0x6e, 0x66, 0x69, 0x67, 0x12, 0x58, 0x0a, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
0x70, 0x61, 0x63, 0x65, 0x73, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2e,
0x0a, 0x12, 0x72, 0x65, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f,
0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x72, 0x65, 0x69, 0x6e,
0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x49,
0x0a, 0x13, 0x6e, 0x65, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x6c,
0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
0x72, 0x75, 0x63, 0x74, 0x52, 0x13, 0x6e, 0x65, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63,
0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x4b, 0x0a, 0x14, 0x61, 0x6c, 0x77,
0x61, 0x79, 0x73, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x52, 0x14, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65,
0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x4c, 0x0a, 0x13, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74,
0x65, 0x41, 0x70, 0x70, 0x48, 0x54, 0x54, 0x50, 0x50, 0x72, 0x6f, 0x62, 0x65, 0x18, 0x10, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
0x13, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x41, 0x70, 0x70, 0x48, 0x54, 0x54, 0x50, 0x50,
0x72, 0x6f, 0x62, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64,
0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x13, 0x69, 0x6e, 0x6a, 0x65,
0x63, 0x74, 0x65, 0x64, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
0x3f, 0x0a, 0x0e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f,
0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x52, 0x0e, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72,
0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x4c,
0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x55, 0x52, 0x4c, 0x12, 0x35, 0x0a, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65,
0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
0x52, 0x09, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x64,
0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x18,
0x18, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x54, 0x65,
0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x12, 0x4e, 0x0a, 0x12, 0x75, 0x73, 0x65, 0x4c, 0x65,
0x67, 0x61, 0x63, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
0x02, 0x18, 0x01, 0x52, 0x12, 0x75, 0x73, 0x65, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x53, 0x65,
0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63,
0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61,
0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x64,
0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
0x67, 0x12, 0x3d, 0x0a, 0x09, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e,
0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x74, 0x65, 0x70, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x73, 0x74, 0x65, 0x70,
0x12, 0x34, 0x0a, 0x06, 0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63,
0x65, 0x72, 0x5a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06,
0x7a, 0x69, 0x70, 0x6b, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64,
0x72, 0x69, 0x76, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61,
0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b,
0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x22, 0x2f, 0x0a, 0x13, 0x54,
0x72, 0x61, 0x63, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x53, 0x0a, 0x15,
0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x4c, 0x69, 0x67, 0x68, 0x74, 0x53, 0x74, 0x65, 0x70, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12,
0x20, 0x0a, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x54, 0x6f, 0x6b, 0x65,
0x6e, 0x22, 0x2e, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5a, 0x69, 0x70, 0x6b, 0x69,
0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x22, 0xf5, 0x01, 0x0a, 0x17, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x53, 0x74, 0x61, 0x63,
0x6b, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a,
0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42,
0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x64, 0x65, 0x62, 0x75, 0x67, 0x12,
0x34, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74,
0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x15,
0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69,
0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x36, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62,
0x65, 0x72, 0x4f, 0x66, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72,
0x4f, 0x66, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3a, 0x0a,
0x18, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x18, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x96, 0x02, 0x0a, 0x0a, 0x42, 0x61,
0x73, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4a, 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62,
0x6c, 0x65, 0x43, 0x52, 0x44, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x52, 0x44, 0x54, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x74, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x55, 0x52, 0x4c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x76, 0x61, 0x6c,
0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x4c, 0x12, 0x50, 0x0a, 0x15, 0x65, 0x6e,
0x61, 0x62, 0x6c, 0x65, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x43,
0x52, 0x44, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x73, 0x74,
0x69, 0x6f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x43, 0x52, 0x44, 0x73, 0x12, 0x44, 0x0a, 0x0f,
0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x18,
0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
0x65, 0x52, 0x0f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x47, 0x61, 0x74, 0x65, 0x77,
0x61, 0x79, 0x22, 0x5e, 0x0a, 0x12, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x64, 0x52, 0x65, 0x6d, 0x6f,
0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x22, 0x0a, 0x0c, 0x69, 0x6e, 0x6a, 0x65,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x4c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x52, 0x4c, 0x12, 0x24, 0x0a, 0x0d,
0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20,
0x01, 0x28, 0x09, 0x52, 0x0d, 0x69, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61,
0x74, 0x68, 0x22, 0xb2, 0x06, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x25, 0x0a,
0x03, 0x63, 0x6e, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x4e, 0x49, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
0x03, 0x63, 0x6e, 0x69, 0x12, 0x34, 0x0a, 0x08, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73,
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61,
0x31, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x08, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x67, 0x6c,
0x6f, 0x62, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x31, 0x61,
0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x12, 0x2b, 0x0a, 0x05, 0x70, 0x69,
0x6c, 0x6f, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, 0x31, 0x61, 0x6c,
0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x69, 0x6c, 0x6f, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x52, 0x05, 0x70, 0x69, 0x6c, 0x6f, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x7a, 0x74, 0x75, 0x6e, 0x6e,
0x65, 0x6c, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65,
0x52, 0x07, 0x7a, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x09, 0x74, 0x65, 0x6c,
0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76,
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
0x72, 0x79, 0x12, 0x57, 0x0a, 0x16, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x49, 0x6e, 0x6a,
0x65, 0x63, 0x74, 0x6f, 0x72, 0x57, 0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x18, 0x0d, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x53, 0x69,
0x64, 0x65, 0x63, 0x61, 0x72, 0x49, 0x6e, 0x6a, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x16, 0x73, 0x69, 0x64, 0x65, 0x63, 0x61, 0x72, 0x49, 0x6e, 0x6a, 0x65,
0x63, 0x74, 0x6f, 0x72, 0x57, 0x65, 0x62, 0x68, 0x6f, 0x6f, 0x6b, 0x12, 0x30, 0x0a, 0x09, 0x69,
0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x6e, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x4e, 0x49, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x08, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x6e, 0x69, 0x12, 0x1a, 0x0a,
0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52,
0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x77, 0x6e,
0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6f, 0x77,
0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x43,
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
0x28, 0x0a, 0x04, 0x62, 0x61, 0x73, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x61, 0x73, 0x65, 0x43, 0x6f, 0x6e,
0x66, 0x69, 0x67, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0c, 0x69, 0x73, 0x74,
0x69, 0x6f, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1c, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f,
0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0c, 0x69,
0x73, 0x74, 0x69, 0x6f, 0x64, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x72,
0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x67, 0x73, 0x18, 0x27, 0x20, 0x03, 0x28,
0x09, 0x52, 0x0c, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x67, 0x73, 0x12,
0x28, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69,
0x6f, 0x6e, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
0x74, 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f,
0x66, 0x69, 0x6c, 0x65, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x66,
0x69, 0x6c, 0x65, 0x12, 0x32, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69,
0x6c, 0x69, 0x74, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x2b, 0x20, 0x01, 0x28,
0x09, 0x52, 0x14, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x0a, 0x0d, 0x5a, 0x65, 0x72, 0x6f, 0x56,
0x50, 0x4e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x16,
0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x22, 0x8c, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x74, 0x4f, 0x72,
0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x69, 0x6e,
0x74, 0x56, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x49, 0x6e, 0x74,
0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x12,
0x34, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x56, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x73,
0x74, 0x72, 0x56, 0x61, 0x6c, 0x2a, 0x4a, 0x0a, 0x15, 0x69, 0x6e, 0x67, 0x72, 0x65, 0x73, 0x73,
0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0f,
0x0a, 0x0b, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06,
0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x46, 0x46, 0x10,
0x03, 0x2a, 0x60, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x7a,
0x69, 0x70, 0x6b, 0x69, 0x6e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x6c, 0x69, 0x67, 0x68, 0x74,
0x73, 0x74, 0x65, 0x70, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
0x67, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x64, 0x72, 0x69, 0x76,
0x65, 0x72, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x43, 0x65, 0x6e, 0x73,
0x75, 0x73, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x6e, 0x6f, 0x6e,
0x65, 0x10, 0x05, 0x42, 0x31, 0x5a, 0x2f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x69, 0x6f, 0x2f,
0x69, 0x73, 0x74, 0x69, 0x6f, 0x2f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x70,
0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2f, 0x76, 0x31,
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
// rawDescOnce guards the one-time, in-place gzip compression of the raw
// descriptor bytes performed by file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP.
file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescOnce sync.Once
// rawDescData initially aliases the uncompressed descriptor bytes
// (file_pkg_apis_istio_v1alpha1_values_types_proto_rawDesc) and is replaced
// with their gzip-compressed form on first use.
file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescData = file_pkg_apis_istio_v1alpha1_values_types_proto_rawDesc
)
// file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP returns the
// gzip-compressed serialized file descriptor for this proto file. The
// compression runs at most once (guarded by the package-level sync.Once);
// the first call overwrites rawDescData in place with its compressed form,
// and every call returns that same package-level slice. Note: generated by
// protoc-gen-go — do not edit by hand.
func file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescGZIP() []byte {
file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescOnce.Do(func() {
// First call only: swap the uncompressed bytes for their gzipped form.
file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescData)
})
return file_pkg_apis_istio_v1alpha1_values_types_proto_rawDescData
}
// Runtime type-info slots populated during package initialization by the
// protobuf runtime: 3 enum types (ingressControllerMode, tracer,
// OutboundTrafficPolicyConfig.Mode per the goTypes table below) and 50
// message types declared by this file.
var file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes = make([]protoimpl.MessageInfo, 50)
// file_pkg_apis_istio_v1alpha1_values_types_proto_goTypes maps the integer
// indices used by the dependency table (depIdxs) to concrete Go types:
// entries 0-2 are this file's enums, 3-48 its message types, 49-52 the
// synthetic map-entry messages (nil placeholders), and 53-58 well-known
// types imported from google.protobuf (wrapperspb/structpb/durationpb).
var file_pkg_apis_istio_v1alpha1_values_types_proto_goTypes = []interface{}{
(IngressControllerMode)(0), // 0: v1alpha1.ingressControllerMode
(Tracer)(0), // 1: v1alpha1.tracer
(OutboundTrafficPolicyConfig_Mode)(0), // 2: v1alpha1.OutboundTrafficPolicyConfig.Mode
(*ArchConfig)(nil), // 3: v1alpha1.ArchConfig
(*CNIConfig)(nil), // 4: v1alpha1.CNIConfig
(*CNIAmbientConfig)(nil), // 5: v1alpha1.CNIAmbientConfig
(*CNIRepairConfig)(nil), // 6: v1alpha1.CNIRepairConfig
(*ResourceQuotas)(nil), // 7: v1alpha1.ResourceQuotas
(*TargetUtilizationConfig)(nil), // 8: v1alpha1.TargetUtilizationConfig
(*Resources)(nil), // 9: v1alpha1.Resources
(*ServiceAccount)(nil), // 10: v1alpha1.ServiceAccount
(*DefaultPodDisruptionBudgetConfig)(nil), // 11: v1alpha1.DefaultPodDisruptionBudgetConfig
(*DefaultResourcesConfig)(nil), // 12: v1alpha1.DefaultResourcesConfig
(*EgressGatewayConfig)(nil), // 13: v1alpha1.EgressGatewayConfig
(*GatewaysConfig)(nil), // 14: v1alpha1.GatewaysConfig
(*GlobalConfig)(nil), // 15: v1alpha1.GlobalConfig
(*STSConfig)(nil), // 16: v1alpha1.STSConfig
(*IstiodConfig)(nil), // 17: v1alpha1.IstiodConfig
(*GlobalLoggingConfig)(nil), // 18: v1alpha1.GlobalLoggingConfig
(*IngressGatewayConfig)(nil), // 19: v1alpha1.IngressGatewayConfig
(*IngressGatewayZvpnConfig)(nil), // 20: v1alpha1.IngressGatewayZvpnConfig
(*MultiClusterConfig)(nil), // 21: v1alpha1.MultiClusterConfig
(*OutboundTrafficPolicyConfig)(nil), // 22: v1alpha1.OutboundTrafficPolicyConfig
(*PilotConfig)(nil), // 23: v1alpha1.PilotConfig
(*PilotIngressConfig)(nil), // 24: v1alpha1.PilotIngressConfig
(*PilotPolicyConfig)(nil), // 25: v1alpha1.PilotPolicyConfig
(*TelemetryConfig)(nil), // 26: v1alpha1.TelemetryConfig
(*TelemetryV2Config)(nil), // 27: v1alpha1.TelemetryV2Config
(*TelemetryV2PrometheusConfig)(nil), // 28: v1alpha1.TelemetryV2PrometheusConfig
(*TelemetryV2StackDriverConfig)(nil), // 29: v1alpha1.TelemetryV2StackDriverConfig
(*PilotConfigSource)(nil), // 30: v1alpha1.PilotConfigSource
(*PortsConfig)(nil), // 31: v1alpha1.PortsConfig
(*ProxyConfig)(nil), // 32: v1alpha1.ProxyConfig
(*StartupProbe)(nil), // 33: v1alpha1.StartupProbe
(*ProxyInitConfig)(nil), // 34: v1alpha1.ProxyInitConfig
(*ResourcesRequestsConfig)(nil), // 35: v1alpha1.ResourcesRequestsConfig
(*SDSConfig)(nil), // 36: v1alpha1.SDSConfig
(*SecretVolume)(nil), // 37: v1alpha1.SecretVolume
(*SidecarInjectorConfig)(nil), // 38: v1alpha1.SidecarInjectorConfig
(*TracerConfig)(nil), // 39: v1alpha1.TracerConfig
(*TracerDatadogConfig)(nil), // 40: v1alpha1.TracerDatadogConfig
(*TracerLightStepConfig)(nil), // 41: v1alpha1.TracerLightStepConfig
(*TracerZipkinConfig)(nil), // 42: v1alpha1.TracerZipkinConfig
(*TracerStackdriverConfig)(nil), // 43: v1alpha1.TracerStackdriverConfig
(*BaseConfig)(nil), // 44: v1alpha1.BaseConfig
(*IstiodRemoteConfig)(nil), // 45: v1alpha1.IstiodRemoteConfig
(*Values)(nil), // 46: v1alpha1.Values
(*ZeroVPNConfig)(nil), // 47: v1alpha1.ZeroVPNConfig
(*IntOrString)(nil), // 48: v1alpha1.IntOrString
nil, // 49: v1alpha1.Resources.LimitsEntry
nil, // 50: v1alpha1.Resources.RequestsEntry
nil, // 51: v1alpha1.EgressGatewayConfig.LabelsEntry
nil, // 52: v1alpha1.IngressGatewayConfig.LabelsEntry
(*wrapperspb.BoolValue)(nil), // 53: google.protobuf.BoolValue
(*structpb.Value)(nil), // 54: google.protobuf.Value
(*structpb.Struct)(nil), // 55: google.protobuf.Struct
(*durationpb.Duration)(nil), // 56: google.protobuf.Duration
(*wrapperspb.Int32Value)(nil), // 57: google.protobuf.Int32Value
(*wrapperspb.StringValue)(nil), // 58: google.protobuf.StringValue
}
// file_pkg_apis_istio_v1alpha1_values_types_proto_depIdxs records, for every
// message field whose type is another message/enum, the index of that type in
// the goTypes table above. The trailing five entries delimit the sub-lists
// used for method input/output types, extensions, and field type_names.
// Generated by protoc-gen-go; do not edit by hand.
var file_pkg_apis_istio_v1alpha1_values_types_proto_depIdxs = []int32{
	53, // 0: v1alpha1.CNIConfig.enabled:type_name -> google.protobuf.BoolValue
	54, // 1: v1alpha1.CNIConfig.tag:type_name -> google.protobuf.Value
	55, // 2: v1alpha1.CNIConfig.affinity:type_name -> google.protobuf.Struct
	55, // 3: v1alpha1.CNIConfig.podAnnotations:type_name -> google.protobuf.Struct
	6, // 4: v1alpha1.CNIConfig.repair:type_name -> v1alpha1.CNIRepairConfig
	53, // 5: v1alpha1.CNIConfig.chained:type_name -> google.protobuf.BoolValue
	7, // 6: v1alpha1.CNIConfig.resource_quotas:type_name -> v1alpha1.ResourceQuotas
	9, // 7: v1alpha1.CNIConfig.resources:type_name -> v1alpha1.Resources
	53, // 8: v1alpha1.CNIConfig.privileged:type_name -> google.protobuf.BoolValue
	55, // 9: v1alpha1.CNIConfig.seccompProfile:type_name -> google.protobuf.Struct
	5, // 10: v1alpha1.CNIConfig.ambient:type_name -> v1alpha1.CNIAmbientConfig
	48, // 11: v1alpha1.CNIConfig.rollingMaxUnavailable:type_name -> v1alpha1.IntOrString
	53, // 12: v1alpha1.CNIAmbientConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 13: v1alpha1.CNIAmbientConfig.dnsCapture:type_name -> google.protobuf.BoolValue
	53, // 14: v1alpha1.CNIRepairConfig.enabled:type_name -> google.protobuf.BoolValue
	54, // 15: v1alpha1.CNIRepairConfig.tag:type_name -> google.protobuf.Value
	53, // 16: v1alpha1.ResourceQuotas.enabled:type_name -> google.protobuf.BoolValue
	49, // 17: v1alpha1.Resources.limits:type_name -> v1alpha1.Resources.LimitsEntry
	50, // 18: v1alpha1.Resources.requests:type_name -> v1alpha1.Resources.RequestsEntry
	55, // 19: v1alpha1.ServiceAccount.annotations:type_name -> google.protobuf.Struct
	53, // 20: v1alpha1.DefaultPodDisruptionBudgetConfig.enabled:type_name -> google.protobuf.BoolValue
	35, // 21: v1alpha1.DefaultResourcesConfig.requests:type_name -> v1alpha1.ResourcesRequestsConfig
	53, // 22: v1alpha1.EgressGatewayConfig.autoscaleEnabled:type_name -> google.protobuf.BoolValue
	8, // 23: v1alpha1.EgressGatewayConfig.memory:type_name -> v1alpha1.TargetUtilizationConfig
	8, // 24: v1alpha1.EgressGatewayConfig.cpu:type_name -> v1alpha1.TargetUtilizationConfig
	53, // 25: v1alpha1.EgressGatewayConfig.customService:type_name -> google.protobuf.BoolValue
	53, // 26: v1alpha1.EgressGatewayConfig.enabled:type_name -> google.protobuf.BoolValue
	55, // 27: v1alpha1.EgressGatewayConfig.env:type_name -> google.protobuf.Struct
	51, // 28: v1alpha1.EgressGatewayConfig.labels:type_name -> v1alpha1.EgressGatewayConfig.LabelsEntry
	55, // 29: v1alpha1.EgressGatewayConfig.nodeSelector:type_name -> google.protobuf.Struct
	55, // 30: v1alpha1.EgressGatewayConfig.podAnnotations:type_name -> google.protobuf.Struct
	55, // 31: v1alpha1.EgressGatewayConfig.podAntiAffinityLabelSelector:type_name -> google.protobuf.Struct
	55, // 32: v1alpha1.EgressGatewayConfig.podAntiAffinityTermLabelSelector:type_name -> google.protobuf.Struct
	31, // 33: v1alpha1.EgressGatewayConfig.ports:type_name -> v1alpha1.PortsConfig
	9, // 34: v1alpha1.EgressGatewayConfig.resources:type_name -> v1alpha1.Resources
	37, // 35: v1alpha1.EgressGatewayConfig.secretVolumes:type_name -> v1alpha1.SecretVolume
	55, // 36: v1alpha1.EgressGatewayConfig.serviceAnnotations:type_name -> google.protobuf.Struct
	47, // 37: v1alpha1.EgressGatewayConfig.zvpn:type_name -> v1alpha1.ZeroVPNConfig
	55, // 38: v1alpha1.EgressGatewayConfig.tolerations:type_name -> google.protobuf.Struct
	48, // 39: v1alpha1.EgressGatewayConfig.rollingMaxSurge:type_name -> v1alpha1.IntOrString
	48, // 40: v1alpha1.EgressGatewayConfig.rollingMaxUnavailable:type_name -> v1alpha1.IntOrString
	55, // 41: v1alpha1.EgressGatewayConfig.configVolumes:type_name -> google.protobuf.Struct
	55, // 42: v1alpha1.EgressGatewayConfig.additionalContainers:type_name -> google.protobuf.Struct
	53, // 43: v1alpha1.EgressGatewayConfig.runAsRoot:type_name -> google.protobuf.BoolValue
	10, // 44: v1alpha1.EgressGatewayConfig.serviceAccount:type_name -> v1alpha1.ServiceAccount
	13, // 45: v1alpha1.GatewaysConfig.istio_egressgateway:type_name -> v1alpha1.EgressGatewayConfig
	53, // 46: v1alpha1.GatewaysConfig.enabled:type_name -> google.protobuf.BoolValue
	19, // 47: v1alpha1.GatewaysConfig.istio_ingressgateway:type_name -> v1alpha1.IngressGatewayConfig
	3, // 48: v1alpha1.GlobalConfig.arch:type_name -> v1alpha1.ArchConfig
	53, // 49: v1alpha1.GlobalConfig.configValidation:type_name -> google.protobuf.BoolValue
	55, // 50: v1alpha1.GlobalConfig.defaultNodeSelector:type_name -> google.protobuf.Struct
	11, // 51: v1alpha1.GlobalConfig.defaultPodDisruptionBudget:type_name -> v1alpha1.DefaultPodDisruptionBudgetConfig
	12, // 52: v1alpha1.GlobalConfig.defaultResources:type_name -> v1alpha1.DefaultResourcesConfig
	55, // 53: v1alpha1.GlobalConfig.defaultTolerations:type_name -> google.protobuf.Struct
	53, // 54: v1alpha1.GlobalConfig.logAsJson:type_name -> google.protobuf.BoolValue
	18, // 55: v1alpha1.GlobalConfig.logging:type_name -> v1alpha1.GlobalLoggingConfig
	55, // 56: v1alpha1.GlobalConfig.meshNetworks:type_name -> google.protobuf.Struct
	21, // 57: v1alpha1.GlobalConfig.multiCluster:type_name -> v1alpha1.MultiClusterConfig
	53, // 58: v1alpha1.GlobalConfig.omitSidecarInjectorConfigMap:type_name -> google.protobuf.BoolValue
	53, // 59: v1alpha1.GlobalConfig.oneNamespace:type_name -> google.protobuf.BoolValue
	53, // 60: v1alpha1.GlobalConfig.operatorManageWebhooks:type_name -> google.protobuf.BoolValue
	32, // 61: v1alpha1.GlobalConfig.proxy:type_name -> v1alpha1.ProxyConfig
	34, // 62: v1alpha1.GlobalConfig.proxy_init:type_name -> v1alpha1.ProxyInitConfig
	36, // 63: v1alpha1.GlobalConfig.sds:type_name -> v1alpha1.SDSConfig
	54, // 64: v1alpha1.GlobalConfig.tag:type_name -> google.protobuf.Value
	39, // 65: v1alpha1.GlobalConfig.tracer:type_name -> v1alpha1.TracerConfig
	53, // 66: v1alpha1.GlobalConfig.useMCP:type_name -> google.protobuf.BoolValue
	17, // 67: v1alpha1.GlobalConfig.istiod:type_name -> v1alpha1.IstiodConfig
	16, // 68: v1alpha1.GlobalConfig.sts:type_name -> v1alpha1.STSConfig
	53, // 69: v1alpha1.GlobalConfig.mountMtlsCerts:type_name -> google.protobuf.BoolValue
	53, // 70: v1alpha1.GlobalConfig.externalIstiod:type_name -> google.protobuf.BoolValue
	53, // 71: v1alpha1.GlobalConfig.configCluster:type_name -> google.protobuf.BoolValue
	53, // 72: v1alpha1.GlobalConfig.autoscalingv2API:type_name -> google.protobuf.BoolValue
	53, // 73: v1alpha1.IstiodConfig.enableAnalysis:type_name -> google.protobuf.BoolValue
	53, // 74: v1alpha1.IngressGatewayConfig.autoscaleEnabled:type_name -> google.protobuf.BoolValue
	8, // 75: v1alpha1.IngressGatewayConfig.memory:type_name -> v1alpha1.TargetUtilizationConfig
	8, // 76: v1alpha1.IngressGatewayConfig.cpu:type_name -> v1alpha1.TargetUtilizationConfig
	53, // 77: v1alpha1.IngressGatewayConfig.customService:type_name -> google.protobuf.BoolValue
	53, // 78: v1alpha1.IngressGatewayConfig.enabled:type_name -> google.protobuf.BoolValue
	55, // 79: v1alpha1.IngressGatewayConfig.env:type_name -> google.protobuf.Struct
	52, // 80: v1alpha1.IngressGatewayConfig.labels:type_name -> v1alpha1.IngressGatewayConfig.LabelsEntry
	55, // 81: v1alpha1.IngressGatewayConfig.nodeSelector:type_name -> google.protobuf.Struct
	55, // 82: v1alpha1.IngressGatewayConfig.podAnnotations:type_name -> google.protobuf.Struct
	55, // 83: v1alpha1.IngressGatewayConfig.podAntiAffinityLabelSelector:type_name -> google.protobuf.Struct
	55, // 84: v1alpha1.IngressGatewayConfig.podAntiAffinityTermLabelSelector:type_name -> google.protobuf.Struct
	31, // 85: v1alpha1.IngressGatewayConfig.ports:type_name -> v1alpha1.PortsConfig
	55, // 86: v1alpha1.IngressGatewayConfig.resources:type_name -> google.protobuf.Struct
	37, // 87: v1alpha1.IngressGatewayConfig.secretVolumes:type_name -> v1alpha1.SecretVolume
	55, // 88: v1alpha1.IngressGatewayConfig.serviceAnnotations:type_name -> google.protobuf.Struct
	20, // 89: v1alpha1.IngressGatewayConfig.zvpn:type_name -> v1alpha1.IngressGatewayZvpnConfig
	48, // 90: v1alpha1.IngressGatewayConfig.rollingMaxSurge:type_name -> v1alpha1.IntOrString
	48, // 91: v1alpha1.IngressGatewayConfig.rollingMaxUnavailable:type_name -> v1alpha1.IntOrString
	55, // 92: v1alpha1.IngressGatewayConfig.tolerations:type_name -> google.protobuf.Struct
	55, // 93: v1alpha1.IngressGatewayConfig.ingressPorts:type_name -> google.protobuf.Struct
	55, // 94: v1alpha1.IngressGatewayConfig.additionalContainers:type_name -> google.protobuf.Struct
	55, // 95: v1alpha1.IngressGatewayConfig.configVolumes:type_name -> google.protobuf.Struct
	53, // 96: v1alpha1.IngressGatewayConfig.runAsRoot:type_name -> google.protobuf.BoolValue
	10, // 97: v1alpha1.IngressGatewayConfig.serviceAccount:type_name -> v1alpha1.ServiceAccount
	53, // 98: v1alpha1.IngressGatewayZvpnConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 99: v1alpha1.MultiClusterConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 100: v1alpha1.MultiClusterConfig.includeEnvoyFilter:type_name -> google.protobuf.BoolValue
	2, // 101: v1alpha1.OutboundTrafficPolicyConfig.mode:type_name -> v1alpha1.OutboundTrafficPolicyConfig.Mode
	53, // 102: v1alpha1.PilotConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 103: v1alpha1.PilotConfig.autoscaleEnabled:type_name -> google.protobuf.BoolValue
	55, // 104: v1alpha1.PilotConfig.autoscaleBehavior:type_name -> google.protobuf.Struct
	9, // 105: v1alpha1.PilotConfig.resources:type_name -> v1alpha1.Resources
	8, // 106: v1alpha1.PilotConfig.cpu:type_name -> v1alpha1.TargetUtilizationConfig
	55, // 107: v1alpha1.PilotConfig.nodeSelector:type_name -> google.protobuf.Struct
	56, // 108: v1alpha1.PilotConfig.keepaliveMaxServerConnectionAge:type_name -> google.protobuf.Duration
	55, // 109: v1alpha1.PilotConfig.deploymentLabels:type_name -> google.protobuf.Struct
	55, // 110: v1alpha1.PilotConfig.podLabels:type_name -> google.protobuf.Struct
	53, // 111: v1alpha1.PilotConfig.configMap:type_name -> google.protobuf.BoolValue
	53, // 112: v1alpha1.PilotConfig.useMCP:type_name -> google.protobuf.BoolValue
	55, // 113: v1alpha1.PilotConfig.env:type_name -> google.protobuf.Struct
	55, // 114: v1alpha1.PilotConfig.affinity:type_name -> google.protobuf.Struct
	48, // 115: v1alpha1.PilotConfig.rollingMaxSurge:type_name -> v1alpha1.IntOrString
	48, // 116: v1alpha1.PilotConfig.rollingMaxUnavailable:type_name -> v1alpha1.IntOrString
	55, // 117: v1alpha1.PilotConfig.tolerations:type_name -> google.protobuf.Struct
	53, // 118: v1alpha1.PilotConfig.enableProtocolSniffingForOutbound:type_name -> google.protobuf.BoolValue
	53, // 119: v1alpha1.PilotConfig.enableProtocolSniffingForInbound:type_name -> google.protobuf.BoolValue
	55, // 120: v1alpha1.PilotConfig.podAnnotations:type_name -> google.protobuf.Struct
	55, // 121: v1alpha1.PilotConfig.serviceAnnotations:type_name -> google.protobuf.Struct
	30, // 122: v1alpha1.PilotConfig.configSource:type_name -> v1alpha1.PilotConfigSource
	54, // 123: v1alpha1.PilotConfig.tag:type_name -> google.protobuf.Value
	55, // 124: v1alpha1.PilotConfig.seccompProfile:type_name -> google.protobuf.Struct
	55, // 125: v1alpha1.PilotConfig.topologySpreadConstraints:type_name -> google.protobuf.Struct
	55, // 126: v1alpha1.PilotConfig.extraContainerArgs:type_name -> google.protobuf.Struct
	55, // 127: v1alpha1.PilotConfig.volumeMounts:type_name -> google.protobuf.Struct
	55, // 128: v1alpha1.PilotConfig.volumes:type_name -> google.protobuf.Struct
	8, // 129: v1alpha1.PilotConfig.memory:type_name -> v1alpha1.TargetUtilizationConfig
	0, // 130: v1alpha1.PilotIngressConfig.ingressControllerMode:type_name -> v1alpha1.ingressControllerMode
	53, // 131: v1alpha1.PilotPolicyConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 132: v1alpha1.TelemetryConfig.enabled:type_name -> google.protobuf.BoolValue
	27, // 133: v1alpha1.TelemetryConfig.v2:type_name -> v1alpha1.TelemetryV2Config
	53, // 134: v1alpha1.TelemetryV2Config.enabled:type_name -> google.protobuf.BoolValue
	28, // 135: v1alpha1.TelemetryV2Config.prometheus:type_name -> v1alpha1.TelemetryV2PrometheusConfig
	29, // 136: v1alpha1.TelemetryV2Config.stackdriver:type_name -> v1alpha1.TelemetryV2StackDriverConfig
	53, // 137: v1alpha1.TelemetryV2PrometheusConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 138: v1alpha1.TelemetryV2StackDriverConfig.enabled:type_name -> google.protobuf.BoolValue
	53, // 139: v1alpha1.ProxyConfig.enableCoreDump:type_name -> google.protobuf.BoolValue
	53, // 140: v1alpha1.ProxyConfig.privileged:type_name -> google.protobuf.BoolValue
	33, // 141: v1alpha1.ProxyConfig.startupProbe:type_name -> v1alpha1.StartupProbe
	9, // 142: v1alpha1.ProxyConfig.resources:type_name -> v1alpha1.Resources
	1, // 143: v1alpha1.ProxyConfig.tracer:type_name -> v1alpha1.tracer
	55, // 144: v1alpha1.ProxyConfig.lifecycle:type_name -> google.protobuf.Struct
	53, // 145: v1alpha1.ProxyConfig.holdApplicationUntilProxyStarts:type_name -> google.protobuf.BoolValue
	53, // 146: v1alpha1.StartupProbe.enabled:type_name -> google.protobuf.BoolValue
	9, // 147: v1alpha1.ProxyInitConfig.resources:type_name -> v1alpha1.Resources
	55, // 148: v1alpha1.SDSConfig.token:type_name -> google.protobuf.Struct
	53, // 149: v1alpha1.SidecarInjectorConfig.enableNamespacesByDefault:type_name -> google.protobuf.BoolValue
	55, // 150: v1alpha1.SidecarInjectorConfig.neverInjectSelector:type_name -> google.protobuf.Struct
	55, // 151: v1alpha1.SidecarInjectorConfig.alwaysInjectSelector:type_name -> google.protobuf.Struct
	53, // 152: v1alpha1.SidecarInjectorConfig.rewriteAppHTTPProbe:type_name -> google.protobuf.BoolValue
	55, // 153: v1alpha1.SidecarInjectorConfig.injectedAnnotations:type_name -> google.protobuf.Struct
	55, // 154: v1alpha1.SidecarInjectorConfig.objectSelector:type_name -> google.protobuf.Struct
	55, // 155: v1alpha1.SidecarInjectorConfig.templates:type_name -> google.protobuf.Struct
	53, // 156: v1alpha1.SidecarInjectorConfig.useLegacySelectors:type_name -> google.protobuf.BoolValue
	40, // 157: v1alpha1.TracerConfig.datadog:type_name -> v1alpha1.TracerDatadogConfig
	41, // 158: v1alpha1.TracerConfig.lightstep:type_name -> v1alpha1.TracerLightStepConfig
	42, // 159: v1alpha1.TracerConfig.zipkin:type_name -> v1alpha1.TracerZipkinConfig
	43, // 160: v1alpha1.TracerConfig.stackdriver:type_name -> v1alpha1.TracerStackdriverConfig
	53, // 161: v1alpha1.TracerStackdriverConfig.debug:type_name -> google.protobuf.BoolValue
	53, // 162: v1alpha1.BaseConfig.enableCRDTemplates:type_name -> google.protobuf.BoolValue
	53, // 163: v1alpha1.BaseConfig.enableIstioConfigCRDs:type_name -> google.protobuf.BoolValue
	53, // 164: v1alpha1.BaseConfig.validateGateway:type_name -> google.protobuf.BoolValue
	4, // 165: v1alpha1.Values.cni:type_name -> v1alpha1.CNIConfig
	14, // 166: v1alpha1.Values.gateways:type_name -> v1alpha1.GatewaysConfig
	15, // 167: v1alpha1.Values.global:type_name -> v1alpha1.GlobalConfig
	23, // 168: v1alpha1.Values.pilot:type_name -> v1alpha1.PilotConfig
	54, // 169: v1alpha1.Values.ztunnel:type_name -> google.protobuf.Value
	26, // 170: v1alpha1.Values.telemetry:type_name -> v1alpha1.TelemetryConfig
	38, // 171: v1alpha1.Values.sidecarInjectorWebhook:type_name -> v1alpha1.SidecarInjectorConfig
	4, // 172: v1alpha1.Values.istio_cni:type_name -> v1alpha1.CNIConfig
	54, // 173: v1alpha1.Values.meshConfig:type_name -> google.protobuf.Value
	44, // 174: v1alpha1.Values.base:type_name -> v1alpha1.BaseConfig
	45, // 175: v1alpha1.Values.istiodRemote:type_name -> v1alpha1.IstiodRemoteConfig
	53, // 176: v1alpha1.ZeroVPNConfig.enabled:type_name -> google.protobuf.BoolValue
	57, // 177: v1alpha1.IntOrString.intVal:type_name -> google.protobuf.Int32Value
	58, // 178: v1alpha1.IntOrString.strVal:type_name -> google.protobuf.StringValue
	179, // [179:179] is the sub-list for method output_type
	179, // [179:179] is the sub-list for method input_type
	179, // [179:179] is the sub-list for extension type_name
	179, // [179:179] is the sub-list for extension extendee
	0, // [0:179] is the sub-list for field type_name
}
// init eagerly builds the proto file descriptor when the package is loaded,
// so reflection-based access never observes an uninitialized File.
func init() {
	file_pkg_apis_istio_v1alpha1_values_types_proto_init()
}
// file_pkg_apis_istio_v1alpha1_values_types_proto_init builds the file
// descriptor exactly once: it is a no-op if File_... has already been set.
// Generated by protoc-gen-go; do not edit by hand.
func file_pkg_apis_istio_v1alpha1_values_types_proto_init() {
	// Idempotency guard: Build() below assigns File_..., so a second call
	// (e.g. from another generated file's init) returns immediately.
	if File_pkg_apis_istio_v1alpha1_values_types_proto != nil {
		return
	}
	// When unsafe access to message internals is disabled, install reflection
	// exporters that surface each message's hidden state/sizeCache/
	// unknownFields for the protobuf runtime (i selects which field).
	if !protoimpl.UnsafeEnabled {
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ArchConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CNIConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CNIAmbientConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*CNIRepairConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ResourceQuotas); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TargetUtilizationConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Resources); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ServiceAccount); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*DefaultPodDisruptionBudgetConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*DefaultResourcesConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*EgressGatewayConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GatewaysConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GlobalConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*STSConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*IstiodConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GlobalLoggingConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*IngressGatewayConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*IngressGatewayZvpnConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*MultiClusterConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*OutboundTrafficPolicyConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PilotConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PilotIngressConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PilotPolicyConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TelemetryConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TelemetryV2Config); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TelemetryV2PrometheusConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TelemetryV2StackDriverConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PilotConfigSource); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PortsConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ProxyConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StartupProbe); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ProxyInitConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ResourcesRequestsConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SDSConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SecretVolume); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SidecarInjectorConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerDatadogConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerLightStepConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerZipkinConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerStackdriverConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*BaseConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*IstiodRemoteConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Values); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ZeroVPNConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*IntOrString); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// x exists only so reflect can report this package's import path.
	type x struct{}
	// Assemble the file descriptor from the raw descriptor bytes plus the
	// Go type and dependency tables declared above.
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_pkg_apis_istio_v1alpha1_values_types_proto_rawDesc,
			NumEnums:      3,
			NumMessages:   50,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_pkg_apis_istio_v1alpha1_values_types_proto_goTypes,
		DependencyIndexes: file_pkg_apis_istio_v1alpha1_values_types_proto_depIdxs,
		EnumInfos:         file_pkg_apis_istio_v1alpha1_values_types_proto_enumTypes,
		MessageInfos:      file_pkg_apis_istio_v1alpha1_values_types_proto_msgTypes,
	}.Build()
	File_pkg_apis_istio_v1alpha1_values_types_proto = out.File
	// Release the build-time tables so the GC can reclaim them; the built
	// File now owns all the descriptor state.
	file_pkg_apis_istio_v1alpha1_values_types_proto_rawDesc = nil
	file_pkg_apis_istio_v1alpha1_values_types_proto_goTypes = nil
	file_pkg_apis_istio_v1alpha1_values_types_proto_depIdxs = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IstioOperator) DeepCopyInto(out *IstioOperator) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// NOTE(review): *out = *in above copies the Spec/Status POINTERS, so at
	// this point out.Spec aliases in.Spec and the call below deep-copies into
	// the shared struct rather than allocating a fresh one for out. Confirm
	// this aliasing is intended by the proto-message DeepCopyInto contract.
	if in.Spec != nil {
		in.Spec.DeepCopyInto(out.Spec)
	}
	if in.Status != nil {
		in.Status.DeepCopyInto(out.Status)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioOperator.
// A nil receiver yields a nil result.
func (in *IstioOperator) DeepCopy() *IstioOperator {
	if in == nil {
		return nil
	}
	clone := &IstioOperator{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IstioOperator) DeepCopyObject() runtime.Object {
	c := in.DeepCopy()
	if c == nil {
		// Return an untyped nil so callers' `obj == nil` checks behave.
		return nil
	}
	return c
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IstioOperatorList) DeepCopyInto(out *IstioOperatorList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// A nil Items slice stays nil (copied by the struct assignment above);
	// a non-nil slice gets a freshly allocated, element-wise deep copy.
	if in.Items == nil {
		return
	}
	items := make([]IstioOperator, len(in.Items))
	for i := range in.Items {
		in.Items[i].DeepCopyInto(&items[i])
	}
	out.Items = items
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioOperatorList.
// A nil receiver yields a nil result.
func (in *IstioOperatorList) DeepCopy() *IstioOperatorList {
	if in == nil {
		return nil
	}
	clone := &IstioOperatorList{}
	in.DeepCopyInto(clone)
	return clone
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IstioOperatorList) DeepCopyObject() runtime.Object {
	c := in.DeepCopy()
	if c == nil {
		// Return an untyped nil so callers' `obj == nil` checks behave.
		return nil
	}
	return c
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"sync"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/object"
)
// ObjectCache is a cache of objects, holding the last applied copy of each object.
type ObjectCache struct {
	// Cache is a cache keyed by object Hash() function.
	Cache map[string]*object.K8sObject
	// Mu guards Cache. It is a pointer so callers holding the ObjectCache can lock the
	// same mutex across the map's lifetime.
	Mu *sync.RWMutex
}
var (
	// objectCaches holds the latest copy of each object applied by the controller. The caches are divided by component
	// name.
	objectCaches = make(map[string]*ObjectCache)
	// objectCachesMu guards the objectCaches map itself; each ObjectCache has its own Mu for its contents.
	objectCachesMu sync.RWMutex
)
// FlushObjectCaches flushes all object caches by replacing the global cache map,
// and records the flush in metrics.
func FlushObjectCaches() {
	objectCachesMu.Lock()
	defer objectCachesMu.Unlock()
	objectCaches = make(map[string]*ObjectCache)
	metrics.CacheFlushTotal.Increment()
}
// GetCache returns the object Cache for the given name, creating one in the global Cache if needed.
// Caches are partitioned per CR name so the Cache can be pruned of any objects not in the manifest
// generated for that CR.
func GetCache(name string) *ObjectCache {
	objectCachesMu.Lock()
	defer objectCachesMu.Unlock()
	if existing := objectCaches[name]; existing != nil {
		return existing
	}
	created := &ObjectCache{
		Cache: make(map[string]*object.K8sObject),
		Mu:    &sync.RWMutex{},
	}
	objectCaches[name] = created
	return created
}
// RemoveObject removes the object with hash objHash from the object Cache with the given name.
// It is a no-op when no cache exists for name.
func RemoveObject(name, objHash string) {
	objectCachesMu.RLock()
	oc := objectCaches[name]
	objectCachesMu.RUnlock()
	if oc == nil {
		return
	}
	oc.Mu.Lock()
	delete(oc.Cache, objHash)
	oc.Mu.Unlock()
}
// RemoveCache removes the object Cache with the given name from the global cache map.
func RemoveCache(name string) {
	objectCachesMu.Lock()
	defer objectCachesMu.Unlock()
	delete(objectCaches, name)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package component defines an in-memory representation of IstioOperator.<Feature>.<Component>. It provides functions
for manipulating the component and rendering a manifest from it.
See ../README.md for an architecture overview.
*/
package component
import (
"fmt"
"k8s.io/apimachinery/pkg/version"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/patch"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
const (
	// componentDisabledStr is the string to emit for any component which is disabled.
	componentDisabledStr = "component is disabled."
	// yamlCommentStr marks a line as a YAML comment.
	yamlCommentStr = "#"
)

// scope is the logging scope for installer messages in this package.
var scope = log.RegisterScope("installer", "installer")
// Options defines options for a component.
type Options struct {
	// InstallSpec is the global IstioOperatorSpec.
	InstallSpec *v1alpha1.IstioOperatorSpec
	// Translator is the translator for this component.
	Translator *translate.Translator
	// Namespace is the namespace for this component.
	Namespace string
	// Filter is the filenames to render; an empty set means render all files.
	Filter sets.String
	// Version is the Kubernetes version information.
	Version *version.Info
}
// IstioComponent defines the interface for a component.
type IstioComponent interface {
	// ComponentName returns the name of the component.
	ComponentName() name.ComponentName
	// ResourceName returns the name of the resources of the component.
	ResourceName() string
	// Namespace returns the namespace for the component.
	Namespace() string
	// Enabled reports whether the component is enabled.
	Enabled() bool
	// Run starts the component. Must be called before the component is used.
	Run() error
	// RenderManifest returns a string with the rendered manifest for the component.
	RenderManifest() (string, error)
}
// CommonComponentFields is a struct common to all components.
type CommonComponentFields struct {
	*Options
	// ComponentName is the name of the component.
	ComponentName name.ComponentName
	// ResourceName is the name of all resources for this component.
	ResourceName string
	// index is the index of the component (only used for components with multiple instances like gateways).
	index int
	// componentSpec for the actual component e.g. GatewaySpec, ComponentSpec.
	componentSpec any
	// started reports whether the component is initialized and running; set by Run.
	started bool
	// renderer renders this component's helm chart; set by Run.
	renderer helm.TemplateRenderer
}

// IstioComponentBase provides the IstioComponent implementation shared by all concrete components.
type IstioComponentBase struct {
	*CommonComponentFields
}
// ComponentName returns the name of the component.
func (c *IstioComponentBase) ComponentName() name.ComponentName {
	return c.CommonComponentFields.ComponentName
}

// ResourceName returns the name of the resources of the component.
func (c *IstioComponentBase) ResourceName() string {
	return c.CommonComponentFields.ResourceName
}

// Namespace returns the namespace for the component.
func (c *IstioComponentBase) Namespace() string {
	return c.CommonComponentFields.Namespace
}

// Enabled reports whether the component is enabled. Gateways carry their own Enabled flag
// on the GatewaySpec; all other components consult the translator.
func (c *IstioComponentBase) Enabled() bool {
	if c.CommonComponentFields.ComponentName.IsGateway() {
		// type assert is guaranteed to work in this context.
		return c.componentSpec.(*v1alpha1.GatewaySpec).Enabled.GetValue()
	}
	return isCoreComponentEnabled(c.CommonComponentFields)
}

// Run starts the component. Must be called before RenderManifest.
func (c *IstioComponentBase) Run() error {
	return runComponent(c.CommonComponentFields)
}

// RenderManifest returns a string with the rendered manifest for the component.
func (c *IstioComponentBase) RenderManifest() (string, error) {
	return renderManifest(c)
}
// NewCoreComponent creates a new IstioComponent with the given componentName and options.
// An unknown component name is logged and nil is returned; callers must tolerate a nil
// component (existing behavior, preserved).
func NewCoreComponent(cn name.ComponentName, opts *Options) IstioComponent {
	var component IstioComponent
	switch cn {
	case name.IstioBaseComponentName:
		component = NewCRDComponent(opts)
	case name.PilotComponentName:
		component = NewPilotComponent(opts)
	case name.CNIComponentName:
		component = NewCNIComponent(opts)
	case name.IstiodRemoteComponentName:
		component = NewIstiodRemoteComponent(opts)
	case name.ZtunnelComponentName:
		component = NewZtunnelComponent(opts)
	default:
		// Pass the dynamic value as an argument rather than concatenating it into the
		// format string: go vet's printf check flags non-constant format strings, and a
		// stray '%' in the name would be misinterpreted as a verb.
		scope.Errorf("Unknown component componentName: %s", cn)
	}
	return component
}
// BaseComponent is the base component.
type BaseComponent struct {
	*IstioComponentBase
}

// NewCRDComponent creates a new BaseComponent and returns a pointer to it.
func NewCRDComponent(opts *Options) *BaseComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.IstioBaseComponentName,
	}
	return &BaseComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// PilotComponent is the pilot component.
type PilotComponent struct {
	*IstioComponentBase
}

// NewPilotComponent creates a new PilotComponent and returns a pointer to it.
func NewPilotComponent(opts *Options) *PilotComponent {
	const cn = name.PilotComponentName
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: cn,
		// Pilot's resource name comes from the translator's component map.
		ResourceName: opts.Translator.ComponentMaps[cn].ResourceName,
	}
	return &PilotComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// CNIComponent is the istio cni component.
type CNIComponent struct {
	*IstioComponentBase
}

// NewCNIComponent creates a new CNIComponent and returns a pointer to it.
func NewCNIComponent(opts *Options) *CNIComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.CNIComponentName,
	}
	return &CNIComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// IstiodRemoteComponent is the istiod remote component.
type IstiodRemoteComponent struct {
	*IstioComponentBase
}

// NewIstiodRemoteComponent creates a new IstiodRemoteComponent and returns a pointer to it.
func NewIstiodRemoteComponent(opts *Options) *IstiodRemoteComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.IstiodRemoteComponentName,
	}
	return &IstiodRemoteComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// IngressComponent is the ingress gateway component.
type IngressComponent struct {
	*IstioComponentBase
}

// NewIngressComponent creates a new IngressComponent and returns a pointer to it.
// index distinguishes multiple gateway instances; spec is the gateway's own GatewaySpec.
func NewIngressComponent(resourceName string, index int, spec *v1alpha1.GatewaySpec, opts *Options) *IngressComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.IngressComponentName,
		ResourceName:  resourceName,
		index:         index,
		componentSpec: spec,
	}
	return &IngressComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// EgressComponent is the egress gateway component.
type EgressComponent struct {
	*IstioComponentBase
}

// NewEgressComponent creates a new EgressComponent and returns a pointer to it.
// index distinguishes multiple gateway instances; spec is the gateway's own GatewaySpec.
func NewEgressComponent(resourceName string, index int, spec *v1alpha1.GatewaySpec, opts *Options) *EgressComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.EgressComponentName,
		ResourceName:  resourceName,
		index:         index,
		componentSpec: spec,
	}
	return &EgressComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// ZtunnelComponent is the istio ztunnel component.
type ZtunnelComponent struct {
	*IstioComponentBase
}

// NewZtunnelComponent creates a new ZtunnelComponent and returns a pointer to it.
func NewZtunnelComponent(opts *Options) *ZtunnelComponent {
	fields := &CommonComponentFields{
		Options:       opts,
		ComponentName: name.ZtunnelComponentName,
	}
	return &ZtunnelComponent{IstioComponentBase: &IstioComponentBase{CommonComponentFields: fields}}
}
// runComponent performs startup tasks for the component defined by the given CommonComponentFields:
// it creates and starts the helm renderer, then marks the component started.
func runComponent(c *CommonComponentFields) error {
	renderer := createHelmRenderer(c)
	if err := renderer.Run(); err != nil {
		return err
	}
	c.renderer = renderer
	c.started = true
	return nil
}
// renderManifest renders the manifest for the component defined by c and returns the resulting string.
// Pipeline: IOP spec -> helm values -> helm chart render -> K8s settings overlay -> user K8s overlays.
// The component must have been started via Run first.
func renderManifest(cf *IstioComponentBase) (string, error) {
	if !cf.started {
		metrics.CountManifestRenderError(cf.ComponentName(), metrics.RenderNotStartedError)
		return "", fmt.Errorf("component %s not started in RenderManifest", cf.CommonComponentFields.ComponentName)
	}
	if !cf.Enabled() {
		// Disabled components render as a one-line YAML comment rather than an empty string.
		return disabledYAMLStr(cf.ComponentName(), cf.CommonComponentFields.ResourceName), nil
	}
	// Translate the IstioOperator spec into merged helm values for this component.
	mergedYAML, err := cf.Translator.TranslateHelmValues(cf.InstallSpec, cf.componentSpec, cf.ComponentName())
	if err != nil {
		metrics.CountManifestRenderError(cf.ComponentName(), metrics.HelmTranslateIOPToValuesError)
		return "", err
	}
	scope.Debugf("Merged values:\n%s\n", mergedYAML)
	// Render the helm chart, restricted to the configured filename filter when one is set.
	my, err := cf.renderer.RenderManifestFiltered(mergedYAML, func(s string) bool {
		return cf.Filter.IsEmpty() || cf.Filter.Contains(s)
	})
	if err != nil {
		log.Errorf("Error rendering the manifest: %s", err)
		metrics.CountManifestRenderError(cf.ComponentName(), metrics.HelmChartRenderError)
		return "", err
	}
	my += helm.YAMLSeparator + "\n"
	scope.Debugf("Initial manifest with merged values:\n%s\n", my)
	// Add the k8s resources from IstioOperatorSpec.
	my, err = cf.Translator.OverlayK8sSettings(my, cf.InstallSpec, cf.CommonComponentFields.ComponentName,
		cf.CommonComponentFields.ResourceName, cf.index)
	if err != nil {
		metrics.CountManifestRenderError(cf.ComponentName(), metrics.K8SSettingsOverlayError)
		return "", err
	}
	cnOutput := string(cf.CommonComponentFields.ComponentName)
	my = "# Resources for " + cnOutput + " component\n\n" + my
	scope.Debugf("Manifest after k8s API settings:\n%s\n", my)
	// Add the k8s resource overlays from IstioOperatorSpec.
	pathToK8sOverlay := fmt.Sprintf("Components.%s.", cf.CommonComponentFields.ComponentName)
	if cf.CommonComponentFields.ComponentName.IsGateway() {
		// Gateways are repeated fields, so the overlay path includes the instance index.
		pathToK8sOverlay += fmt.Sprintf("%d.", cf.index)
	}
	pathToK8sOverlay += "K8S.Overlays"
	var overlays []*v1alpha1.K8SObjectOverlay
	found, err := tpath.SetFromPath(cf.InstallSpec, pathToK8sOverlay, &overlays)
	if err != nil {
		return "", err
	}
	if !found {
		// No user overlays configured for this component; return the manifest as-is.
		scope.Debugf("Manifest after resources: \n%s\n", my)
		metrics.CountManifestRender(cf.ComponentName())
		return my, nil
	}
	kyo, err := yaml.Marshal(overlays)
	if err != nil {
		return "", err
	}
	scope.Infof("Applying Kubernetes overlay: \n%s\n", kyo)
	ret, err := patch.YAMLManifestPatch(my, cf.Namespace(), overlays)
	if err != nil {
		metrics.CountManifestRenderError(cf.ComponentName(), metrics.K8SManifestPatchError)
		return "", err
	}
	scope.Debugf("Manifest after resources and overlay: \n%s\n", ret)
	metrics.CountManifestRender(cf.ComponentName())
	return ret, nil
}
// createHelmRenderer creates a helm renderer for the component defined by c and returns a ptr to it.
// If a helm subdir is not found in ComponentMap translations, it is assumed to be "addon/<component name>".
func createHelmRenderer(c *CommonComponentFields) helm.TemplateRenderer {
	componentName := string(c.ComponentName)
	subdir := c.Translator.ComponentMap(componentName).HelmSubdir
	return helm.NewHelmRenderer(c.InstallSpec.InstallPackagePath, subdir, componentName, c.Namespace, c.Version)
}
// isCoreComponentEnabled reports whether the translator considers the component enabled
// in the install spec. Translator errors are treated as disabled.
func isCoreComponentEnabled(c *CommonComponentFields) bool {
	enabled, err := c.Translator.IsComponentEnabled(c.ComponentName, c.InstallSpec)
	return err == nil && enabled
}
// disabledYAMLStr returns the YAML comment string stating that the given component is disabled.
// resourceName, when non-empty, is appended to the component name in the message.
func disabledYAMLStr(componentName name.ComponentName, resourceName string) string {
	label := string(componentName)
	if resourceName != "" {
		label = label + " " + resourceName
	}
	return fmt.Sprintf("%s %s %s\n", yamlCommentStr, label, componentDisabledStr)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controlplane
import (
"fmt"
"sort"
"k8s.io/apimachinery/pkg/version"
"istio.io/api/operator/v1alpha1"
iop "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/component"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/util/sets"
)
// IstioControlPlane is an installation of an Istio control plane.
type IstioControlPlane struct {
	// components is a slice of components that are part of the feature.
	components []component.IstioComponent
	// started is set once Run has succeeded; RenderManifest requires it.
	started bool
}
// NewIstioControlPlane creates a new IstioControlPlane and returns a pointer to it.
// It builds one component per core component name plus one component per configured
// ingress and egress gateway. Run must be called before RenderManifest.
func NewIstioControlPlane(
	installSpec *v1alpha1.IstioOperatorSpec,
	translator *translate.Translator,
	filter []string,
	ver *version.Info,
) (*IstioControlPlane, error) {
	out := &IstioControlPlane{}
	opts := &component.Options{
		InstallSpec: installSpec,
		Translator:  translator,
		Filter:      sets.New(filter...),
		Version:     ver,
	}
	for _, c := range name.AllCoreComponentNames {
		// Copy opts so each component gets its own Namespace without mutating the shared value.
		o := *opts
		ns, err := name.Namespace(c, installSpec)
		if err != nil {
			return nil, err
		}
		o.Namespace = ns
		out.components = append(out.components, component.NewCoreComponent(c, &o))
	}
	if installSpec.Components != nil {
		for idx, c := range installSpec.Components.IngressGateways {
			o := *opts
			// Gateways without an explicit namespace fall back to the IstioOperator namespace.
			o.Namespace = defaultIfEmpty(c.Namespace, iop.Namespace(installSpec))
			out.components = append(out.components, component.NewIngressComponent(c.Name, idx, c, &o))
		}
		for idx, c := range installSpec.Components.EgressGateways {
			o := *opts
			o.Namespace = defaultIfEmpty(c.Namespace, iop.Namespace(installSpec))
			out.components = append(out.components, component.NewEgressComponent(c.Name, idx, c, &o))
		}
	}
	return out, nil
}
// orderedKeys returns the keys of m sorted lexically, for deterministic iteration.
// Returns nil for an empty map (matching the previous behavior).
func orderedKeys(m map[string]*v1alpha1.ExternalComponentSpec) []string {
	if len(m) == 0 {
		return nil
	}
	// Pre-size the slice: the final length is known, so avoid repeated growth copies.
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
// defaultIfEmpty returns dflt when val is the empty string, otherwise val.
func defaultIfEmpty(val, dflt string) string {
	if len(val) == 0 {
		return dflt
	}
	return val
}
// Run starts the Istio control plane by running every component in order.
// The first component error aborts the run; started is only set when all succeed.
func (i *IstioControlPlane) Run() error {
	for _, c := range i.components {
		err := c.Run()
		if err != nil {
			return err
		}
	}
	i.started = true
	return nil
}
// RenderManifest renders the manifests of all components and returns them keyed by
// component name, along with any accumulated errors. Run must be called first.
func (i *IstioControlPlane) RenderManifest() (manifests name.ManifestMap, errsOut util.Errors) {
	if !i.started {
		return nil, util.NewErrs(fmt.Errorf("istioControlPlane must be Run before calling RenderManifest"))
	}
	manifests = make(name.ManifestMap)
	for _, c := range i.components {
		ms, err := c.RenderManifest()
		errsOut = util.AppendErr(errsOut, err)
		cn := c.ComponentName()
		manifests[cn] = append(manifests[cn], ms)
	}
	if len(errsOut) > 0 {
		return nil, errsOut
	}
	return manifests, errsOut
}
// componentsEqual reports whether the given components are equal to those in i.
// Components are compared positionally by name, namespace, enablement, and resource name.
func (i *IstioControlPlane) componentsEqual(components []component.IstioComponent) bool {
	if i.components == nil && components == nil {
		return true
	}
	if len(i.components) != len(components) {
		return false
	}
	for idx := range i.components {
		mine, theirs := i.components[idx], components[idx]
		if mine.ComponentName() != theirs.ComponentName() ||
			mine.Namespace() != theirs.Namespace() ||
			mine.Enabled() != theirs.Enabled() ||
			mine.ResourceName() != theirs.ResourceName() {
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helm
import (
"fmt"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chartutil"
"helm.sh/helm/v3/pkg/engine"
"k8s.io/apimachinery/pkg/version"
"sigs.k8s.io/yaml"
"istio.io/istio/istioctl/pkg/install/k8sversion"
"istio.io/istio/manifests"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/log"
)
const (
	// YAMLSeparator is a separator for multi-document YAML files.
	YAMLSeparator = "\n---\n"
	// DefaultProfileString is the name of the default profile.
	DefaultProfileString = "default"
	// NotesFileNameSuffix is the file name suffix for helm notes.
	// see https://helm.sh/docs/chart_template_guide/notes_files/
	NotesFileNameSuffix = ".txt"
)

// scope is the logging scope for installer messages in this package.
var scope = log.RegisterScope("installer", "installer")
// TemplateFilterFunc filters templates to render by their file name.
type TemplateFilterFunc func(string) bool

// TemplateRenderer defines a helm template renderer interface.
type TemplateRenderer interface {
	// Run starts the renderer and should be called before using it.
	Run() error
	// RenderManifest renders the associated helm charts with the given values YAML string and returns the resulting
	// string.
	RenderManifest(values string) (string, error)
	// RenderManifestFiltered filters manifests to render by template file name.
	RenderManifestFiltered(values string, filter TemplateFilterFunc) (string, error)
}
// NewHelmRenderer creates a new helm renderer with the given parameters and returns an interface to it.
// The format of helmBaseDir and profile strings determines the type of helm renderer returned (compiled-in, file,
// HTTP etc.)
func NewHelmRenderer(operatorDataDir, helmSubdir, componentName, namespace string, version *version.Info) TemplateRenderer {
	// Chart paths are always "/"-separated, regardless of OS.
	chartDir := ChartsSubdirName + "/" + helmSubdir
	return NewGenericRenderer(manifests.BuiltinOrDir(operatorDataDir), chartDir, componentName, namespace, version)
}
// ReadProfileYAML reads the YAML values associated with the given profile. It uses an appropriate reader for the
// profile format (compiled-in, file, HTTP, etc.).
func ReadProfileYAML(profile, manifestsPath string) (string, error) {
	// A profile that looks like a file path is read directly from disk.
	if util.IsFilePath(profile) {
		globalValues, err := readFile(profile)
		if err != nil {
			return "", err
		}
		return globalValues, nil
	}
	// Otherwise treat it as a named profile under the manifests path (or builtin).
	globalValues, err := LoadValues(profile, manifestsPath)
	if err != nil {
		return "", fmt.Errorf("failed to read profile %v from %v: %v", profile, manifestsPath, err)
	}
	return globalValues, nil
}
// renderChart renders the given chart with the given values and returns the resulting YAML manifest string.
// filterFunc, when non-nil, restricts which template files are rendered; version, when non-nil, overrides
// the Kubernetes capabilities advertised to helm. Output order is stabilized by sorting file names.
func renderChart(namespace, values string, chrt *chart.Chart, filterFunc TemplateFilterFunc, version *version.Info) (string, error) {
	options := chartutil.ReleaseOptions{
		Name:      "istio",
		Namespace: namespace,
	}
	valuesMap := map[string]any{}
	if err := yaml.Unmarshal([]byte(values), &valuesMap); err != nil {
		return "", fmt.Errorf("failed to unmarshal values: %v", err)
	}
	caps := *chartutil.DefaultCapabilities
	// overwrite helm default capabilities
	operatorVersion, _ := chartutil.ParseKubeVersion("1." + strconv.Itoa(k8sversion.MinK8SVersion) + ".0")
	caps.KubeVersion = *operatorVersion
	if version != nil {
		// A concrete cluster version is known; prefer it over the minimum supported version.
		caps.KubeVersion = chartutil.KubeVersion{
			Version: version.GitVersion,
			Major:   version.Major,
			Minor:   version.Minor,
		}
	}
	vals, err := chartutil.ToRenderValues(chrt, valuesMap, options, &caps)
	if err != nil {
		return "", err
	}
	if filterFunc != nil {
		filteredTemplates := []*chart.File{}
		for _, t := range chrt.Templates {
			// Always include required templates that do not produce any output
			if filterFunc(t.Name) || strings.HasSuffix(t.Name, ".tpl") || t.Name == "templates/zzz_profile.yaml" {
				filteredTemplates = append(filteredTemplates, t)
			}
		}
		chrt.Templates = filteredTemplates
	}
	files, err := engine.Render(chrt, vals)
	crdFiles := chrt.CRDObjects()
	if err != nil {
		return "", err
	}
	if chrt.Metadata.Name == "base" {
		// The base chart can opt out of shipping the Istio config CRDs via values.
		base, _ := valuesMap["base"].(map[string]any)
		if enableIstioConfigCRDs, ok := base["enableIstioConfigCRDs"].(bool); ok && !enableIstioConfigCRDs {
			crdFiles = []chart.CRD{}
		}
	}
	// Create sorted array of keys to iterate over, to stabilize the order of the rendered templates
	keys := make([]string, 0, len(files))
	for k := range files {
		// Skip helm notes files; they are not manifests.
		if strings.HasSuffix(k, NotesFileNameSuffix) {
			continue
		}
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var sb strings.Builder
	for i := 0; i < len(keys); i++ {
		f := files[keys[i]]
		// add yaml separator if the rendered file doesn't have one at the end
		f = strings.TrimSpace(f) + "\n"
		if !strings.HasSuffix(f, YAMLSeparator) {
			f += YAMLSeparator
		}
		_, err := sb.WriteString(f)
		if err != nil {
			return "", err
		}
	}
	// Sort crd files by name to ensure stable manifest output
	sort.Slice(crdFiles, func(i, j int) bool { return crdFiles[i].Name < crdFiles[j].Name })
	for _, crdFile := range crdFiles {
		f := string(crdFile.File.Data)
		// add yaml separator if the rendered file doesn't have one at the end
		f = strings.TrimSpace(f) + "\n"
		if !strings.HasSuffix(f, YAMLSeparator) {
			f += YAMLSeparator
		}
		_, err := sb.WriteString(f)
		if err != nil {
			return "", err
		}
	}
	return sb.String(), nil
}
// GenerateHubTagOverlay creates an IstioOperatorSpec overlay YAML for hub and tag.
func GenerateHubTagOverlay(hub, tag string) (string, error) {
	// The template's indentation is part of the emitted YAML: hub and tag must nest under spec.
	hubTagYAMLTemplate := `
spec:
  hub: {{.Hub}}
  tag: {{.Tag}}
`
	ts := struct {
		Hub string
		Tag string
	}{
		Hub: hub,
		Tag: tag,
	}
	return util.RenderTemplate(hubTagYAMLTemplate, ts)
}
// DefaultFilenameForProfile returns the profile name of the default profile for the given profile.
// For a file-path profile this is the default profile file in the same directory; otherwise it is
// the builtin default profile name.
func DefaultFilenameForProfile(profile string) string {
	if util.IsFilePath(profile) {
		return filepath.Join(filepath.Dir(profile), DefaultProfileFilename)
	}
	return DefaultProfileString
}
// IsDefaultProfile reports whether the given profile is the default profile,
// either by empty/default name or by its file base name.
func IsDefaultProfile(profile string) bool {
	switch {
	case profile == "", profile == DefaultProfileString:
		return true
	default:
		return filepath.Base(profile) == DefaultProfileFilename
	}
}
// readFile returns the contents of the file at path as a string, along with any read error.
func readFile(path string) (string, error) {
	contents, err := os.ReadFile(path)
	return string(contents), err
}
// GetProfileYAML returns the YAML for the given profile name, using the given profileOrPath string, which may be either
// a profile label or a file path. Non-default profiles are overlaid on top of the default profile's YAML.
func GetProfileYAML(installPackagePath, profileOrPath string) (string, error) {
	if profileOrPath == "" {
		profileOrPath = "default"
	}
	profiles, err := readProfiles(installPackagePath)
	if err != nil {
		return "", fmt.Errorf("failed to read profiles: %v", err)
	}
	// If charts are a file path and profile is a name like default, transform it to the file path.
	if profiles[profileOrPath] && installPackagePath != "" {
		profileOrPath = filepath.Join(installPackagePath, "profiles", profileOrPath+".yaml")
	}
	// This contains the IstioOperator CR.
	baseCRYAML, err := ReadProfileYAML(profileOrPath, installPackagePath)
	if err != nil {
		return "", err
	}
	if !IsDefaultProfile(profileOrPath) {
		// Profile definitions are relative to the default profileOrPath, so read that first.
		dfn := DefaultFilenameForProfile(profileOrPath)
		defaultYAML, err := ReadProfileYAML(dfn, installPackagePath)
		if err != nil {
			return "", err
		}
		// Overlay the requested profile on top of the default profile's YAML.
		baseCRYAML, err = util.OverlayIOP(defaultYAML, baseCRYAML)
		if err != nil {
			return "", err
		}
	}
	return baseCRYAML, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helm
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"helm.sh/helm/v3/pkg/chart"
"helm.sh/helm/v3/pkg/chart/loader"
"k8s.io/apimachinery/pkg/version"
"istio.io/istio/manifests"
"istio.io/istio/operator/pkg/util"
)
const (
	// DefaultProfileFilename is the name of the default profile yaml file.
	DefaultProfileFilename = "default.yaml"
	// ChartsSubdirName is the subdirectory containing the helm charts.
	ChartsSubdirName = "charts"
	// profilesRoot is the subdirectory containing the profile values files.
	profilesRoot = "profiles"
)
// Renderer is a helm template renderer for a fs.FS.
type Renderer struct {
	// namespace is the release namespace passed to helm.
	namespace string
	// componentName is the name of the component/chart being rendered.
	componentName string
	// chart is the loaded helm chart; set by Run via loadChart.
	chart *chart.Chart
	// started is set once Run succeeds; rendering requires it.
	started bool
	// files is the filesystem the chart is loaded from.
	files fs.FS
	// dir is the chart root directory within files.
	dir string
	// Kubernetes cluster version
	version *version.Info
}
// NewGenericRenderer creates a Renderer backed by the given fs.FS with the given parameters and
// returns a pointer to it. dir must be the root of the helm charts within files.
func NewGenericRenderer(files fs.FS, dir, componentName, namespace string, version *version.Info) *Renderer {
	return &Renderer{
		namespace:     namespace,
		componentName: componentName,
		dir:           dir,
		files:         files,
		version:       version,
	}
}
// Run implements the TemplateRenderer interface: it loads the chart from the configured
// filesystem and marks the renderer as started.
func (h *Renderer) Run() error {
	err := h.loadChart()
	if err != nil {
		return err
	}
	h.started = true
	return nil
}
// RenderManifest renders the current helm templates with the current values and returns the resulting YAML manifest string.
func (h *Renderer) RenderManifest(values string) (string, error) {
	// Unfiltered rendering is the filtered path with a nil filter.
	return h.RenderManifestFiltered(values, nil)
}

// RenderManifestFiltered filters templates to render using the supplied filter function.
func (h *Renderer) RenderManifestFiltered(values string, filter TemplateFilterFunc) (string, error) {
	if !h.started {
		return "", fmt.Errorf("fileTemplateRenderer for %s not started in renderChart", h.componentName)
	}
	return renderChart(h.namespace, values, h.chart, filter, h.version)
}
func GetFilesRecursive(f fs.FS, root string) ([]string, error) {
res := []string{}
err := fs.WalkDir(f, root, func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if d.IsDir() {
return nil
}
res = append(res, path)
return nil
})
return res, err
}
// loadChart implements the TemplateRenderer interface. It reads every file under h.dir from h.files
// and loads them as a helm chart into h.chart.
func (h *Renderer) loadChart() error {
	fnames, err := GetFilesRecursive(h.files, h.dir)
	if err != nil {
		if os.IsNotExist(err) {
			return fmt.Errorf("component %q does not exist", h.componentName)
		}
		return fmt.Errorf("list files: %v", err)
	}
	var bfs []*loader.BufferedFile
	for _, fname := range fnames {
		b, err := fs.ReadFile(h.files, fname)
		if err != nil {
			return fmt.Errorf("read file: %v", err)
		}
		// Helm expects unix / separator, but on windows this will be \
		name := strings.ReplaceAll(stripPrefix(fname, h.dir), string(filepath.Separator), "/")
		bf := &loader.BufferedFile{
			Name: name,
			Data: b,
		}
		bfs = append(bfs, bf)
		scope.Debugf("Chart loaded: %s", bf.Name)
	}
	h.chart, err = loader.LoadFiles(bfs)
	if err != nil {
		return fmt.Errorf("load files: %v", err)
	}
	return nil
}
// builtinProfileToFilename maps a builtin profile name to its values file name.
// An empty name maps to the default profile file.
func builtinProfileToFilename(name string) string {
	if name != "" {
		return name + ".yaml"
	}
	return DefaultProfileFilename
}
func LoadValues(profileName string, chartsDir string) (string, error) {
path := strings.Join([]string{profilesRoot, builtinProfileToFilename(profileName)}, "/")
by, err := fs.ReadFile(manifests.BuiltinOrDir(chartsDir), path)
if err != nil {
return "", err
}
return string(by), nil
}
// readProfiles returns the set of profile names found under profilesRoot in chartsDir
// (or the builtin manifests). A profile name is a ".yaml" file's base name without the suffix.
func readProfiles(chartsDir string) (map[string]bool, error) {
	profiles := map[string]bool{}
	entries, err := fs.ReadDir(manifests.BuiltinOrDir(chartsDir), profilesRoot)
	if err != nil {
		return nil, err
	}
	for _, entry := range entries {
		base := entry.Name()
		if strings.HasSuffix(base, ".yaml") {
			profiles[strings.TrimSuffix(base, ".yaml")] = true
		}
	}
	return profiles, nil
}
// stripPrefix removes the leading path elements covered by prefix from path.
// Both are "/"-separated; the number of elements in prefix determines how many
// leading elements of path are dropped.
func stripPrefix(path, prefix string) string {
	drop := len(strings.Split(prefix, "/"))
	elements := strings.Split(path, "/")
	return strings.Join(elements[drop:], "/")
}
// ListProfiles lists all the profiles available under charts (or the builtin manifests).
func ListProfiles(charts string) ([]string, error) {
	found, err := readProfiles(charts)
	if err != nil {
		return nil, err
	}
	return util.StringBoolMapToSlice(found), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"context"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
"istio.io/istio/operator/pkg/cache"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/util/progress"
)
// fieldOwnerOperator identifies the istio operator as the owner of fields it applies.
const fieldOwnerOperator = "istio-operator"

// AppliedResult is the result of applying a Manifest.
type AppliedResult struct {
	// processedObjects is the list of objects that were processed in this apply operation.
	processedObjects object.K8sObjects
	// deployed is the number of objects have been deployed which means
	// it's in the cache and it's not changed from the cache.
	deployed int
}

// Succeed reports whether the apply operation processed or deployed at least one object.
func (r AppliedResult) Succeed() bool {
	return len(r.processedObjects) > 0 || r.deployed > 0
}
// ApplyManifest applies the manifest to create or update resources. It returns the processed (created or updated)
// objects and the number of objects in the manifests.
func (h *HelmReconciler) ApplyManifest(manifest name.Manifest) (result AppliedResult, _ error) {
	cname := string(manifest.Name)
	crHash, err := h.getCRHash(cname)
	if err != nil {
		return result, err
	}
	scope.Infof("Processing resources from manifest: %s for CR %s", cname, crHash)
	allObjects, err := object.ParseK8sObjectsFromYAMLManifest(manifest.Content)
	if err != nil {
		return result, err
	}
	objectCache := cache.GetCache(crHash)
	// Ensure that for a given CR crHash only one control loop uses the per-crHash cache at any time.
	objectCache.Mu.Lock()
	defer objectCache.Mu.Unlock()
	// No further locking required beyond this point, since we have a ptr to a cache corresponding to a CR crHash and no
	// other controller is allowed to work on at the same time.
	var changedObjects object.K8sObjects
	var changedObjectKeys []string
	allObjectsMap := make(map[string]bool)
	// Check which objects in the manifest have changed from those in the cache.
	for _, obj := range allObjects {
		oh := obj.Hash()
		allObjectsMap[oh] = true
		if co, ok := objectCache.Cache[oh]; ok && obj.Equal(co) {
			// Object is in the cache and unchanged: count as deployed, nothing to apply.
			metrics.AddResource(obj.FullName(), obj.GroupVersionKind().GroupKind())
			result.deployed++
			continue
		}
		changedObjects = append(changedObjects, obj)
		changedObjectKeys = append(changedObjectKeys, oh)
	}
	// plog is only created when there is something to apply; the apply loop below only
	// runs for changed objects, so plog is never dereferenced while nil.
	var plog *progress.ManifestLog
	if len(changedObjectKeys) > 0 {
		plog = h.opts.ProgressLog.NewComponent(cname)
		scope.Infof("The following objects differ between generated manifest and cache: \n - %s", strings.Join(changedObjectKeys, "\n - "))
	} else {
		scope.Infof("Generated manifest objects are the same as cached for component %s.", cname)
	}
	// Objects are applied in groups: namespaces, CRDs, everything else, with wait for ready in between.
	nsObjs := object.KindObjects(changedObjects, name.NamespaceStr)
	crdObjs := object.KindObjects(changedObjects, name.CRDStr)
	otherObjs := object.ObjectsNotInLists(changedObjects, nsObjs, crdObjs)
	for _, objList := range []object.K8sObjects{nsObjs, crdObjs, otherObjs} {
		// For a given group of objects, apply in sorted order of priority with no wait in between.
		objList.Sort(object.DefaultObjectOrder())
		for _, obj := range objList {
			obju := obj.UnstructuredObject()
			if err := h.applyLabelsAndAnnotations(obju, cname); err != nil {
				return result, err
			}
			// Apply the same object that applyLabelsAndAnnotations just mutated; do not
			// re-fetch via obj.UnstructuredObject(), which would silently drop the labels
			// if that accessor ever returned a freshly built object.
			if err := h.ApplyObject(obju); err != nil {
				plog.ReportError(err.Error())
				return result, err
			}
			plog.ReportProgress()
			metrics.AddResource(obj.FullName(), obj.GroupVersionKind().GroupKind())
			result.processedObjects = append(result.processedObjects, obj)
			// Update the cache with the latest object.
			objectCache.Cache[obj.Hash()] = obj
		}
	}
	// Prune anything not in the manifest out of the cache.
	var removeKeys []string
	for k := range objectCache.Cache {
		if !allObjectsMap[k] {
			removeKeys = append(removeKeys, k)
		}
	}
	for _, k := range removeKeys {
		scope.Infof("Pruning object %s from cache.", k)
		delete(objectCache.Cache, k)
	}
	if len(changedObjectKeys) > 0 {
		err := WaitForResources(result.processedObjects, h.kubeClient,
			h.opts.WaitTimeout, h.opts.DryRun, plog)
		if err != nil {
			werr := fmt.Errorf("failed to wait for resource: %v", err)
			plog.ReportError(werr.Error())
			return result, werr
		}
		plog.ReportFinished()
	}
	return result, nil
}
// ApplyObject creates or updates an object in the API server depending on whether it already exists.
// It mutates obj.
func (h *HelmReconciler) ApplyObject(obj *unstructured.Unstructured) error {
	// A List kind is unpacked and each item applied individually, collecting item errors.
	if obj.GetKind() == "List" {
		list, err := obj.ToList()
		if err != nil {
			scope.Errorf("error converting List object: %s", err)
			return err
		}
		var itemErrs util.Errors
		for _, item := range list.Items {
			if applyErr := h.ApplyObject(&item); applyErr != nil {
				itemErrs = util.AppendErr(itemErrs, applyErr)
			}
		}
		return itemErrs.ToError()
	}
	desc := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName())
	if scope.DebugEnabled() {
		scope.Debugf("Processing object:\n%s\n\n", util.ToYAML(obj))
	}
	if h.opts.DryRun {
		scope.Infof("Not applying object %s because of dry run.", desc)
		return nil
	}
	return h.serverSideApply(obj)
}
// serverSideApply creates or updates obj via server-side apply; requires kubernetes 1.16+.
func (h *HelmReconciler) serverSideApply(obj *unstructured.Unstructured) error {
	desc := fmt.Sprintf("%s/%s/%s", obj.GetKind(), obj.GetNamespace(), obj.GetName())
	scope.Infof("using server side apply to update obj: %v", desc)
	// ForceOwnership resolves field conflicts in favor of the operator's field manager.
	patchOpts := []client.PatchOption{client.ForceOwnership, client.FieldOwner(fieldOwnerOperator)}
	err := h.client.Patch(context.TODO(), obj, client.Apply, patchOpts...)
	if err != nil {
		return fmt.Errorf("failed to update resource with server-side apply for obj %v: %v", desc, err)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"io"
"strings"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/pkg/log"
)
const (
	// MetadataNamespace is the namespace (prefix) for mesh metadata keys (labels, annotations).
	MetadataNamespace = "install.operator.istio.io"
	// OwningResourceName represents the name of the owner to which the resource relates
	OwningResourceName = MetadataNamespace + "/owning-resource"
	// OwningResourceNamespace represents the namespace of the owner to which the resource relates
	OwningResourceNamespace = MetadataNamespace + "/owning-resource-namespace"
	// OwningResourceNotPruned indicates that the resource should not be pruned during reconciliation cycles,
	// note this will not prevent the resource from being deleted if the owning resource is deleted.
	OwningResourceNotPruned = MetadataNamespace + "/owning-resource-not-pruned"
	// operatorLabelStr indicates Istio operator is managing this resource.
	operatorLabelStr = name.OperatorAPINamespace + "/managed"
	// operatorReconcileStr is the value of operatorLabelStr indicating that the operator will reconcile the resource.
	operatorReconcileStr = "Reconcile"
	// IstioComponentLabelStr indicates which Istio component a resource belongs to.
	IstioComponentLabelStr = name.OperatorAPINamespace + "/component"
	// istioVersionLabelStr indicates the Istio version of the installation.
	istioVersionLabelStr = name.OperatorAPINamespace + "/version"
)
var (
	// TestMode sets the controller into test mode. Used for unit tests to bypass things like waiting on resources.
	TestMode = false
	// scope is the logging scope shared by all installer/reconciler code in this package.
	scope = log.RegisterScope("installer", "installer")
)
// init constructs the static install dependency tree once at package load time.
func init() {
	// Tree representation and wait channels are an inversion of ComponentDependencies and are constructed from it.
	buildInstallTree()
}
// ComponentTree represents a tree of component dependencies.
type (
	// ComponentTree maps a component to the subtree of components that depend on it.
	// Values are always ComponentTree (see insertChildrenRecursive).
	ComponentTree map[name.ComponentName]any
	// componentNameToListMap maps a component to the list of its direct children.
	componentNameToListMap map[name.ComponentName][]name.ComponentName
)
var (
	// ComponentDependencies is a tree of component dependencies. The semantics are ComponentDependencies[cname] gives
	// the subtree of components that must wait for cname to be installed before starting installation themselves.
	ComponentDependencies = componentNameToListMap{
		name.PilotComponentName: {
			name.CNIComponentName,
			name.IngressComponentName,
			name.EgressComponentName,
		},
		name.IstioBaseComponentName: {
			name.PilotComponentName,
		},
		name.CNIComponentName: {
			name.ZtunnelComponentName,
		},
	}
	// InstallTree is a top down hierarchy tree of dependencies where children must wait for the parent to complete
	// before starting installation. Populated from ComponentDependencies by init().
	InstallTree = make(ComponentTree)
)
// buildInstallTree builds InstallTree from ComponentDependencies, where parents are the root of each subtree.
func buildInstallTree() {
	// Starting with root, recursively insert each first level child into each node.
	insertChildrenRecursive(name.IstioBaseComponentName, InstallTree, ComponentDependencies)
}
// insertChildrenRecursive adds componentName to tree with an empty subtree, then recursively
// inserts each of its children (per the children map) into that subtree.
func insertChildrenRecursive(componentName name.ComponentName, tree ComponentTree, children componentNameToListMap) {
	subtree := make(ComponentTree)
	tree[componentName] = subtree
	for _, child := range children[componentName] {
		insertChildrenRecursive(child, subtree, children)
	}
}
// InstallTreeString returns a string representation of the dependency tree.
func InstallTreeString() string {
	sb := &strings.Builder{}
	buildInstallTreeString(name.IstioBaseComponentName, "", sb)
	return sb.String()
}
// buildInstallTreeString writes componentName (indented by prefix) and then, recursively,
// every child of componentName found in InstallTree.
func buildInstallTreeString(componentName name.ComponentName, prefix string, sb io.StringWriter) {
	_, _ = sb.WriteString(prefix + string(componentName) + "\n")
	subtree, ok := InstallTree[componentName].(ComponentTree)
	if !ok {
		// Leaf node (or absent): nothing further to print.
		return
	}
	for k := range subtree {
		buildInstallTreeString(k, prefix+" ", sb)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"context"
"fmt"
"strings"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/version"
"sigs.k8s.io/controller-runtime/pkg/client"
"istio.io/api/label"
"istio.io/api/operator/v1alpha1"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/cache"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/proxy"
)
const (
	// autoscalingV2MinK8SVersion is the minimum Kubernetes minor version (1.23) on which the
	// autoscaling/v2 HPA API is used; older clusters fall back to autoscaling/v2beta2.
	autoscalingV2MinK8SVersion = 23
)
var (
	// ClusterResources are resource types the operator prunes, ordered by which types should be deleted, first to last.
	ClusterResources = []schema.GroupVersionKind{
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.MutatingWebhookConfigurationStr},
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.ValidatingWebhookConfigurationStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleBindingStr},
		// Cannot currently prune CRDs because this will also wipe out user config.
		// {Group: "apiextensions.k8s.io", Version: "v1beta1", Kind: name.CRDStr},
	}
	// ClusterCPResources lists cluster scope resources types which should be deleted during uninstall command.
	ClusterCPResources = []schema.GroupVersionKind{
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.MutatingWebhookConfigurationStr},
		{Group: "admissionregistration.k8s.io", Version: "v1", Kind: name.ValidatingWebhookConfigurationStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.ClusterRoleBindingStr},
	}
	// AllClusterResources lists all cluster scope resources types which should be deleted in purge case, including CRD.
	AllClusterResources = append(ClusterResources,
		schema.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: name.CRDStr},
		schema.GroupVersionKind{Group: "k8s.cni.cncf.io", Version: "v1", Kind: name.NetworkAttachmentDefinitionStr},
	)
)
// NamespacedResources gets specific pruning resources based on the k8s version
func NamespacedResources(version *version.Info) []schema.GroupVersionKind {
	gvks := []schema.GroupVersionKind{
		{Group: "apps", Version: "v1", Kind: name.DeploymentStr},
		{Group: "apps", Version: "v1", Kind: name.DaemonSetStr},
		{Group: "", Version: "v1", Kind: name.ServiceStr},
		{Group: "", Version: "v1", Kind: name.CMStr},
		{Group: "", Version: "v1", Kind: name.PodStr},
		{Group: "", Version: "v1", Kind: name.SecretStr},
		{Group: "", Version: "v1", Kind: name.SAStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.RoleBindingStr},
		{Group: "rbac.authorization.k8s.io", Version: "v1", Kind: name.RoleStr},
		{Group: "policy", Version: "v1", Kind: name.PDBStr},
	}
	// The autoscaling v2 API is only available on clusters >=1.23; use v2beta2 otherwise.
	hpaVersion := "v2beta2"
	if kube.IsKubeAtLeastOrLessThanVersion(version, autoscalingV2MinK8SVersion, true) {
		hpaVersion = "v2"
	}
	return append(gvks, schema.GroupVersionKind{Group: "autoscaling", Version: hpaVersion, Kind: name.HPAStr})
}
// NamespacedResources returns the namespaced pruning resource types for the connected cluster,
// selected according to its Kubernetes version.
func (h *HelmReconciler) NamespacedResources() []schema.GroupVersionKind {
	clusterVersion, err := h.kubeClient.GetKubernetesVersion()
	if err != nil {
		// Best-effort: a nil version is passed through on error; presumably the
		// version check downstream tolerates nil and picks a default — TODO confirm.
		scope.Warnf("Failed to get kubernetes version: %v", err)
	}
	return NamespacedResources(clusterVersion)
}
// Prune removes any resources not specified in manifests generated by HelmReconciler h.
// When all is true every owned resource is pruned regardless of the manifests.
func (h *HelmReconciler) Prune(manifests name.ManifestMap, all bool) error {
	return h.runForAllTypes(func(labels map[string]string, objects *unstructured.UnstructuredList) error {
		var errs util.Errors
		if all {
			errs = util.AppendErr(errs, h.deleteResources(nil, labels, "", objects, all))
			return errs.ToError()
		}
		// Per component: delete everything owned by the component that is not in its manifest.
		for cname, manifest := range manifests.Consolidated() {
			errs = util.AppendErr(errs, h.deleteResources(object.AllObjectHashes(manifest), labels, cname, objects, all))
		}
		return errs.ToError()
	})
}
// PruneControlPlaneByRevisionWithController is called to remove specific control plane revision
// during reconciliation process of controller.
// It returns the install status and any error encountered.
func (h *HelmReconciler) PruneControlPlaneByRevisionWithController(iopSpec *v1alpha1.IstioOperatorSpec) (*v1alpha1.InstallStatus, error) {
	ns := iopv1alpha1.Namespace(iopSpec)
	if ns == "" {
		ns = constants.IstioSystemNamespace
	}
	errStatus := &v1alpha1.InstallStatus{Status: v1alpha1.InstallStatus_ERROR}
	enabledComponents, err := translate.GetEnabledComponents(iopSpec)
	if err != nil {
		return errStatus,
			fmt.Errorf("failed to get enabled components: %v", err)
	}
	pilotEnabled := false
	// check whether the istiod is enabled
	for _, c := range enabledComponents {
		if c == string(name.PilotComponentName) {
			pilotEnabled = true
			break
		}
	}
	// If istiod is enabled, check if it has any proxies connected.
	if pilotEnabled {
		cfg := h.kubeClient.RESTConfig()
		kubeClient, err := kube.NewCLIClient(kube.NewClientConfigForRestConfig(cfg), iopSpec.Revision)
		if err != nil {
			return errStatus, err
		}
		pilotExists, err := h.pilotExists(kubeClient, ns)
		if err != nil {
			return errStatus, fmt.Errorf("failed to check istiod exists: %v", err)
		}
		if pilotExists {
			// TODO(ramaraochavali): Find a better alternative instead of using debug interface
			// of istiod as it is typically not recommended in production environments.
			pids, err := proxy.GetIDsFromProxyInfo(kubeClient, ns)
			if err != nil {
				return errStatus, fmt.Errorf("failed to check proxy infos: %v", err)
			}
			// Refuse to prune while workloads still point at this revision; the caller
			// must take action (migrate proxies) first.
			if len(pids) != 0 {
				msg := fmt.Sprintf("there are proxies still pointing to the pruned control plane: %s.",
					strings.Join(pids, " "))
				st := &v1alpha1.InstallStatus{Status: v1alpha1.InstallStatus_ACTION_REQUIRED, Message: msg}
				return st, nil
			}
		}
	}
	// Remove the resources of every enabled component for this revision.
	for _, c := range enabledComponents {
		uslist, err := h.GetPrunedResources(iopSpec.Revision, false, c)
		if err != nil {
			return errStatus, err
		}
		err = h.DeleteObjectsList(uslist, c)
		if err != nil {
			return errStatus, err
		}
	}
	return &v1alpha1.InstallStatus{Status: v1alpha1.InstallStatus_HEALTHY}, nil
}
// pilotExists reports whether at least one running istiod pod exists in istioNamespace.
func (h *HelmReconciler) pilotExists(cliClient kube.CLIClient, istioNamespace string) (bool, error) {
	listOpts := metav1.ListOptions{
		LabelSelector: "app=istiod",
		FieldSelector: "status.phase=Running",
	}
	pods, err := cliClient.GetIstioPods(context.TODO(), istioNamespace, listOpts)
	if err != nil {
		return false, err
	}
	return len(pods) != 0, nil
}
// DeleteObjectsList removed resources that are in the slice of UnstructuredList.
func (h *HelmReconciler) DeleteObjectsList(objectsList []*unstructured.UnstructuredList, componentName string) error {
	var errs util.Errors
	// kube client does not differentiate API version when listing, so the same object can
	// appear more than once; track hashes already handled to deduplicate deletions.
	deletedObjects := make(map[string]bool)
	for _, ul := range objectsList {
		for i := range ul.Items {
			// Take the address of the slice element rather than of a range loop variable:
			// the K8sObject retains this pointer, and a loop variable would be overwritten
			// on the next iteration (pre-Go 1.22 loop semantics).
			obj := object.NewK8sObject(&ul.Items[i], nil, nil)
			oh := obj.Hash()
			if deletedObjects[oh] {
				continue
			}
			if err := h.deleteResource(obj, componentName, oh); err != nil {
				errs = append(errs, err)
			}
			deletedObjects[oh] = true
		}
	}
	return errs.ToError()
}
// GetPrunedResources get the list of resources to be removed
// 1. if includeClusterResources is false, we list the namespaced resources by matching revision and component labels.
// 2. if includeClusterResources is true, we list the namespaced and cluster resources by component labels only.
// If componentName is not empty, only resources associated with specific components would be returned
// UnstructuredList of objects and corresponding list of name kind hash of k8sObjects would be returned
func (h *HelmReconciler) GetPrunedResources(revision string, includeClusterResources bool, componentName string) (
	[]*unstructured.UnstructuredList, error,
) {
	var usList []*unstructured.UnstructuredList
	// Build the label set identifying resources owned by this CR / revision / component.
	labels := make(map[string]string)
	if revision != "" {
		labels[label.IoIstioRev.Name] = revision
	}
	if componentName != "" {
		labels[IstioComponentLabelStr] = componentName
	}
	if h.iop.GetName() != "" {
		labels[OwningResourceName] = h.iop.GetName()
	}
	if h.iop.GetNamespace() != "" {
		labels[OwningResourceNamespace] = h.iop.GetNamespace()
	}
	selector := klabels.Set(labels).AsSelectorPreValidated()
	resources := h.NamespacedResources()
	gvkList := append(resources, ClusterCPResources...)
	if includeClusterResources {
		// NOTE(review): this append restarts from `resources`, deliberately replacing the
		// ClusterCPResources list above with the broader purge list.
		gvkList = append(resources, AllClusterResources...)
		// Cleanup IstioOperator, which may be used with in-cluster operator.
		if ioplist := h.getIstioOperatorCR(); ioplist != nil && len(ioplist.Items) > 0 {
			usList = append(usList, ioplist)
		}
	}
	for _, gvk := range gvkList {
		objects := &unstructured.UnstructuredList{}
		objects.SetGroupVersionKind(gvk)
		// Require the component label to exist so unlabeled (user) resources are never pruned.
		componentRequirement, err := klabels.NewRequirement(IstioComponentLabelStr, selection.Exists, nil)
		if err != nil {
			return usList, err
		}
		if includeClusterResources {
			// Purge: match on component label existence only, ignoring revision/owner labels.
			s := klabels.NewSelector()
			err = h.client.List(context.TODO(), objects,
				client.MatchingLabelsSelector{Selector: s.Add(*componentRequirement)})
		} else {
			// do not prune base components or unknown components
			includeCN := []string{
				string(name.PilotComponentName),
				string(name.IngressComponentName), string(name.EgressComponentName),
				string(name.CNIComponentName), string(name.IstioOperatorComponentName),
				string(name.IstiodRemoteComponentName),
				string(name.ZtunnelComponentName),
			}
			// NOTE(review): this `err` shadows the outer one inside this branch; a List failure
			// here is handled by the `continue` below and never reaches the outer err check.
			includeRequirement, err := klabels.NewRequirement(IstioComponentLabelStr, selection.In, includeCN)
			if err != nil {
				return usList, err
			}
			if err = h.client.List(context.TODO(), objects,
				client.MatchingLabelsSelector{
					Selector: selector.Add(*includeRequirement, *componentRequirement),
				},
			); err != nil {
				continue
			}
		}
		// Outer err only reflects the includeClusterResources List call; skip this GVK on failure.
		if err != nil {
			continue
		}
		for _, obj := range objects.Items {
			objName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
			metrics.AddResource(objName, gvk.GroupKind())
		}
		if len(objects.Items) == 0 {
			continue
		}
		usList = append(usList, objects)
	}
	return usList, nil
}
// getIstioOperatorCR is a helper function to get IstioOperator CR during purge,
// otherwise the resources would be reconciled back later if there is in-cluster operator deployment.
// And it is needed to remove the IstioOperator CRD.
func (h *HelmReconciler) getIstioOperatorCR() *unstructured.UnstructuredList {
	objects, err := h.kubeClient.Dynamic().Resource(iopv1alpha1.IstioOperatorGVR).
		List(context.TODO(), metav1.ListOptions{})
	switch {
	case err == nil:
		return objects
	case kerrors.IsNotFound(err):
		// CRD not installed: nothing to clean up.
		return nil
	default:
		// Log and fall through; callers treat a nil/empty list as "nothing to prune".
		scope.Errorf("failed to list IstioOperator CR: %v", err)
		return objects
	}
}
// runForAllTypes will collect all existing resource types we care about. For each type, the callback function
// will be called with the labels used to select this type, and all objects.
// This is in internal function meant to support prune and delete
func (h *HelmReconciler) runForAllTypes(callback func(labels map[string]string, objects *unstructured.UnstructuredList) error) error {
	var errs util.Errors
	// Ultimately, we want to prune based on component labels. Each of these share a common set of labels
	// Rather than do N List() calls for each component, we will just filter for the common subset here
	// and each component will do its own filtering
	// Because we are filtering by the core labels, List() will only return items that some components will care
	// about, so we are not querying for an overly broad set of resources.
	labels, err := h.getCoreOwnerLabels()
	if err != nil {
		return err
	}
	// The component-label requirement is loop-invariant: build it and extend the selector once,
	// rather than re-adding an identical requirement to the selector on every iteration.
	componentRequirement, err := klabels.NewRequirement(IstioComponentLabelStr, selection.Exists, nil)
	if err != nil {
		return err
	}
	selector := klabels.Set(labels).AsSelectorPreValidated().Add(*componentRequirement)
	resources := append(h.NamespacedResources(), ClusterResources...)
	for _, gvk := range resources {
		// First, we collect all objects for the provided GVK
		objects := &unstructured.UnstructuredList{}
		objects.SetGroupVersionKind(gvk)
		if err := h.client.List(context.TODO(), objects, client.MatchingLabelsSelector{Selector: selector}); err != nil {
			// we only want to retrieve resources clusters
			if !(h.opts.DryRun && meta.IsNoMatchError(err)) {
				scope.Debugf("retrieving resources to prune type %s: %s", gvk.String(), err)
			}
			continue
		}
		for _, obj := range objects.Items {
			objName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())
			metrics.AddResource(objName, gvk.GroupKind())
		}
		errs = util.AppendErr(errs, callback(labels, objects))
	}
	return errs.ToError()
}
// deleteResources delete any resources from the given component that are not in the excluded map. Resource
// labels are used to identify the resources belonging to the component.
func (h *HelmReconciler) deleteResources(excluded map[string]bool, coreLabels map[string]string,
	componentName string, objects *unstructured.UnstructuredList, all bool,
) error {
	var errs util.Errors
	componentLabels := h.addComponentLabels(coreLabels, componentName)
	componentSelector := klabels.Set(componentLabels).AsSelectorPreValidated()
	for _, o := range objects.Items {
		obj := object.NewK8sObject(&o, nil, nil)
		oh := obj.Hash()
		if !all {
			switch {
			case !componentSelector.Matches(klabels.Set(o.GetLabels())):
				// Label mismatch. Provided objects don't select against the component, so this likely means the object
				// is for another component.
				continue
			case excluded[oh]:
				// Present in the component's manifest; keep it.
				continue
			case o.GetLabels()[OwningResourceNotPruned] == "true":
				// Explicitly opted out of pruning.
				continue
			}
		}
		if err := h.deleteResource(obj, componentName, oh); err != nil {
			errs = append(errs, err)
		}
	}
	if all {
		cache.FlushObjectCaches()
	}
	return errs.ToError()
}
// deleteResource deletes a single owned resource from the cluster, updating the object cache
// and deletion metrics. A missing resource is treated as already deleted, not as an error.
func (h *HelmReconciler) deleteResource(obj *object.K8sObject, componentName, oh string) error {
	if h.opts.DryRun {
		h.opts.Log.LogAndPrintf("Not pruning object %s because of dry run.", oh)
		return nil
	}
	u := obj.UnstructuredObject()
	objGvk := u.GroupVersionKind()
	// IstioOperator CRs carry finalizers that would block deletion; clear them first.
	if u.GetKind() == name.IstioOperatorStr {
		u.SetFinalizers([]string{})
		if err := h.client.Patch(context.TODO(), u, client.Merge); err != nil {
			scope.Errorf("failed to patch IstioOperator CR: %s, %v", u.GetName(), err)
		}
	}
	err := h.client.Delete(context.TODO(), u, client.PropagationPolicy(metav1.DeletePropagationBackground))
	scope.Debugf("Deleting %s (%s/%v)", oh, h.iop.Name, h.iop.Spec.Revision)
	if err != nil {
		// do not return error if resources are not found
		if kerrors.IsNotFound(err) {
			h.opts.Log.LogAndPrintf("object: %s is not being deleted because it no longer exists", obj.Hash())
			return nil
		}
		return err
	}
	if componentName == "" {
		cache.FlushObjectCaches()
	} else {
		h.removeFromObjectCache(componentName, oh)
	}
	metrics.ResourceDeletionTotal.
		With(metrics.ResourceKindLabel.Value(util.GKString(objGvk.GroupKind()))).
		Increment()
	h.addPrunedKind(objGvk.GroupKind())
	metrics.RemoveResource(obj.FullName(), objGvk.GroupKind())
	h.opts.Log.LogAndPrintf("  Removed %s.", oh)
	return nil
}
// removeFromObjectCache removes the object with hash objHash from componentName's object cache.
func (h *HelmReconciler) removeFromObjectCache(componentName, objHash string) {
	crHash, err := h.getCRHash(componentName)
	if err != nil {
		// Best-effort: log and fall through with the zero-value crHash; presumably
		// RemoveObject on an unknown cache is a no-op — TODO confirm.
		scope.Error(err.Error())
	}
	cache.RemoveObject(crHash, objHash)
	scope.Infof("Removed object %s from Cache.", objHash)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"istio.io/api/label"
"istio.io/api/operator/v1alpha1"
revtag "istio.io/istio/istioctl/pkg/tag"
"istio.io/istio/istioctl/pkg/util/formatting"
istioV1Alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/util/clog"
"istio.io/istio/operator/pkg/util/progress"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/webhook"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/version"
)
// HelmReconciler reconciles resources rendered by a set of helm charts.
type HelmReconciler struct {
	// client is the controller-runtime client used to create/patch/delete rendered resources.
	client client.Client
	// kubeClient is the Istio kube client, used for version discovery and dynamic access.
	kubeClient kube.Client
	// iop is the IstioOperator CR being reconciled.
	iop *istioV1Alpha1.IstioOperator
	// opts controls dry-run, waiting, logging and pruning behavior.
	opts *Options
	// copy of the last generated manifests.
	manifests name.ManifestMap
	// dependencyWaitCh is a map of signaling channels. A parent with children ch1...chN will signal
	// dependencyWaitCh[ch1]...dependencyWaitCh[chN] when it's completely installed.
	dependencyWaitCh map[name.ComponentName]chan struct{}
	// The fields below are for metrics and reporting
	// countLock guards prunedKindSet.
	countLock *sync.Mutex
	// prunedKindSet records the GroupKinds pruned during reconciliation, for reporting.
	prunedKindSet map[schema.GroupKind]struct{}
}
// Options are options for HelmReconciler.
type Options struct {
	// DryRun executes all actions but does not write anything to the cluster.
	DryRun bool
	// Log is a console logger for user visible CLI output.
	Log clog.Logger
	// Wait determines if we will wait for resources to be fully applied. Only applies to components that have no
	// dependencies.
	Wait bool
	// WaitTimeout controls the amount of time to wait for resources in a component to become ready before giving up.
	WaitTimeout time.Duration
	// ProgressLog tracks the installation progress for all components.
	ProgressLog *progress.Log
	// Force ignores validation errors
	Force bool
	// SkipPrune will skip pruning
	SkipPrune bool
}
// defaultOptions is the fallback configuration used when NewHelmReconciler is called with nil options.
var defaultOptions = &Options{
	Log:         clog.NewDefaultLogger(),
	ProgressLog: progress.NewLog(),
}
// NewHelmReconciler creates a HelmReconciler and returns a ptr to it
func NewHelmReconciler(client client.Client, kubeClient kube.Client, iop *istioV1Alpha1.IstioOperator, opts *Options) (*HelmReconciler, error) {
	if opts == nil {
		// Work on a copy of the defaults: the fallback logic below writes to opts, and
		// aliasing defaultOptions directly would mutate shared package state across calls.
		o := *defaultOptions
		opts = &o
	}
	if opts.ProgressLog == nil {
		opts.ProgressLog = progress.NewLog()
	}
	if opts.WaitTimeout == 0 {
		if waitForResourcesTimeoutStr, found := os.LookupEnv("WAIT_FOR_RESOURCES_TIMEOUT"); found {
			if waitForResourcesTimeout, err := time.ParseDuration(waitForResourcesTimeoutStr); err == nil {
				opts.WaitTimeout = waitForResourcesTimeout
			} else {
				scope.Warnf("invalid env variable value: %s for 'WAIT_FOR_RESOURCES_TIMEOUT'! falling back to default value...", waitForResourcesTimeoutStr)
				// fallback to default wait resource timeout
				opts.WaitTimeout = defaultWaitResourceTimeout
			}
		} else {
			// fallback to default wait resource timeout
			opts.WaitTimeout = defaultWaitResourceTimeout
		}
	}
	if iop == nil {
		// allows controller code to function for cases where IOP is not provided (e.g. operator remove).
		iop = &istioV1Alpha1.IstioOperator{}
		iop.Spec = &v1alpha1.IstioOperatorSpec{}
	}
	return &HelmReconciler{
		client:           client,
		kubeClient:       kubeClient,
		iop:              iop,
		opts:             opts,
		dependencyWaitCh: initDependencies(),
		countLock:        &sync.Mutex{},
		prunedKindSet:    make(map[schema.GroupKind]struct{}),
	}, nil
}
// initDependencies initializes the dependencies channel tree.
func initDependencies() map[name.ComponentName]chan struct{} {
	waitChannels := make(map[name.ComponentName]chan struct{})
	for _, children := range ComponentDependencies {
		for _, child := range children {
			// Buffered with capacity 1 so the parent never blocks when signaling completion.
			waitChannels[child] = make(chan struct{}, 1)
		}
	}
	return waitChannels
}
// Reconcile reconciles the associated resources: ensures the namespace, renders the charts,
// validates webhooks, applies all components in dependency order, then prunes stale resources.
func (h *HelmReconciler) Reconcile() (*v1alpha1.InstallStatus, error) {
	if err := util.CreateNamespace(h.kubeClient.Kube(), istioV1Alpha1.Namespace(h.iop.Spec), h.networkName(), h.opts.DryRun); err != nil {
		return nil, err
	}
	manifestMap, err := h.RenderCharts()
	if err != nil {
		return nil, err
	}
	if err := h.analyzeWebhooks(manifestMap[name.PilotComponentName]); err != nil {
		if !h.opts.Force {
			return nil, err
		}
		scope.Error("invalid webhook configs; continuing because of --force")
	}
	status := h.processRecursive(manifestMap)
	if h.opts.SkipPrune || h.opts.DryRun {
		return status, nil
	}
	h.opts.ProgressLog.SetState(progress.StatePruning)
	pruneErr := h.Prune(manifestMap, false)
	h.reportPrunedObjectKind()
	return status, pruneErr
}
// processRecursive processes the given manifests in an order of dependencies defined in h. Dependencies are a tree,
// where a child must wait for the parent to complete before starting.
func (h *HelmReconciler) processRecursive(manifests name.ManifestMap) *v1alpha1.InstallStatus {
	componentStatus := make(map[string]*v1alpha1.InstallStatus_VersionStatus)
	// mu protects the shared InstallStatus componentStatus across goroutines
	var mu sync.Mutex
	// wg waits for all manifest processing goroutines to finish
	var wg sync.WaitGroup
	for c, ms := range manifests {
		// Shadow the loop variables so each goroutine captures its own copy (pre-Go 1.22 semantics).
		c, ms := c, ms
		wg.Add(1)
		go func() {
			var appliedResult AppliedResult
			defer wg.Done()
			// Block until this component's parent (if any) signals completion on our channel.
			if s := h.dependencyWaitCh[c]; s != nil {
				scope.Infof("%s is waiting on dependency...", c)
				<-s
				scope.Infof("Dependency for %s has completed, proceeding.", c)
			}
			// Possible paths for status are RECONCILING -> {NONE, ERROR, HEALTHY}. NONE means component has no resources.
			// In NONE case, the component is not shown in overall status.
			mu.Lock()
			setStatus(componentStatus, c, v1alpha1.InstallStatus_RECONCILING, nil)
			mu.Unlock()
			status := v1alpha1.InstallStatus_NONE
			var err error
			if len(ms) != 0 {
				m := name.Manifest{
					Name:    c,
					Content: name.MergeManifestSlices(ms),
				}
				appliedResult, err = h.ApplyManifest(m)
				if err != nil {
					status = v1alpha1.InstallStatus_ERROR
				} else if appliedResult.Succeed() {
					status = v1alpha1.InstallStatus_HEALTHY
				}
			}
			mu.Lock()
			setStatus(componentStatus, c, status, err)
			mu.Unlock()
			// Signal all the components that depend on us.
			// Channels are buffered (capacity 1), so sends do not block.
			for _, ch := range ComponentDependencies[c] {
				scope.Infof("Unblocking dependency %s.", ch)
				h.dependencyWaitCh[ch] <- struct{}{}
			}
		}()
	}
	wg.Wait()
	metrics.ReportOwnedResourceCounts()
	out := &v1alpha1.InstallStatus{
		Status:          overallStatus(componentStatus),
		ComponentStatus: componentStatus,
	}
	return out
}
// Delete resources associated with the custom resource instance
func (h *HelmReconciler) Delete() error {
	defer func() {
		metrics.ReportOwnedResourceCounts()
		h.reportPrunedObjectKind()
	}()
	iop := h.iop
	// A revisionless IOP is a full uninstall: prune everything, including shared resources.
	if iop.Spec.Revision == "" {
		return h.Prune(nil, true)
	}
	// Deleting an IOP that carries a revision:
	// the status field is updated to pending if proxies still point at this revision,
	// and shared resources are not pruned — same effect as `istioctl uninstall --revision foo`.
	status, err := h.PruneControlPlaneByRevisionWithController(iop.Spec)
	if err != nil {
		return err
	}
	// Check status here because a terminating iop's status can't be updated.
	if status.Status == v1alpha1.InstallStatus_ACTION_REQUIRED {
		return fmt.Errorf("action is required before deleting the iop instance: %s", status.Message)
	}
	// Updating status takes no effect for terminating resources.
	return h.SetStatusComplete(status)
}
// DeleteIOPInClusterIfExists removes the previous IstioOperator CR from the cluster, if present.
func (h *HelmReconciler) DeleteIOPInClusterIfExists(iop *istioV1Alpha1.IstioOperator) {
	existing := &unstructured.Unstructured{}
	existing.SetGroupVersionKind(istioV1Alpha1.IstioOperatorGVK)
	key := client.ObjectKeyFromObject(iop)
	// Only attempt deletion when the object actually exists; the delete itself is best-effort.
	if getErr := h.client.Get(context.TODO(), key, existing); getErr == nil {
		_ = h.client.Delete(context.TODO(), existing)
	}
}
// SetStatusBegin updates the status field on the IstioOperator instance before reconciling.
func (h *HelmReconciler) SetStatusBegin() error {
	existing := &istioV1Alpha1.IstioOperator{}
	if err := h.getClient().Get(context.TODO(), config.NamespacedName(h.iop), existing); err != nil {
		if runtime.IsNotRegisteredError(err) {
			// CRD not yet installed in cluster, nothing to update.
			return nil
		}
		return fmt.Errorf("failed to get IstioOperator before updating status due to %v", err)
	}
	if existing.Status == nil {
		existing.Status = &v1alpha1.InstallStatus{Status: v1alpha1.InstallStatus_RECONCILING}
	} else {
		// Reset every per-component status, and the overall status, back to RECONCILING.
		componentStatus := existing.Status.ComponentStatus
		for componentName := range componentStatus {
			componentStatus[componentName] = &v1alpha1.InstallStatus_VersionStatus{
				Status: v1alpha1.InstallStatus_RECONCILING,
			}
		}
		existing.Status.Status = v1alpha1.InstallStatus_RECONCILING
	}
	return h.getClient().Status().Update(context.TODO(), existing)
}
// SetStatusComplete updates the status field on the IstioOperator instance based on the resulting err parameter.
func (h *HelmReconciler) SetStatusComplete(status *v1alpha1.InstallStatus) error {
	current := &istioV1Alpha1.IstioOperator{}
	err := h.getClient().Get(context.TODO(), config.NamespacedName(h.iop), current)
	if err != nil {
		return fmt.Errorf("failed to get IstioOperator before updating status due to %v", err)
	}
	current.Status = status
	return h.getClient().Status().Update(context.TODO(), current)
}
// setStatus sets the status for the component with the given name, which is a key in the given map.
// If the status is InstallStatus_NONE, the component name is deleted from the map.
// Otherwise, if the map key/value is missing, one is created.
func setStatus(s map[string]*v1alpha1.InstallStatus_VersionStatus, componentName name.ComponentName, status v1alpha1.InstallStatus_Status, err error) {
	key := string(componentName)
	// NONE means the component produced no resources; drop it from the map entirely.
	if status == v1alpha1.InstallStatus_NONE {
		delete(s, key)
		return
	}
	entry, ok := s[key]
	if !ok {
		entry = &v1alpha1.InstallStatus_VersionStatus{}
		s[key] = entry
	}
	entry.Status = status
	if err != nil {
		entry.Error = err.Error()
	}
}
// overallStatus returns the summary status over all components.
// - If all components are HEALTHY, overall status is HEALTHY.
// - If one or more components are RECONCILING and others are HEALTHY, overall status is RECONCILING.
// - If one or more components are UPDATING and others are HEALTHY, overall status is UPDATING.
// - If components are a mix of RECONCILING, UPDATING and HEALTHY, overall status is UPDATING.
// - If any component is in ERROR state, overall status is ERROR.
func overallStatus(componentStatus map[string]*v1alpha1.InstallStatus_VersionStatus) v1alpha1.InstallStatus_Status {
	ret := v1alpha1.InstallStatus_HEALTHY
	for _, cs := range componentStatus {
		switch cs.Status {
		case v1alpha1.InstallStatus_ERROR:
			// ERROR dominates every other state, so stop scanning immediately.
			return v1alpha1.InstallStatus_ERROR
		case v1alpha1.InstallStatus_UPDATING:
			// UPDATING outranks RECONCILING, but keep scanning in case of an ERROR.
			// (Breaking out here would mask an ERROR later in the map, and map
			// iteration order is random.)
			ret = v1alpha1.InstallStatus_UPDATING
		case v1alpha1.InstallStatus_RECONCILING:
			// RECONCILING applies only if no component has reported UPDATING.
			if ret != v1alpha1.InstallStatus_UPDATING {
				ret = v1alpha1.InstallStatus_RECONCILING
			}
		}
	}
	return ret
}
// getCoreOwnerLabels returns a map of labels for associating installation resources. This is the common
// labels shared between all resources; see getOwnerLabels to get labels per-component labels
func (h *HelmReconciler) getCoreOwnerLabels() (map[string]string, error) {
	crName, err := h.getCRName()
	if err != nil {
		return nil, err
	}
	crNamespace, err := h.getCRNamespace()
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		operatorLabelStr:     operatorReconcileStr,
		istioVersionLabelStr: version.Info.Version,
	}
	if crName != "" {
		labels[OwningResourceName] = crName
	}
	if crNamespace != "" {
		labels[OwningResourceNamespace] = crNamespace
	}
	// An unset revision on the CR maps to the reserved "default" revision label.
	revision := "default"
	if h.iop != nil && h.iop.Spec.Revision != "" {
		revision = h.iop.Spec.Revision
	}
	labels[label.IoIstioRev.Name] = revision
	return labels, nil
}
// addComponentLabels returns a copy of coreLabels with the component-name label added.
func (h *HelmReconciler) addComponentLabels(coreLabels map[string]string, componentName string) map[string]string {
	merged := make(map[string]string, len(coreLabels)+1)
	for key, value := range coreLabels {
		merged[key] = value
	}
	merged[IstioComponentLabelStr] = componentName
	return merged
}
// getOwnerLabels returns a map of labels for the given component name, revision and owning CR resource name.
func (h *HelmReconciler) getOwnerLabels(componentName string) (map[string]string, error) {
	core, err := h.getCoreOwnerLabels()
	if err != nil {
		return nil, err
	}
	return h.addComponentLabels(core, componentName), nil
}
// applyLabelsAndAnnotations applies owner labels and annotations to the object.
func (h *HelmReconciler) applyLabelsAndAnnotations(obj runtime.Object, componentName string) error {
	ownerLabels, err := h.getOwnerLabels(componentName)
	if err != nil {
		return err
	}
	for key, value := range ownerLabels {
		if err := util.SetLabel(obj, key, value); err != nil {
			return err
		}
	}
	return nil
}
// getCRName returns the name of the CR associated with h.
func (h *HelmReconciler) getCRName() (string, error) {
	// No CR means no name; this is not an error.
	if h.iop == nil {
		return "", nil
	}
	accessor, err := meta.Accessor(h.iop)
	if err != nil {
		return "", err
	}
	return accessor.GetName(), nil
}
// getCRHash returns the cluster unique hash of the CR associated with h.
func (h *HelmReconciler) getCRHash(componentName string) (string, error) {
	crName, err := h.getCRName()
	if err != nil {
		return "", err
	}
	crNamespace, err := h.getCRNamespace()
	if err != nil {
		return "", err
	}
	// Include the API server host (when known) so the hash is unique across clusters.
	host := ""
	if h.kubeClient != nil && h.kubeClient.RESTConfig() != nil {
		host = h.kubeClient.RESTConfig().Host
	}
	parts := []string{crName, crNamespace, componentName, host}
	return strings.Join(parts, "-"), nil
}
// getCRNamespace returns the namespace of the CR associated with h.
func (h *HelmReconciler) getCRNamespace() (string, error) {
	// No CR means no namespace; this is not an error.
	if h.iop == nil {
		return "", nil
	}
	accessor, err := meta.Accessor(h.iop)
	if err != nil {
		return "", err
	}
	return accessor.GetNamespace(), nil
}
// getClient returns the kubernetes client associated with this HelmReconciler
func (h *HelmReconciler) getClient() client.Client {
	return h.client
}
// addPrunedKind records a GroupKind as pruned so that reportPrunedObjectKind can
// later emit one metric increment per pruned kind. Guarded by countLock because
// pruning may happen from multiple goroutines.
func (h *HelmReconciler) addPrunedKind(gk schema.GroupKind) {
	h.countLock.Lock()
	defer h.countLock.Unlock()
	h.prunedKindSet[gk] = struct{}{}
}
// reportPrunedObjectKind emits one prune-metric increment for each resource kind
// recorded via addPrunedKind.
func (h *HelmReconciler) reportPrunedObjectKind() {
	h.countLock.Lock()
	defer h.countLock.Unlock()
	for gk := range h.prunedKindSet {
		kindLabel := metrics.ResourceKindLabel.Value(util.GKString(gk))
		metrics.ResourcePruneTotal.With(kindLabel).Increment()
	}
}
// analyzeWebhooks runs the webhook analyzer over the webhooks about to be installed
// together with the mutating webhooks already in the cluster, and returns an error
// describing any conflict that creating a default revision tag would introduce.
func (h *HelmReconciler) analyzeWebhooks(whs []string) error {
	if len(whs) == 0 {
		return nil
	}
	// Add webhook manifests to be applied
	var localWebhookYAMLReaders []local.ReaderSource
	var parsedK8sObjects object.K8sObjects
	exists := revtag.PreviousInstallExists(context.Background(), h.kubeClient.Kube())
	for i, wh := range whs {
		k8sObjects, err := object.ParseK8sObjectsFromYAMLManifest(wh)
		if err != nil {
			return err
		}
		// Re-serialize so the analyzer receives normalized YAML.
		objYaml, err := k8sObjects.YAMLManifest()
		if err != nil {
			return err
		}
		// Here if we need to create a default tag, we need to skip the webhooks that are going to be deactivated.
		if !DetectIfTagWebhookIsNeeded(h.iop, exists) {
			whReaderSource := local.ReaderSource{
				Name:   fmt.Sprintf("installed-webhook-%d", i),
				Reader: strings.NewReader(objYaml),
			}
			localWebhookYAMLReaders = append(localWebhookYAMLReaders, whReaderSource)
		}
		parsedK8sObjects = append(parsedK8sObjects, k8sObjects...)
	}
	// SkipServiceCheck: the webhook services may not exist yet at analysis time.
	sa := local.NewSourceAnalyzer(analysis.Combine("webhook", &webhook.Analyzer{
		SkipServiceCheck: true,
	}), resource.Namespace(h.iop.Spec.GetNamespace()), resource.Namespace(istioV1Alpha1.Namespace(h.iop.Spec)), nil)
	// Add in-cluster webhooks
	objects := &unstructured.UnstructuredList{}
	objects.SetGroupVersionKind(gvk.MutatingWebhookConfiguration.Kubernetes())
	err := h.client.List(context.Background(), objects, &client.ListOptions{})
	if err != nil {
		return err
	}
	for i, obj := range objects.Items {
		objYAML, err := object.NewK8sObject(&obj, nil, nil).YAML()
		if err != nil {
			return err
		}
		whReaderSource := local.ReaderSource{
			Name:   fmt.Sprintf("in-cluster-webhook-%d", i),
			Reader: strings.NewReader(string(objYAML)),
		}
		err = sa.AddReaderKubeSource([]local.ReaderSource{whReaderSource})
		if err != nil {
			return err
		}
	}
	err = sa.AddReaderKubeSource(localWebhookYAMLReaders)
	if err != nil {
		return err
	}
	// Analyze webhooks
	res, err := sa.Analyze(make(chan struct{}))
	if err != nil {
		return err
	}
	// Only surface messages that concern the webhooks being installed here.
	relevantMessages := filterOutBasedOnResources(res.Messages, parsedK8sObjects)
	if len(relevantMessages) > 0 {
		o, err := formatting.Print(relevantMessages, formatting.LogFormat, false)
		if err != nil {
			return err
		}
		return fmt.Errorf("creating default tag would conflict:\n%v", o)
	}
	return nil
}
// filterOutBasedOnResources keeps only the messages whose resource name matches
// one of the given parsed objects.
func filterOutBasedOnResources(ms diag.Messages, resources object.K8sObjects) diag.Messages {
	filtered := diag.Messages{}
	for _, msg := range ms {
		msgName := msg.Resource.Metadata.FullName.Name.String()
		for _, res := range resources {
			if res.Name == msgName {
				filtered = append(filtered, msg)
				break
			}
		}
	}
	return filtered
}
// networkName returns .Values.global.network from the IstioOperator spec, or "" when unset.
func (h *HelmReconciler) networkName() string {
	if h.iop.Spec.GetValues() == nil {
		return ""
	}
	global, ok := h.iop.Spec.Values.AsMap()["global"].(map[string]any)
	if !ok {
		return ""
	}
	if network, ok := global["network"].(string); ok {
		return network
	}
	return ""
}
// ProcessDefaultWebhookOptions holds the options used when creating the default
// revision tag webhook.
type ProcessDefaultWebhookOptions struct {
	// Namespace is the namespace the tag webhook manifests are generated for.
	Namespace string
	// DryRun, when true, generates the manifests without applying them to the cluster.
	DryRun bool
}
// DetectIfTagWebhookIsNeeded reports whether a default revision tag webhook should
// be created for the given IstioOperator, given whether a previous install exists.
func DetectIfTagWebhookIsNeeded(iop *istioV1Alpha1.IstioOperator, exists bool) bool {
	// When the operator manages webhooks itself, no tag webhook is ever needed.
	if operatorManageWebhooks(iop) {
		return false
	}
	// A default installation is a revisionless install with Pilot explicitly enabled.
	isDefaultInstallation := iop.Spec.Revision == "" &&
		iop.Spec.Components.Pilot != nil &&
		iop.Spec.Components.Pilot.Enabled.Value
	return !exists || isDefaultInstallation
}
// ProcessDefaultWebhook creates the default revision tag webhook when needed,
// returning whether it was processed.
func ProcessDefaultWebhook(client kube.Client, iop *istioV1Alpha1.IstioOperator, exists bool, opt *ProcessDefaultWebhookOptions) (processed bool, err error) {
	// Detect whether previous installation exists prior to performing the installation.
	if !DetectIfTagWebhookIsNeeded(iop, exists) {
		return false, nil
	}
	revision := iop.Spec.Revision
	if revision == "" {
		revision = revtag.DefaultRevisionName
	}
	genOpts := &revtag.GenerateOptions{
		Tag:                  revtag.DefaultRevisionName,
		Revision:             revision,
		Overwrite:            true,
		AutoInjectNamespaces: validateEnableNamespacesByDefault(iop),
		// Mark the tag webhook so pruning leaves it in place.
		CustomLabels: map[string]string{OwningResourceNotPruned: "true"},
		Generate:     opt.DryRun,
	}
	// If the tag cannot be created this could be a remote cluster install;
	// deliberately don't fail out on a Generate error.
	tagManifests, genErr := revtag.Generate(context.Background(), client, genOpts, opt.Namespace)
	if genErr == nil && !opt.DryRun {
		if applyErr := applyManifests(client, tagManifests); applyErr != nil {
			return false, applyErr
		}
	}
	return true, nil
}
// applyManifests server-side-applies the webhook objects contained in the given
// multi-document YAML string. Only MutatingWebhookConfiguration and
// ValidatingWebhookConfiguration objects are supported.
func applyManifests(kubeClient kube.Client, manifests string) error {
	yamls := strings.Split(manifests, helm.YAMLSeparator)
	for _, yml := range yamls {
		if strings.TrimSpace(yml) == "" {
			continue
		}
		obj := &unstructured.Unstructured{}
		if err := yaml.Unmarshal([]byte(yml), obj); err != nil {
			return fmt.Errorf("failed to unmarshal YAML: %w", err)
		}
		// Map the object kind to its GVR; reject anything else up front instead of
		// issuing a Patch against an empty GroupVersionResource, which fails with an
		// obscure dynamic-client error.
		var ogvr schema.GroupVersionResource
		switch obj.GetKind() {
		case name.MutatingWebhookConfigurationStr:
			ogvr = gvr.MutatingWebhookConfiguration
		case name.ValidatingWebhookConfigurationStr:
			ogvr = gvr.ValidatingWebhookConfiguration
		default:
			return fmt.Errorf("unsupported kind %q in webhook manifest", obj.GetKind())
		}
		// Force server-side apply so repeated installs converge on the same object.
		t := true
		_, err := kubeClient.Dynamic().Resource(ogvr).Namespace(obj.GetNamespace()).Patch(
			context.TODO(), obj.GetName(), types.ApplyPatchType, []byte(yml), metav1.PatchOptions{
				Force:        &t,
				FieldManager: fieldOwnerOperator,
			})
		if err != nil {
			return fmt.Errorf("failed to apply YAML: %w", err)
		}
	}
	return nil
}
// operatorManageWebhooks returns .Values.global.operatorManageWebhooks from the Istio Operator.
func operatorManageWebhooks(iop *istioV1Alpha1.IstioOperator) bool {
	if iop.Spec.GetValues() == nil {
		return false
	}
	global, ok := iop.Spec.Values.AsMap()["global"].(map[string]any)
	if !ok {
		return false
	}
	// Missing or non-bool values default to false.
	manage, ok := global["operatorManageWebhooks"].(bool)
	return ok && manage
}
// validateEnableNamespacesByDefault checks whether there is .Values.sidecarInjectorWebhook.enableNamespacesByDefault set in the Istio Operator.
// Should be used in installer when deciding whether to enable an automatic sidecar injection in all namespaces.
func validateEnableNamespacesByDefault(iop *istioV1Alpha1.IstioOperator) bool {
	if iop == nil || iop.Spec == nil || iop.Spec.Values == nil {
		return false
	}
	sidecar, ok := iop.Spec.Values.AsMap()["sidecarInjectorWebhook"].(map[string]any)
	if !ok {
		return false
	}
	// Missing or non-bool values default to false.
	enabled, ok := sidecar["enableNamespacesByDefault"].(bool)
	return ok && enabled
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"fmt"
"istio.io/istio/operator/pkg/controlplane"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/validate"
)
// RenderCharts renders charts for h.
func (h *HelmReconciler) RenderCharts() (name.ManifestMap, error) {
	iopSpec := h.iop.Spec
	if err := validate.CheckIstioOperatorSpec(iopSpec, false); err != nil {
		if !h.opts.Force {
			return nil, err
		}
		// --force downgrades a validation failure to a warning.
		h.opts.Log.PrintErr(fmt.Sprintf("spec invalid; continuing because of --force: %v\n", err))
	}
	ver, err := h.kubeClient.GetKubernetesVersion()
	if err != nil {
		return nil, err
	}
	cp, err := controlplane.NewIstioControlPlane(iopSpec, translate.NewTranslator(), nil, ver)
	if err != nil {
		return nil, err
	}
	if err := cp.Run(); err != nil {
		return nil, fmt.Errorf("failed to create Istio control plane with spec: \n%v\nerror: %s", iopSpec, err)
	}
	// Cache the rendered manifests on the reconciler before returning them.
	manifests, errs := cp.RenderManifest()
	h.manifests = manifests
	if errs != nil {
		return manifests, errs.ToError()
	}
	return manifests, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package helmreconciler
import (
"context"
"fmt"
"sort"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
kctldeployment "k8s.io/kubectl/pkg/util/deployment"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/util/progress"
"istio.io/istio/pkg/kube"
)
const (
	// defaultWaitResourceTimeout is the maximum wait time for all resources(namespace/deployment/pod) to be created.
	defaultWaitResourceTimeout = 300 * time.Second
	// cRDPollInterval is how often the state of CRDs is polled when waiting for their creation.
	cRDPollInterval = 500 * time.Millisecond
	// cRDPollTimeout is the maximum wait time for all CRDs to be created.
	cRDPollTimeout = 60 * time.Second
)
// deployment holds associated replicaSets for a deployment
type deployment struct {
	// replicaSets is the newest ReplicaSet owned by the deployment — a single
	// ReplicaSet despite the plural field name.
	replicaSets *appsv1.ReplicaSet
	deployment  *appsv1.Deployment
}
// WaitForResources polls to get the current status of all pods, PVCs, and Services
// until all are ready or a timeout is reached
func WaitForResources(objects object.K8sObjects, client kube.Client,
	waitTimeout time.Duration, dryRun bool, l *progress.ManifestLog,
) error {
	if dryRun || TestMode {
		return nil
	}
	// CRDs must be Established before dependent resources can be created.
	if err := waitForCRDs(objects, client); err != nil {
		return err
	}
	var notReady []string
	var debugInfo map[string]string
	// Check if we are ready immediately, to avoid the 2s delay below when we are already ready
	if ready, _, _, err := waitForResources(objects, client.Kube(), l); err == nil && ready {
		return nil
	}
	errPoll := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, waitTimeout, false, func(context.Context) (bool, error) {
		isReady, notReadyObjects, debugInfoObjects, err := waitForResources(objects, client.Kube(), l)
		notReady = notReadyObjects
		debugInfo = debugInfoObjects
		return isReady, err
	})
	messages := []string{}
	for _, id := range notReady {
		if debug, f := debugInfo[id]; f {
			messages = append(messages, fmt.Sprintf(" %s (%s)", id, debug))
		} else {
			// No debug info was collected for this object; still report its ID.
			// (Previously this formatted the empty debug string, dropping the
			// object name from the timeout message entirely.)
			messages = append(messages, fmt.Sprintf(" %s", id))
		}
	}
	if errPoll != nil {
		// Build the error directly; passing a pre-formatted string to Errorf as the
		// format argument is a go vet violation and garbles any '%' it contains.
		return fmt.Errorf("resources not ready after %v: %v\n%s", waitTimeout, errPoll, strings.Join(messages, "\n"))
	}
	return nil
}
// waitForResources fetches the current state of every waitable object (namespaces,
// deployments, daemonsets, statefulsets) and returns overall readiness, the IDs of
// not-ready objects, and per-object debug info.
func waitForResources(objects object.K8sObjects, cs kubernetes.Interface, l *progress.ManifestLog) (bool, []string, map[string]string, error) {
	// NOTE(review): pods is never appended to in this function, so podsReady below is
	// trivially true; pod-level failures surface only through deploymentsReady's debug
	// info. Confirm whether standalone Pod objects were ever meant to be tracked here.
	pods := []corev1.Pod{}
	deployments := []deployment{}
	daemonsets := []*appsv1.DaemonSet{}
	statefulsets := []*appsv1.StatefulSet{}
	namespaces := []corev1.Namespace{}
	for _, o := range objects {
		kind := o.GroupVersionKind().Kind
		switch kind {
		case name.NamespaceStr:
			namespace, err := cs.CoreV1().Namespaces().Get(context.TODO(), o.Name, metav1.GetOptions{})
			if err != nil {
				return false, nil, nil, err
			}
			namespaces = append(namespaces, *namespace)
		case name.DeploymentStr:
			currentDeployment, err := cs.AppsV1().Deployments(o.Namespace).Get(context.TODO(), o.Name, metav1.GetOptions{})
			if err != nil {
				return false, nil, nil, err
			}
			// Readiness is judged against the newest ReplicaSet of the deployment.
			_, _, newReplicaSet, err := kctldeployment.GetAllReplicaSets(currentDeployment, cs.AppsV1())
			if err != nil || newReplicaSet == nil {
				return false, nil, nil, err
			}
			newDeployment := deployment{
				newReplicaSet,
				currentDeployment,
			}
			deployments = append(deployments, newDeployment)
		case name.DaemonSetStr:
			ds, err := cs.AppsV1().DaemonSets(o.Namespace).Get(context.TODO(), o.Name, metav1.GetOptions{})
			if err != nil {
				return false, nil, nil, err
			}
			daemonsets = append(daemonsets, ds)
		case name.StatefulSetStr:
			sts, err := cs.AppsV1().StatefulSets(o.Namespace).Get(context.TODO(), o.Name, metav1.GetOptions{})
			if err != nil {
				return false, nil, nil, err
			}
			statefulsets = append(statefulsets, sts)
		}
	}
	resourceDebugInfo := map[string]string{}
	dr, dnr := deploymentsReady(cs, deployments, resourceDebugInfo)
	dsr, dsnr := daemonsetsReady(daemonsets)
	stsr, stsnr := statefulsetsReady(statefulsets)
	nsr, nnr := namespacesReady(namespaces)
	pr, pnr := podsReady(pods)
	isReady := dr && nsr && dsr && stsr && pr
	notReady := append(append(append(append(nnr, dnr...), pnr...), dsnr...), stsnr...)
	if !isReady {
		// Surface the waiting set on the progress log for user feedback.
		l.ReportWaiting(notReady)
	}
	return isReady, notReady, resourceDebugInfo, nil
}
// waitForCRDs blocks until every CRD in objects reports the Established condition,
// polling at cRDPollInterval for up to cRDPollTimeout. Returns nil immediately when
// objects contains no CRDs.
func waitForCRDs(objects object.K8sObjects, client kube.Client) error {
	var crdNames []string
	for _, o := range object.KindObjects(objects, name.CRDStr) {
		crdNames = append(crdNames, o.Name)
	}
	if len(crdNames) == 0 {
		return nil
	}
	errPoll := wait.PollUntilContextTimeout(context.Background(), cRDPollInterval, cRDPollTimeout, false, func(context.Context) (bool, error) {
	descriptor:
		for _, crdName := range crdNames {
			crd, errGet := client.Ext().ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crdName, metav1.GetOptions{})
			if errGet != nil {
				return false, errGet
			}
			for _, cond := range crd.Status.Conditions {
				switch cond.Type {
				case apiextensions.Established:
					if cond.Status == apiextensions.ConditionTrue {
						scope.Infof("established CRD %s", crdName)
						// This CRD is ready; move on to the next name.
						continue descriptor
					}
				case apiextensions.NamesAccepted:
					if cond.Status == apiextensions.ConditionFalse {
						scope.Warnf("name conflict for %v: %v", crdName, cond.Reason)
					}
				}
			}
			// Established condition not yet present/true; keep polling.
			scope.Infof("missing status condition for %q", crdName)
			return false, nil
		}
		return true, nil
	})
	if errPoll != nil {
		scope.Errorf("failed to verify CRD creation; %s", errPoll)
		// Wrap with %w so callers can unwrap the underlying poll/context error.
		return fmt.Errorf("failed to verify CRD creation: %w", errPoll)
	}
	scope.Info("Finished applying CRDs.")
	return nil
}
// getPods lists the pods in namespace matching selector.
func getPods(client kubernetes.Interface, namespace string, selector labels.Selector) ([]corev1.Pod, error) {
	list, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	// Check the error before touching list: a failed List can return a nil list,
	// and dereferencing list.Items would then panic.
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}
// namespacesReady reports whether every namespace is Active, plus the IDs of those
// that are not.
func namespacesReady(namespaces []corev1.Namespace) (bool, []string) {
	var notReady []string
	for i := range namespaces {
		ns := &namespaces[i]
		if ns.Status.Phase != corev1.NamespaceActive {
			notReady = append(notReady, "Namespace/"+ns.Name)
		}
	}
	return len(notReady) == 0, notReady
}
// podsReady reports whether every pod is Ready, plus the IDs of those that are not.
func podsReady(pods []corev1.Pod) (bool, []string) {
	var notReady []string
	for i := range pods {
		if isPodReady(&pods[i]) {
			continue
		}
		notReady = append(notReady, "Pod/"+pods[i].Namespace+"/"+pods[i].Name)
	}
	return len(notReady) == 0, notReady
}
// isPodReady reports whether the pod carries a PodReady condition with status True.
func isPodReady(pod *corev1.Pod) bool {
	for _, condition := range pod.Status.Conditions {
		if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}
// deploymentsReady reports whether each deployment's newest ReplicaSet has at least
// the desired number of ready replicas. Not-ready IDs are returned, and pod-level
// failure reasons (when extractable) are written into info keyed by ID.
func deploymentsReady(cs kubernetes.Interface, deployments []deployment, info map[string]string) (bool, []string) {
	var notReady []string
	for _, d := range deployments {
		if d.replicaSets.Status.ReadyReplicas >= *d.deployment.Spec.Replicas {
			// Ready
			continue
		}
		id := "Deployment/" + d.deployment.Namespace + "/" + d.deployment.Name
		notReady = append(notReady, id)
		if reason := extractPodFailureReason(cs, d.deployment.Namespace, d.deployment.Spec.Selector); reason != "" {
			info[id] = reason
		}
	}
	return len(notReady) == 0, notReady
}
// extractPodFailureReason returns a human-readable reason why pods matching selector
// in namespace are failing, or "" when no failure is evident. The newest pods are
// examined first so the most recent rollout attempt is reported.
func extractPodFailureReason(client kubernetes.Interface, namespace string, selector *metav1.LabelSelector) string {
	sel, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		return fmt.Sprintf("failed to get label selector: %v", err)
	}
	pods, err := getPods(client, namespace, sel)
	if err != nil {
		return fmt.Sprintf("failed to fetch pods: %v", err)
	}
	// Newest first.
	sort.Slice(pods, func(i, j int) bool {
		return pods[i].CreationTimestamp.After(pods[j].CreationTimestamp.Time)
	})
	for _, pod := range pods {
		for _, cs := range pod.Status.ContainerStatuses {
			if cs.State.Waiting != nil {
				return fmt.Sprintf("container failed to start: %v: %v", cs.State.Waiting.Reason, cs.State.Waiting.Message)
			}
		}
		if c := getCondition(pod.Status.Conditions, corev1.PodReady); c != nil && c.Status == corev1.ConditionFalse {
			// Return the message verbatim. Passing it through Sprintf as a format
			// string (as before) is a go vet violation and misinterprets any '%'
			// the message contains.
			return c.Message
		}
	}
	return ""
}
// getCondition returns a pointer to a copy of the condition of the given type, or
// nil when no such condition exists.
func getCondition(conditions []corev1.PodCondition, condition corev1.PodConditionType) *corev1.PodCondition {
	for _, c := range conditions {
		if c.Type != condition {
			continue
		}
		found := c
		return &found
	}
	return nil
}
// daemonsetsReady reports whether every DaemonSet has finished rolling out, plus the
// IDs of those that have not. A DaemonSet is checked only once its observed generation
// matches its spec generation; otherwise the controller has not yet seen the update.
func daemonsetsReady(daemonsets []*appsv1.DaemonSet) (bool, []string) {
	var notReady []string
	for _, ds := range daemonsets {
		// Check if the wanting generation is same as the observed generation
		// Only when the observed generation is the same as the generation,
		// other checks will make sense. If not the same, daemon set is not
		// ready
		if ds.Status.ObservedGeneration != ds.Generation {
			scope.Infof("DaemonSet is not ready: %s/%s. Observed generation: %d expected generation: %d",
				ds.Namespace, ds.Name, ds.Status.ObservedGeneration, ds.Generation)
			notReady = append(notReady, "DaemonSet/"+ds.Namespace+"/"+ds.Name)
		} else {
			// Make sure all the updated pods have been scheduled
			if ds.Spec.UpdateStrategy.Type == appsv1.OnDeleteDaemonSetStrategyType &&
				ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
				scope.Infof("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled",
					ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
				notReady = append(notReady, "DaemonSet/"+ds.Namespace+"/"+ds.Name)
			}
			if ds.Spec.UpdateStrategy.Type == appsv1.RollingUpdateDaemonSetStrategyType {
				if ds.Status.DesiredNumberScheduled <= 0 {
					// If DesiredNumberScheduled less then or equal 0, there some cases:
					// 1) daemonset is just created
					// 2) daemonset desired no pod
					// 3) somebody changed it manually
					// All the case is not a ready signal
					scope.Infof("DaemonSet is not ready: %s/%s. Initializing, no pods is running",
						ds.Namespace, ds.Name)
					notReady = append(notReady, "DaemonSet/"+ds.Namespace+"/"+ds.Name)
				} else if ds.Status.NumberReady < ds.Status.DesiredNumberScheduled {
					// Make sure every node has a ready pod. The expected count is
					// DesiredNumberScheduled — the same value the condition compares
					// against (previously this logged UpdatedNumberScheduled, which is
					// the wrong variable for this branch).
					scope.Infof("DaemonSet is not ready: %s/%s. %d out of %d expected pods are ready",
						ds.Namespace, ds.Name, ds.Status.NumberReady, ds.Status.DesiredNumberScheduled)
					notReady = append(notReady, "DaemonSet/"+ds.Namespace+"/"+ds.Name)
				} else if ds.Status.UpdatedNumberScheduled != ds.Status.DesiredNumberScheduled {
					// Make sure all the updated pods have been scheduled
					scope.Infof("DaemonSet is not ready: %s/%s. %d out of %d expected pods have been scheduled",
						ds.Namespace, ds.Name, ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
					notReady = append(notReady, "DaemonSet/"+ds.Namespace+"/"+ds.Name)
				}
			}
		}
	}
	return len(notReady) == 0, notReady
}
// statefulsetsReady reports whether every StatefulSet has finished rolling out, plus
// the IDs of those that have not. Readiness rules depend on the update strategy:
// OnDelete compares updated vs. total replicas; RollingUpdate accounts for a
// possible partition.
func statefulsetsReady(statefulsets []*appsv1.StatefulSet) (bool, []string) {
	var notReady []string
	for _, sts := range statefulsets {
		// Make sure all the updated pods have been scheduled
		if sts.Spec.UpdateStrategy.Type == appsv1.OnDeleteStatefulSetStrategyType &&
			sts.Status.UpdatedReplicas != sts.Status.Replicas {
			scope.Infof("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled",
				sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, sts.Status.Replicas)
			notReady = append(notReady, "StatefulSet/"+sts.Namespace+"/"+sts.Name)
		}
		if sts.Spec.UpdateStrategy.Type == appsv1.RollingUpdateStatefulSetStrategyType {
			// Dereference all the pointers because StatefulSets like them
			var partition int
			// default replicas for sts is 1
			replicas := 1
			// the rollingUpdate field can be nil even if the update strategy is a rolling update.
			if sts.Spec.UpdateStrategy.RollingUpdate != nil &&
				sts.Spec.UpdateStrategy.RollingUpdate.Partition != nil {
				partition = int(*sts.Spec.UpdateStrategy.RollingUpdate.Partition)
			}
			if sts.Spec.Replicas != nil {
				replicas = int(*sts.Spec.Replicas)
			}
			// With a partition, only ordinals >= partition are updated, so only
			// replicas-partition pods are expected to be on the new revision.
			expectedReplicas := replicas - partition
			// Make sure all the updated pods have been scheduled
			if int(sts.Status.UpdatedReplicas) != expectedReplicas {
				scope.Infof("StatefulSet is not ready: %s/%s. %d out of %d expected pods have been scheduled",
					sts.Namespace, sts.Name, sts.Status.UpdatedReplicas, expectedReplicas)
				notReady = append(notReady, "StatefulSet/"+sts.Namespace+"/"+sts.Name)
				continue
			}
		}
	}
	return len(notReady) == 0, notReady
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package metrics defines metrics and monitoring functionality
// used throughout operator.
package metrics
import (
"istio.io/istio/pkg/monitoring"
)
// Labels attached to the operator metrics defined below.
var (
	// OperatorVersionLabel describes version of running binary.
	OperatorVersionLabel = monitoring.CreateLabel("version")
	// MergeErrorLabel describes the type of merge error.
	MergeErrorLabel = monitoring.CreateLabel("error_type")
	// RenderErrorLabel describes the type of the error while rendering.
	RenderErrorLabel = monitoring.CreateLabel("render_error")
	// CRFetchErrorReasonLabel describes the reason/HTTP code
	// for failing to fetch CR.
	CRFetchErrorReasonLabel = monitoring.CreateLabel("reason")
	// ComponentNameLabel represents istio component name - like
	// core, pilot, istio-cni etc.
	ComponentNameLabel = monitoring.CreateLabel("component")
	// ResourceKindLabel indicates the kind of resource owned
	// or created or updated or deleted or pruned by operator.
	ResourceKindLabel = monitoring.CreateLabel("kind")
	// ReconcileRequestReasonLabel describes reason of reconcile request.
	ReconcileRequestReasonLabel = monitoring.CreateLabel("reason")
)
// MergeErrorType describes the class of errors that could
// occur while merging profile, user supplied YAML, values
// overridden by --set and so on.
type MergeErrorType string

// Values of MergeErrorType, used as the MergeErrorLabel on merge-failure metrics.
const (
	// CannotFetchProfileError occurs when profile cannot be found.
	CannotFetchProfileError MergeErrorType = "cannot_fetch_profile"
	// OverlayError overlaying YAMLs to combine profile, user
	// defined settings in CR, Hub-tag etc. fails.
	OverlayError MergeErrorType = "overlay"
	// IOPFormatError occurs when supplied CR cannot be marshaled
	// or unmarshaled to/from YAML.
	IOPFormatError MergeErrorType = "iop_format"
	// TranslateValuesError occurs when translating from legacy API fails.
	TranslateValuesError MergeErrorType = "translate_values"
	// InternalYAMLParseError occurs when spec section in merged CR
	// cannot be accessed for some reason (either missing or multiple).
	InternalYAMLParseError MergeErrorType = "internal_yaml_parse"
)
// RenderErrorType describes the class of errors that could
// occur while rendering Kubernetes manifest from given CR.
type RenderErrorType string

// Values of RenderErrorType, used as the RenderErrorLabel on render-failure metrics.
const (
	// RenderNotStartedError occurs when rendering fails before any renderer ran.
	RenderNotStartedError RenderErrorType = "render_not_started"
	// HelmTranslateIOPToValuesError describes render error where renderer for
	// a component cannot create values.yaml tree from given CR.
	HelmTranslateIOPToValuesError RenderErrorType = "helm_translate_iop_to_values"
	// HelmChartRenderError describes error where Helm charts cannot be rendered
	// for the generated values.yaml tree.
	HelmChartRenderError RenderErrorType = "helm_chart_render"
	// K8SSettingsOverlayError describes the K8s overlay error after
	// rendering Helm charts successfully.
	K8SSettingsOverlayError RenderErrorType = "k8s_settings_overlay"
	// K8SManifestPatchError describes errors while patching generated manifest.
	K8SManifestPatchError RenderErrorType = "k8s_manifest_patch"
)
var (
	// Version is the version of the operator binary running currently.
	Version = monitoring.NewGauge(
		"version",
		"Version of operator binary",
	)

	// ReconcileRequestTotal counts the number of times a reconcile was
	// requested; incremented via IncrementReconcileRequest with a reason label.
	ReconcileRequestTotal = monitoring.NewSum(
		"reconcile_request_total",
		"Number of times requesting Reconcile",
	)

	// GetCRErrorTotal counts the number of times fetching
	// CR fails from API server.
	GetCRErrorTotal = monitoring.NewSum(
		"get_cr_error_total",
		"Number of times fetching CR from apiserver failed",
	)

	// CRMergeFailureTotal counts number of CR merge failures.
	CRMergeFailureTotal = monitoring.NewSum(
		"cr_merge_failure_total",
		"Number of IstioOperator CR merge failures",
	)

	// CRDeletionTotal counts the number of times
	// IstioOperator CR was deleted.
	CRDeletionTotal = monitoring.NewSum(
		"cr_deletion_total",
		"Number of IstioOperator CR deleted",
	)

	// CRValidationErrorTotal counts the number of CR
	// validation failures.
	CRValidationErrorTotal = monitoring.NewSum(
		"cr_validation_error_total",
		"Number of IstioOperator CR validation failures",
	)

	// RenderManifestTotal counts the number of manifest
	// renders at each component level.
	RenderManifestTotal = monitoring.NewSum(
		"render_manifest_total",
		"Number of component manifests rendered",
	)

	// OwnedResourceTotal indicates the number of resources
	// currently owned by the CR with given name and revision.
	OwnedResourceTotal = monitoring.NewGauge(
		"owned_resource_total",
		"Number of resources currently owned by the operator",
	)

	// ResourceCreationTotal indicates the number of resources
	// created by the operator for a CR and revision.
	ResourceCreationTotal = monitoring.NewSum(
		"resource_creation_total",
		"Number of resources created by the operator",
	)

	// ResourceUpdateTotal indicates the number of resources updated by
	// the operator in response to CR updates for a revision.
	ResourceUpdateTotal = monitoring.NewSum(
		"resource_update_total",
		"Number of resources updated by the operator",
	)

	// ResourceDeletionTotal indicates the number of resources deleted
	// by the operator in response to CR update or delete operation (like
	// ingress-gateway which was enabled could be disabled and this requires
	// deleting ingress-gateway deployment).
	ResourceDeletionTotal = monitoring.NewSum(
		"resource_deletion_total",
		"Number of resources deleted by the operator",
	)

	// ResourcePruneTotal indicates the resources pruned as a result of update.
	ResourcePruneTotal = monitoring.NewSum(
		"resource_prune_total",
		"Number of resources pruned by the operator",
	)

	// ManifestPatchErrorTotal counts the total number of K8S patch errors.
	ManifestPatchErrorTotal = monitoring.NewSum(
		"manifest_patch_error_total",
		"Number of times K8S patch overlays failed",
	)

	// ManifestRenderErrorTotal counts errors occurred while rendering manifest.
	ManifestRenderErrorTotal = monitoring.NewSum(
		"manifest_render_error_total",
		"Number of times error occurred during rendering output manifest",
	)

	// LegacyPathTranslationTotal counts the translations from legacy API to new one.
	LegacyPathTranslationTotal = monitoring.NewSum(
		"legacy_path_translation_total",
		"Number of times a legacy API path is translated",
	)

	// CacheFlushTotal counts number of cache flushes.
	CacheFlushTotal = monitoring.NewSum(
		"cache_flush_total",
		"number of times operator cache was flushed",
	)
)
// init prepares the package-level owned-resource bookkeeping (rc) so that
// AddResource/RemoveResource/ReportOwnedResourceCounts can be called safely.
func init() {
	initOperatorCrdResourceMetrics()
}
// IncrementReconcileRequest increments ReconcileRequestTotal, labeled with the
// reason that triggered the reconcile request.
func IncrementReconcileRequest(reason string) {
	ReconcileRequestTotal.With(ReconcileRequestReasonLabel.Value(reason)).Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"sync"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/operator/pkg/util"
)
// resourceCounts keeps track of the number of resources owned by each
// IstioOperator resource. The reported metric is the sum across all these.
type resourceCounts struct {
	// mu guards resources against concurrent access from the package-level
	// Add/Remove/Report functions.
	mu *sync.Mutex
	// resources maps a GroupKind to the set of names of owned objects of that kind.
	resources map[schema.GroupKind]map[string]struct{}
}

// rc is the package singleton, set by initOperatorCrdResourceMetrics (via init).
var rc *resourceCounts
// initOperatorCrdResourceMetrics (re)initializes the owned-resource tracking
// state to empty. It must run before any Add/Remove/Report call; init() does so.
func initOperatorCrdResourceMetrics() {
	rc = &resourceCounts{
		mu:        &sync.Mutex{},
		resources: map[schema.GroupKind]map[string]struct{}{},
	}
}
// AddResource adds the resource of given kind to the set of owned objects
func AddResource(name string, gk schema.GroupKind) {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	// Lazily create the per-kind set on first use.
	set, ok := rc.resources[gk]
	if !ok {
		set = map[string]struct{}{}
		rc.resources[gk] = set
	}
	set[name] = struct{}{}
}
// RemoveResource removes the resource of given kind from the set of owned objects.
func RemoveResource(name string, gk schema.GroupKind) {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	// delete on a nil/missing inner map is a no-op, so no existence check is needed.
	delete(rc.resources[gk], name)
}
// ReportOwnedResourceCounts reports the owned resource count
// metric by Group and Kind.
func ReportOwnedResourceCounts() {
	rc.mu.Lock()
	defer rc.mu.Unlock()
	for gk, names := range rc.resources {
		count := float64(len(names))
		OwnedResourceTotal.With(ResourceKindLabel.Value(util.GKString(gk))).Record(count)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/operator/pkg/name"
)
// CountCRMergeFail increments the count of CR merge failure
// for the given merge error type.
func CountCRMergeFail(reason MergeErrorType) {
	label := MergeErrorLabel.Value(string(reason))
	CRMergeFailureTotal.With(label).Increment()
}
// CountManifestRenderError increments the count of manifest
// render errors for the given component and error class.
func CountManifestRenderError(cn name.ComponentName, reason RenderErrorType) {
	componentLabel := ComponentNameLabel.Value(string(cn))
	errorLabel := RenderErrorLabel.Value(string(reason))
	ManifestRenderErrorTotal.With(componentLabel).With(errorLabel).Increment()
}
// CountCRFetchFail increments the count of CR fetch failure
// for a given name and the error status.
func CountCRFetchFail(reason metav1.StatusReason) {
	// Map the "unknown" status reason to a stable, non-empty label value.
	errorReason := "unknown"
	if reason != metav1.StatusReasonUnknown {
		errorReason = string(reason)
	}
	GetCRErrorTotal.With(CRFetchErrorReasonLabel.Value(errorReason)).Increment()
}
// CountManifestRender increments the count of rendered
// manifest from IstioOperator CR by component name.
func CountManifestRender(cn name.ComponentName) {
	// Parameter renamed from "name": it shadowed the imported "name" package,
	// making that package unreachable inside this function body.
	RenderManifestTotal.
		With(ComponentNameLabel.Value(string(cn))).
		Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package name
import (
"fmt"
"strings"
"istio.io/api/operator/v1alpha1"
iop "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/tpath"
)
// Kubernetes Kind strings.
const (
	CRDStr                            = "CustomResourceDefinition"
	ClusterRoleStr                    = "ClusterRole"
	ClusterRoleBindingStr             = "ClusterRoleBinding"
	CMStr                             = "ConfigMap"
	DaemonSetStr                      = "DaemonSet"
	DeploymentStr                     = "Deployment"
	EndpointStr                       = "Endpoints"
	HPAStr                            = "HorizontalPodAutoscaler"
	// IstioOperator duplicates IstioOperatorStr below; both spell the CRD kind.
	IstioOperator                     = "IstioOperator"
	MutatingWebhookConfigurationStr   = "MutatingWebhookConfiguration"
	NamespaceStr                      = "Namespace"
	NetworkAttachmentDefinitionStr    = "NetworkAttachmentDefinition"
	PodStr                            = "Pod"
	PDBStr                            = "PodDisruptionBudget"
	ReplicaSetStr                     = "ReplicaSet"
	RoleStr                           = "Role"
	RoleBindingStr                    = "RoleBinding"
	SAStr                             = "ServiceAccount"
	ServiceStr                        = "Service"
	SecretStr                         = "Secret"
	StatefulSetStr                    = "StatefulSet"
	ValidatingWebhookConfigurationStr = "ValidatingWebhookConfiguration"
)

const (
	// IstioOperatorStr is the kind name of the IstioOperator CRD.
	IstioOperatorStr = "IstioOperator"
	// OperatorAPINamespace is the API namespace for operator config.
	// TODO: move this to a base definitions file when one is created.
	OperatorAPINamespace = "operator.istio.io"
	// DefaultProfileName is the name of the default profile.
	DefaultProfileName = "default"
)
// ComponentName is a component name string, typed to constrain allowed values.
type ComponentName string

const (
	// IstioComponent names corresponding to the IstioOperator proto component names. Must be the same, since these
	// are used for struct traversal.
	IstioBaseComponentName ComponentName = "Base"
	PilotComponentName     ComponentName = "Pilot"
	CNIComponentName       ComponentName = "Cni"
	ZtunnelComponentName   ComponentName = "Ztunnel"

	// istiod remote component
	IstiodRemoteComponentName ComponentName = "IstiodRemote"

	// Gateway components
	IngressComponentName ComponentName = "IngressGateways"
	EgressComponentName  ComponentName = "EgressGateways"

	// Operator components
	IstioOperatorComponentName      ComponentName = "IstioOperator"
	IstioOperatorCustomResourceName ComponentName = "IstioOperatorCustomResource"
)

// ComponentNamesConfig is used for unmarshaling legacy and addon naming data.
type ComponentNamesConfig struct {
	DeprecatedComponentNames []string
}

var (
	// AllCoreComponentNames is the set of non-gateway, non-operator components.
	AllCoreComponentNames = []ComponentName{
		IstioBaseComponentName,
		PilotComponentName,
		CNIComponentName,
		IstiodRemoteComponentName,
		ZtunnelComponentName,
	}

	// AllComponentNames is a list of all Istio components.
	AllComponentNames = append(AllCoreComponentNames, IngressComponentName, EgressComponentName,
		IstioOperatorComponentName, IstioOperatorCustomResourceName)

	// ValuesEnablementPathMap defines a mapping between legacy values enablement paths and the corresponding enablement
	// paths in IstioOperator.
	ValuesEnablementPathMap = map[string]string{
		"spec.values.gateways.istio-ingressgateway.enabled": "spec.components.ingressGateways.[name:istio-ingressgateway].enabled",
		"spec.values.gateways.istio-egressgateway.enabled":  "spec.components.egressGateways.[name:istio-egressgateway].enabled",
	}

	// userFacingComponentNames are the names of components that are displayed to the user in high level CLIs
	// (like progress log).
	userFacingComponentNames = map[ComponentName]string{
		IstioBaseComponentName:          "Istio core",
		PilotComponentName:              "Istiod",
		CNIComponentName:                "CNI",
		ZtunnelComponentName:            "Ztunnel",
		IngressComponentName:            "Ingress gateways",
		EgressComponentName:             "Egress gateways",
		IstioOperatorComponentName:      "Istio operator",
		IstioOperatorCustomResourceName: "Istio operator CRDs",
		IstiodRemoteComponentName:       "Istiod remote",
	}
)
// Manifest defines a manifest for a component.
type Manifest struct {
	// Name is the component the manifest belongs to.
	Name ComponentName
	// Content is the rendered YAML of the manifest.
	Content string
}

// ManifestMap is a map of ComponentName to its manifest string.
type ManifestMap map[ComponentName][]string
// Consolidated returns a representation of mm where all manifests in the slice under a key are combined into a single
// manifest, each document terminated by the YAML separator.
func (mm ManifestMap) Consolidated() map[string]string {
	out := make(map[string]string, len(mm))
	for cname, ms := range mm {
		// Use strings.Builder instead of += in a loop, which is quadratic
		// in the total manifest size.
		var b strings.Builder
		for _, m := range ms {
			b.WriteString(m)
			b.WriteString(helm.YAMLSeparator)
		}
		out[string(cname)] = b.String()
	}
	return out
}
// MergeManifestSlices merges a slice of manifests into a single manifest string,
// joined by the standard YAML document separator (no trailing separator).
func MergeManifestSlices(manifests []string) string {
	return strings.Join(manifests, helm.YAMLSeparator)
}
// String implements the Stringer interface. It concatenates every manifest for
// every component, each terminated by the YAML separator; iteration order over
// the map is unspecified.
func (mm ManifestMap) String() string {
	// strings.Builder avoids the quadratic cost of string += in a loop.
	var b strings.Builder
	for _, ms := range mm {
		for _, m := range ms {
			b.WriteString(m)
			b.WriteString(helm.YAMLSeparator)
		}
	}
	return b.String()
}
// IsGateway reports whether cn is a gateway component.
func (cn ComponentName) IsGateway() bool {
	switch cn {
	case IngressComponentName, EgressComponentName:
		return true
	default:
		return false
	}
}
// Namespace returns the namespace for the component. It follows these rules:
// 1. If DefaultNamespace is unset, log and error and return the empty string.
// 2. If the feature and component namespaces are unset, return DefaultNamespace.
// 3. If the feature namespace is set but component name is unset, return the feature namespace.
// 4. Otherwise return the component namespace.
// Namespace assumes that controlPlaneSpec has been validated.
// TODO: remove extra validations when comfort level is high enough.
func Namespace(componentName ComponentName, controlPlaneSpec *v1alpha1.IstioOperatorSpec) (string, error) {
	defaultNamespace := iop.Namespace(controlPlaneSpec)

	structPath := "Components." + string(componentName) + ".Namespace"
	node, found, err := tpath.GetFromStructPath(controlPlaneSpec, structPath)
	if err != nil {
		return "", fmt.Errorf("error in Namespace GetFromStructPath componentNamespace for component=%s: %s", componentName, err)
	}
	// Absent or nil component namespace: fall back to the CR-wide default.
	if !found || node == nil {
		return defaultNamespace, nil
	}
	ns, ok := node.(string)
	if !ok {
		return "", fmt.Errorf("component %s enabled has bad type %T, expect string", componentName, node)
	}
	// An explicitly empty namespace also means "use the default".
	if ns == "" {
		return defaultNamespace, nil
	}
	return ns, nil
}
// TitleCase returns n with its first character upper-cased.
// An empty name is returned unchanged; the original indexed s[0:1]
// unconditionally and panicked with a slice-bounds error on "".
func TitleCase(n ComponentName) ComponentName {
	s := string(n)
	if s == "" {
		return n
	}
	return ComponentName(strings.ToUpper(s[0:1]) + s[1:])
}
// UserFacingComponentName returns the name of the given component that should be displayed to the user in high
// level CLIs (like progress log).
func UserFacingComponentName(name ComponentName) string {
	if display, ok := userFacingComponentNames[name]; ok {
		return display
	}
	return "Unknown"
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package manifest provides functions for going between in-memory k8s objects (unstructured.Unstructured) and their JSON
or YAML representations.
*/
package object
import (
"bytes"
"fmt"
"io"
"sort"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/intstr"
k8syaml "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
"istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/helm"
names "istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/log"
)
const (
	// YAMLSeparator is a separator for multi-document YAML files.
	YAMLSeparator = "\n---\n"
)

// K8sObject is an in-memory representation of a k8s object, used for moving between different representations
// (Unstructured, JSON, YAML) with cached rendering.
type K8sObject struct {
	// object is the underlying unstructured representation.
	object *unstructured.Unstructured

	// Group/Kind/Name/Namespace are copied out of object at construction time.
	Group     string
	Kind      string
	Name      string
	Namespace string

	// json and yaml cache rendered representations; nil until rendered or
	// supplied at construction.
	json []byte
	yaml []byte
}
// NewK8sObject creates a new K8sObject and returns a ptr to it.
func NewK8sObject(u *unstructured.Unstructured, json, yaml []byte) *K8sObject {
	gvk := u.GetObjectKind().GroupVersionKind()
	return &K8sObject{
		object:    u,
		json:      json,
		yaml:      yaml,
		Group:     gvk.Group,
		Kind:      gvk.Kind,
		Name:      u.GetName(),
		Namespace: u.GetNamespace(),
	}
}
// Hash returns a unique, insecure hash based on kind, namespace and name.
func Hash(kind, namespace, name string) string {
	// Cluster-scoped kinds carry no namespace in their hash.
	if kind == names.ClusterRoleStr || kind == names.ClusterRoleBindingStr {
		namespace = ""
	}
	return kind + ":" + namespace + ":" + name
}
// FromHash parses kind, namespace and name from a hash produced by Hash.
// A malformed hash yields an error message in the kind position and empty
// namespace/name, matching the historical in-band error convention.
func FromHash(hash string) (kind, namespace, name string) {
	parts := strings.Split(hash, ":")
	if len(parts) != 3 {
		return "Bad hash string: " + hash, "", ""
	}
	return parts[0], parts[1], parts[2]
}
// HashNameKind returns a unique, insecure hash based on kind and name.
func HashNameKind(kind, name string) string {
	return kind + ":" + name
}
// ParseJSONToK8sObject parses JSON to an K8sObject.
func ParseJSONToK8sObject(json []byte) (*K8sObject, error) {
	decoded, _, err := unstructured.UnstructuredJSONScheme.Decode(json, nil, nil)
	if err != nil {
		return nil, fmt.Errorf("error parsing json into unstructured object: %v", err)
	}
	u, ok := decoded.(*unstructured.Unstructured)
	if !ok {
		return nil, fmt.Errorf("parsed unexpected type %T", decoded)
	}
	return NewK8sObject(u, json, nil), nil
}
// ParseYAMLToK8sObject parses YAML to an Object.
func ParseYAMLToK8sObject(yaml []byte) (*K8sObject, error) {
	objects, err := ParseK8sObjectsFromYAMLManifest(string(yaml))
	if err != nil {
		return nil, err
	}
	switch {
	case len(objects) > 1:
		// The input must contain exactly one YAML document.
		return nil, fmt.Errorf("expect one object, actually: %d", len(objects))
	case len(objects) == 0 || objects[0] == nil:
		return nil, fmt.Errorf("decoding object %v: %v", string(yaml), "no object found")
	}
	return objects[0], nil
}
// UnstructuredObject exposes the raw object, primarily for testing
func (o *K8sObject) UnstructuredObject() *unstructured.Unstructured {
	return o.object
}
// ResolveK8sConflict - This method resolves k8s object possible
// conflicting settings. Which K8sObjects may need such method
// depends on the type of the K8sObject.
func (o *K8sObject) ResolveK8sConflict() *K8sObject {
	// PodDisruptionBudget is currently the only kind with conflict handling
	// (minAvailable vs maxUnavailable are mutually exclusive).
	if o.Kind != names.PDBStr {
		return o
	}
	return resolvePDBConflict(o)
}
// Unstructured exposes the raw object content, primarily for testing
func (o *K8sObject) Unstructured() map[string]any {
	return o.UnstructuredObject().UnstructuredContent()
}
// Container returns a container subtree for Deployment objects if one is found, or nil otherwise.
func (o *K8sObject) Container(name string) map[string]any {
	u := o.Unstructured()
	// Select the named entry from the pod template's container list.
	path := fmt.Sprintf("spec.template.spec.containers.[name:%s]", name)
	node, f, err := tpath.GetPathContext(u, util.PathFromString(path), false)
	if err == nil && f {
		// Must be the type from the schema.
		return node.Node.(map[string]any)
	}
	return nil
}
// GroupVersionKind returns the GroupVersionKind for the K8sObject
func (o *K8sObject) GroupVersionKind() schema.GroupVersionKind {
	return o.object.GroupVersionKind()
}
// Version returns the APIVersion of the K8sObject
func (o *K8sObject) Version() string {
	return o.object.GetAPIVersion()
}
// Hash returns a unique hash for the K8sObject
func (o *K8sObject) Hash() string {
	return Hash(o.Kind, o.Namespace, o.Name)
}
// HashNameKind returns a hash for the K8sObject based on the name and kind only.
func (o *K8sObject) HashNameKind() string {
	return HashNameKind(o.Kind, o.Name)
}
// JSON returns a JSON representation of the K8sObject, using an internal cache.
func (o *K8sObject) JSON() ([]byte, error) {
	if o.json != nil {
		return o.json, nil
	}
	b, err := o.object.MarshalJSON()
	if err != nil {
		return nil, err
	}
	// Populate the cache so repeated calls don't re-marshal. The original read
	// the cache here but never filled it (only YAML() did, as a side effect).
	o.json = b
	return b, nil
}
// YAML returns a YAML representation of the K8sObject, using an internal cache.
func (o *K8sObject) YAML() ([]byte, error) {
	if o == nil {
		return nil, nil
	}
	if o.yaml != nil {
		return o.yaml, nil
	}
	// Render via the JSON representation, caching both forms along the way.
	jsonBytes, err := o.JSON()
	if err != nil {
		return nil, err
	}
	o.json = jsonBytes
	yamlBytes, err := yaml.JSONToYAML(jsonBytes)
	if err != nil {
		return nil, err
	}
	o.yaml = yamlBytes
	return yamlBytes, nil
}
// YAMLDebugString returns a YAML representation of the K8sObject, or an error string if the K8sObject cannot be rendered to YAML.
func (o *K8sObject) YAMLDebugString() string {
	y, err := o.YAML()
	if err == nil {
		return string(y)
	}
	return err.Error()
}
// K8sObjects holds a collection of k8s objects, so that we can filter / sequence them
type K8sObjects []*K8sObject
// String implements the Stringer interface.
func (os K8sObjects) String() string {
	rendered := make([]string, 0, len(os))
	for _, obj := range os {
		rendered = append(rendered, obj.YAMLDebugString())
	}
	return strings.Join(rendered, helm.YAMLSeparator)
}
// Keys returns a slice with the keys of os.
func (os K8sObjects) Keys() []string {
	keys := make([]string, len(os))
	for i, obj := range os {
		keys[i] = obj.Hash()
	}
	return keys
}
// UnstructuredItems returns the list of items of unstructured.Unstructured.
func (os K8sObjects) UnstructuredItems() []unstructured.Unstructured {
	items := make([]unstructured.Unstructured, len(os))
	for i, obj := range os {
		items[i] = *obj.UnstructuredObject()
	}
	return items
}
// ParseK8sObjectsFromYAMLManifest returns a K8sObjects representation of manifest.
// It fails on the first bad object; see ParseK8sObjectsFromYAMLManifestFailOption
// for best-effort parsing.
func ParseK8sObjectsFromYAMLManifest(manifest string) (K8sObjects, error) {
	return ParseK8sObjectsFromYAMLManifestFailOption(manifest, true)
}
// ParseK8sObjectsFromYAMLManifestFailOption returns a K8sObjects representation of manifest. Continues parsing when a bad object
// is found if failOnError is set to false.
func ParseK8sObjectsFromYAMLManifestFailOption(manifest string, failOnError bool) (K8sObjects, error) {
	jsonDecoder := k8syaml.NewYAMLToJSONDecoder(bytes.NewReader([]byte(manifest)))
	var objects K8sObjects

	wrapErr := func(err error) error {
		return fmt.Errorf("failed to parse YAML to a k8s object: %v", err)
	}

	// Serializer used to re-encode each decoded object as clean YAML.
	s := json.NewSerializerWithOptions(json.DefaultMetaFactory, nil, nil, json.SerializerOptions{
		Yaml:   true,
		Pretty: true,
		Strict: true,
	})
	for {
		var obj unstructured.Unstructured
		if err := jsonDecoder.Decode(&obj); err != nil {
			if err == io.EOF {
				break
			}
			err = wrapErr(err)
			if failOnError {
				return nil, err
			}
			log.Error(err.Error())
			continue
		}
		// Empty YAML documents decode to a nil object map; skip them.
		if obj.Object == nil {
			continue
		}
		if !isValidKubernetesObject(obj) {
			if failOnError {
				// Fix: the inner message previously repeated the wrapErr prefix,
				// producing "failed to parse YAML to a k8s object:" twice.
				err := wrapErr(fmt.Errorf("object is an invalid k8s object: %v", obj))
				return nil, err
			}
			// In best-effort mode the object still flows through below; it is
			// only dropped later if it has no kind (Valid() is false).
		}
		// Convert the unstructured object back into YAML, without comments
		var buf bytes.Buffer
		if err := s.Encode(&obj, &buf); err != nil {
			err = wrapErr(err)
			if failOnError {
				return nil, err
			}
			log.Error(err.Error())
			continue
		}
		cleanedYaml := buf.String()
		k8sObj := NewK8sObject(&obj, nil, []byte(cleanedYaml))
		if k8sObj.Valid() {
			objects = append(objects, k8sObj)
		}
	}
	return objects, nil
}
// YAMLManifest returns a YAML representation of K8sObjects os.
func (os K8sObjects) YAMLManifest() (string, error) {
	var buf bytes.Buffer
	for i, obj := range os {
		// Blank line between documents, in addition to the separator below.
		if i > 0 {
			if _, err := buf.WriteString("\n\n"); err != nil {
				return "", err
			}
		}
		ym, err := obj.YAML()
		if err != nil {
			return "", fmt.Errorf("error building yaml: %v", err)
		}
		if _, err := buf.Write(ym); err != nil {
			return "", err
		}
		if _, err := buf.WriteString(YAMLSeparator); err != nil {
			return "", err
		}
	}
	return buf.String(), nil
}
// Sort will order the items in K8sObjects in order of score, group, kind, name. The intent is to
// have a deterministic ordering in which K8sObjects are applied.
func (os K8sObjects) Sort(score func(o *K8sObject) int) {
	sort.Slice(os, func(i, j int) bool {
		a, b := os[i], os[j]
		// Lexicographic comparison: score, then Group, then Kind, then Name.
		if sa, sb := score(a), score(b); sa != sb {
			return sa < sb
		}
		if a.Group != b.Group {
			return a.Group < b.Group
		}
		if a.Kind != b.Kind {
			return a.Kind < b.Kind
		}
		return a.Name < b.Name
	})
}
// ToMap returns a map of K8sObject hash to K8sObject.
func (os K8sObjects) ToMap() map[string]*K8sObject {
	byHash := make(map[string]*K8sObject, len(os))
	for _, obj := range os {
		// Objects without a kind are not indexable.
		if !obj.Valid() {
			continue
		}
		byHash[obj.Hash()] = obj
	}
	return byHash
}
// ToNameKindMap returns a map of K8sObject name/kind hash to K8sObject.
func (os K8sObjects) ToNameKindMap() map[string]*K8sObject {
	byNameKind := make(map[string]*K8sObject, len(os))
	for _, obj := range os {
		if !obj.Valid() {
			continue
		}
		byNameKind[obj.HashNameKind()] = obj
	}
	return byNameKind
}
// Valid reports whether the K8sObject has a non-empty Kind.
func (o *K8sObject) Valid() bool {
	return o.Kind != ""
}
// FullName returns namespace/name of K8s object
func (o *K8sObject) FullName() string {
	return fmt.Sprintf("%s/%s", o.Namespace, o.Name)
}
// Equal returns true if o and other are both valid and equal to each other.
func (o *K8sObject) Equal(other *K8sObject) bool {
	// Two nils are equal; one nil is not.
	if o == nil || other == nil {
		return o == other
	}
	ay, err := o.YAML()
	if err != nil {
		return false
	}
	by, err := other.YAML()
	if err != nil {
		return false
	}
	return util.IsYAMLEqual(string(ay), string(by))
}
// DefaultObjectOrder is default sorting function used to sort k8s objects.
func DefaultObjectOrder() func(o *K8sObject) int {
	return func(o *K8sObject) int {
		gk := o.Group + "/" + o.Kind
		switch {
		// Create CRDs asap - both because they are slow and because we will likely create instances of them soon
		case gk == "apiextensions.k8s.io/CustomResourceDefinition":
			return -1000
		// We need to create ServiceAccounts, Roles before we bind them with a RoleBinding
		case gk == "/ServiceAccount" || gk == "rbac.authorization.k8s.io/ClusterRole":
			return 1
		case gk == "rbac.authorization.k8s.io/ClusterRoleBinding":
			return 2
		// validatingwebhookconfiguration is configured to FAIL-OPEN in the default install. For the
		// re-install case we want to apply the validatingwebhookconfiguration first to reset any
		// orphaned validatingwebhookconfiguration that is FAIL-CLOSE.
		case gk == "admissionregistration.k8s.io/ValidatingWebhookConfiguration":
			return 3
		// Pods might need configmap or secrets - avoid backoff by creating them first.
		// Fix: the kind string is "Secret" (see SecretStr); the previous "/Secrets"
		// comparison could never match, so secrets fell into the default bucket.
		case gk == "/ConfigMap" || gk == "/Secret":
			return 100
		// Create the pods after we've created other things they might be waiting for
		case gk == "extensions/Deployment" || gk == "apps/Deployment":
			return 1000
		// Autoscalers typically act on a deployment
		case gk == "autoscaling/HorizontalPodAutoscaler":
			return 1001
		// Create services late - after pods have been started
		case gk == "/Service":
			return 10000
		default:
			return 1000
		}
	}
}
// ObjectsNotInLists returns the members of objects that do not appear in any
// of the given lists (comparison is by pointer identity).
func ObjectsNotInLists(objects K8sObjects, lists ...K8sObjects) K8sObjects {
	excluded := make(map[*K8sObject]bool)
	for _, list := range lists {
		for _, obj := range list {
			excluded[obj] = true
		}
	}

	var kept K8sObjects
	for _, obj := range objects {
		if excluded[obj] {
			continue
		}
		kept = append(kept, obj)
	}
	return kept
}
// KindObjects returns the subset of objs with the given kind.
func KindObjects(objs K8sObjects, kind string) K8sObjects {
	var matched K8sObjects
	for _, obj := range objs {
		if obj.Kind != kind {
			continue
		}
		matched = append(matched, obj)
	}
	return matched
}
// ParseK8SYAMLToIstioOperator parses a IstioOperator CustomResource YAML string and unmarshals in into
// an IstioOperatorSpec object. It returns the object and an API group/version with it.
func ParseK8SYAMLToIstioOperator(yml string) (*v1alpha1.IstioOperator, *schema.GroupVersionKind, error) {
	obj, err := ParseYAMLToK8sObject([]byte(yml))
	if err != nil {
		return nil, nil, err
	}
	iop := &v1alpha1.IstioOperator{}
	if err := yaml.UnmarshalStrict([]byte(yml), iop); err != nil {
		return nil, nil, err
	}
	// Propagate the object's namespace into the spec for downstream consumers.
	v1alpha1.SetNamespace(iop.Spec, obj.Namespace)
	gvk := obj.GroupVersionKind()
	return iop, &gvk, nil
}
// AllObjectHashes returns a map with object hashes of all the objects contained in cmm as the keys.
func AllObjectHashes(m string) map[string]bool {
	objs, err := ParseK8sObjectsFromYAMLManifest(m)
	if err != nil {
		// Best-effort: log and hash whatever did parse.
		log.Error(err.Error())
	}
	hashes := make(map[string]bool, len(objs))
	for _, obj := range objs {
		hashes[obj.Hash()] = true
	}
	return hashes
}
// resolvePDBConflict When user uses both minAvailable and
// maxUnavailable to configure istio instances, these two
// parameters are mutually exclusive, care must be taken
// to resolve the issue
func resolvePDBConflict(o *K8sObject) *K8sObject {
	// Only objects with a populated JSON cache are considered.
	if o.json == nil {
		return o
	}
	if o.object.Object["spec"] == nil {
		return o
	}
	// NOTE(review): assumes spec is a map; a non-map spec would panic on this
	// assertion — confirm callers only pass decoded PDB objects.
	spec := o.object.Object["spec"].(map[string]any)
	// isDefault reports whether the value scales to 0 (e.g. 0 or "0%"), i.e.
	// is effectively unset. Unrecognized types are treated as 0.
	isDefault := func(item any) bool {
		var ii intstr.IntOrString
		switch item := item.(type) {
		case int:
			ii = intstr.FromInt32(int32(item))
		case int64:
			// NOTE(review): int64 -> int32 narrowing; values over 2^31-1 would
			// wrap, but such PDB values are not meaningful in practice.
			ii = intstr.FromInt32(int32(item))
		case string:
			ii = intstr.FromString(item)
		default:
			ii = intstr.FromInt32(0)
		}
		intVal, err := intstr.GetScaledValueFromIntOrPercent(&ii, 100, false)
		if err != nil || intVal == 0 {
			return true
		}
		return false
	}
	if spec["maxUnavailable"] != nil && spec["minAvailable"] != nil {
		// When both maxUnavailable and minAvailable present and
		// neither has value 0, this is considered a conflict,
		// then maxUnavailable will take precedence.
		if !isDefault(spec["maxUnavailable"]) && !isDefault(spec["minAvailable"]) {
			delete(spec, "minAvailable")
			// Make sure that the json and yaml representation of the object
			// is consistent with the changed object
			o.json = nil
			o.json, _ = o.JSON()
			if o.yaml != nil {
				o.yaml = nil
				o.yaml, _ = o.YAML()
			}
		}
	}
	return o
}
// isValidKubernetesObject reports whether obj carries both an apiVersion and a kind field.
func isValidKubernetesObject(obj unstructured.Unstructured) bool {
	_, hasAPIVersion := obj.Object["apiVersion"]
	_, hasKind := obj.Object["kind"]
	return hasAPIVersion && hasKind
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package patch implements a simple patching mechanism for k8s resources.
Paths are specified in the form a.b.c.[key:value].d.[list_entry_value], where:
- [key:value] selects a list entry in list c which contains an entry with key:value
- [list_entry_value] selects a list entry in list d which is a regex match of list_entry_value.
Some examples are given below. Given a resource:
kind: Deployment
metadata:
name: istio-citadel
namespace: istio-system
a:
b:
- name: n1
value: v1
- name: n2
list:
- "vv1"
- vv2=foo
values and list entries can be added, modified or deleted.
# MODIFY
1. set v1 to v1new
path: a.b.[name:n1].value
value: v1new
2. set vv1 to vv3
// Note the lack of quotes around vv1 (see NOTES below).
path: a.b.[name:n2].list.[vv1]
value: vv3
3. set vv2=foo to vv2=bar (using regex match)
path: a.b.[name:n2].list.[vv2]
value: vv2=bar
4. replace a port whose port was 15010
- path: spec.ports.[port:15010]
value:
port: 15020
name: grpc-xds
protocol: TCP
# DELETE
1. Delete container with name: n1
path: a.b.[name:n1]
2. Delete list value vv1
path: a.b.[name:n2].list.[vv1]
# ADD
1. Add vv3 to list
path: a.b.[name:n2].list.[1000]
value: vv3
Note: the value 1000 is an example. That value used in the patch should
be a value greater than number of the items in the list. Choose 1000 is
just an example which normally is greater than the most of the lists used.
2. Add new key:value to container name: n1
path: a.b.[name:n1]
value:
new_attr: v3
*NOTES*
- Due to loss of string quoting during unmarshaling, keys and values should not be string quoted, even if they appear
that way in the object being patched.
- [key:value] treats ':' as a special separator character. Any ':' in the key or value string must be escaped as \:.
*/
package patch
import (
"fmt"
"strings"
yaml2 "gopkg.in/yaml.v2"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/helm"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/log"
)
// scope is the logging scope for manifest patching.
var scope = log.RegisterScope("patch", "patch")
// overlayMatches reports whether obj matches the overlay for either the default namespace or no namespace (cluster scope).
func overlayMatches(overlay *v1alpha1.K8SObjectOverlay, obj *object.K8sObject, defaultNamespace string) bool {
	oh := obj.Hash()
	return oh == object.Hash(overlay.Kind, defaultNamespace, overlay.Name) ||
		oh == object.Hash(overlay.Kind, "", overlay.Name)
}
// YAMLManifestPatch patches a base YAML in the given namespace with a list of overlays.
// Each overlay has the format described in the K8SObjectOverlay definition.
// It returns the patched manifest YAML. Errors for individual objects/overlays are
// accumulated; patching continues past them and the combined error is returned at the end.
func YAMLManifestPatch(baseYAML string, defaultNamespace string, overlays []*v1alpha1.K8SObjectOverlay) (string, error) {
	var ret strings.Builder
	var errs util.Errors
	objs, err := object.ParseK8sObjectsFromYAMLManifest(baseYAML)
	if err != nil {
		return "", err
	}
	// matches records, per overlay, the objects it applied to so unmatched or
	// multiply-matched overlays can be reported below.
	matches := make(map[*v1alpha1.K8SObjectOverlay]object.K8sObjects)
	// Try to apply the defined overlays.
	for _, obj := range objs {
		oy, err := obj.YAML()
		if err != nil {
			errs = util.AppendErr(errs, fmt.Errorf("object to YAML error (%s) for base object: \n%s", err, obj.YAMLDebugString()))
			continue
		}
		oys := string(oy)
		for _, overlay := range overlays {
			if overlayMatches(overlay, obj, defaultNamespace) {
				matches[overlay] = append(matches[overlay], obj)
				// NOTE(review): applyPatches always starts from obj's original YAML, so if
				// several overlays match the same object, only the last overlay's result is
				// kept in oys — confirm this is intended.
				var errs2 util.Errors
				oys, errs2 = applyPatches(obj, overlay.Patches)
				errs = util.AppendErrs(errs, errs2)
			}
		}
		// Emit the (possibly patched) object followed by the YAML document separator.
		if _, err := ret.WriteString(oys + helm.YAMLSeparator); err != nil {
			errs = util.AppendErr(errs, fmt.Errorf("writeString: %s", err))
		}
	}
	for _, overlay := range overlays {
		// Each overlay should have exactly one match in the output manifest.
		switch {
		case len(matches[overlay]) == 0:
			errs = util.AppendErr(errs, fmt.Errorf("overlay for %s:%s does not match any object in output manifest. Available objects are:\n%s",
				overlay.Kind, overlay.Name, strings.Join(objs.Keys(), "\n")))
		case len(matches[overlay]) > 1:
			errs = util.AppendErr(errs, fmt.Errorf("overlay for %s:%s matches multiple objects in output manifest:\n%s",
				overlay.Kind, overlay.Name, strings.Join(objs.Keys(), "\n")))
		}
	}
	return ret.String(), errs.ToError()
}
// applyPatches applies the given patches against the given object. It returns the resulting patched YAML if successful,
// or a list of errors otherwise. Patch failures are accumulated and counted in metrics; remaining patches still run.
func applyPatches(base *object.K8sObject, patches []*v1alpha1.K8SObjectOverlay_PathValue) (outYAML string, errs util.Errors) {
	baseYAML, err := base.YAML()
	if err != nil {
		return "", util.NewErrs(err)
	}
	// Use yaml2 specifically to allow interface{} as key which WritePathContext treats specially.
	tree := make(map[any]any)
	if err := yaml2.Unmarshal(baseYAML, &tree); err != nil {
		return "", util.NewErrs(err)
	}
	for _, patch := range patches {
		value := patch.Value.AsInterface()
		// A patch with no path has nothing to address; warn and move on.
		if strings.TrimSpace(patch.Path) == "" {
			scope.Warnf("value=%s has empty path, skip\n", value)
			continue
		}
		scope.Debugf("applying path=%s, value=%s\n", patch.Path, value)
		pctx, _, err := tpath.GetPathContext(tree, util.PathFromString(patch.Path), true)
		if err != nil {
			errs = util.AppendErr(errs, err)
			metrics.ManifestPatchErrorTotal.Increment()
			continue
		}
		if err := tpath.WritePathContext(pctx, value, false); err != nil {
			errs = util.AppendErr(errs, err)
			metrics.ManifestPatchErrorTotal.Increment()
		}
	}
	patched, err := yaml2.Marshal(tree)
	if err != nil {
		return "", util.AppendErr(errs, err)
	}
	return string(patched), errs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
struct.go contains functions for traversing and modifying trees of Go structs.
*/
package tpath
import (
"fmt"
"reflect"
"strconv"
"google.golang.org/protobuf/types/known/structpb"
"istio.io/istio/operator/pkg/util"
)
// GetFromStructPath returns the value at path from the given node, or false if the path does not exist.
func GetFromStructPath(node any, path string) (any, bool, error) {
return getFromStructPath(node, util.PathFromString(path))
}
// getFromStructPath is the internal implementation of GetFromStructPath which recurses through a tree of Go structs
// given a path. It terminates when the end of the path is reached or a path element does not exist.
func getFromStructPath(node any, path util.Path) (any, bool, error) {
	scope.Debugf("getFromStructPath path=%s, node(%T)", path, node)
	// Terminal case: the whole path has been consumed; found only if the node is non-nil.
	if len(path) == 0 {
		scope.Debugf("getFromStructPath returning node(%T)%v", node, node)
		return node, !util.IsValueNil(node), nil
	}
	// For protobuf types, switch them out with standard types; otherwise we will traverse protobuf internals rather
	// than the standard representation
	if v, ok := node.(*structpb.Struct); ok {
		node = v.AsMap()
	}
	if v, ok := node.(*structpb.Value); ok {
		node = v.AsInterface()
	}
	val := reflect.ValueOf(node)
	kind := reflect.TypeOf(node).Kind()
	var structElems reflect.Value
	switch kind {
	case reflect.Map:
		// Maps are indexed directly by the next path element.
		if path[0] == "" {
			return nil, false, fmt.Errorf("getFromStructPath path %s, empty map key value", path)
		}
		mapVal := val.MapIndex(reflect.ValueOf(path[0]))
		if !mapVal.IsValid() {
			return nil, false, fmt.Errorf("getFromStructPath path %s, path does not exist", path)
		}
		return getFromStructPath(mapVal.Interface(), path[1:])
	case reflect.Slice:
		// Slices require a numeric path element as index.
		idx, err := strconv.Atoi(path[0])
		if err != nil {
			return nil, false, fmt.Errorf("getFromStructPath path %s, expected index number, got %s", path, path[0])
		}
		return getFromStructPath(val.Index(idx).Interface(), path[1:])
	case reflect.Ptr:
		// Only struct pointers may be traversed by field name; fall through to the field scan below.
		structElems = reflect.ValueOf(node).Elem()
		if !util.IsStruct(structElems) {
			return nil, false, fmt.Errorf("getFromStructPath path %s, expected struct ptr, got %T", path, node)
		}
	default:
		return nil, false, fmt.Errorf("getFromStructPath path %s, unsupported type %T", path, node)
	}
	if util.IsNilOrInvalidValue(structElems) {
		return nil, false, nil
	}
	// Scan the struct for a field whose name matches the next path element.
	for i := 0; i < structElems.NumField(); i++ {
		fieldName := structElems.Type().Field(i).Name
		if fieldName != path[0] {
			continue
		}
		fv := structElems.Field(i)
		return getFromStructPath(fv.Interface(), path[1:])
	}
	// No matching field: the path does not exist (not an error).
	return nil, false, nil
}
// SetFromPath sets out with the value at path from node. out is not set if the path doesn't exist or the value is nil.
// All intermediate along path must be type struct ptr. Out must be either a struct ptr or map ptr.
// TODO: move these out to a separate package (istio/istio#15494).
func SetFromPath(node any, path string, out any) (bool, error) {
	val, found, err := GetFromStructPath(node, path)
	switch {
	case err != nil:
		return false, err
	case !found:
		// Nothing at the path: leave out untouched.
		return false, nil
	}
	return true, Set(val, out)
}
// Set sets out with the value at path from node. out is not set if the path doesn't exist or the value is nil.
func Set(val, out any) error {
	outV := reflect.ValueOf(out)
	// Special case: map out type must be set through map ptr; same for slices.
	if (util.IsMap(val) && util.IsMapPtr(out)) || (util.IsSlice(val) && util.IsSlicePtr(out)) {
		outV.Elem().Set(reflect.ValueOf(val))
		return nil
	}
	// Remaining cases require identical dynamic types on both sides.
	if reflect.TypeOf(val) != reflect.TypeOf(out) {
		return fmt.Errorf("setFromPath from type %T != to type %T, %v", val, out, util.IsSlicePtr(out))
	}
	if !outV.CanSet() {
		return fmt.Errorf("can't set %v(%T) to out type %T", val, val, out)
	}
	outV.Set(reflect.ValueOf(val))
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
tree.go contains functions for traversing and updating a tree constructed from yaml or json.Unmarshal.
Nodes in such trees have the form map[interface{}]interface{} or map[interface{}][]interface{}.
For some tree updates, like delete or append, it's necessary to have access to the parent node. PathContext is a
tree constructed during tree traversal that gives access to ancestor nodes all the way up to the root, which can be
used for this purpose.
*/
package tpath
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"regexp"
"strconv"
"strings"
"gopkg.in/yaml.v2"
yaml2 "sigs.k8s.io/yaml"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/log"
)
// scope is the logging scope used by the tpath tree-traversal package.
var scope = log.RegisterScope("tpath", "tree traverser")
// PathContext provides a means for traversing a tree towards the root.
type PathContext struct {
	// Parent is the Parent of this PathContext.
	Parent *PathContext
	// KeyToChild is the key required to reach the child.
	KeyToChild any
	// Node is the actual Node in the data tree.
	Node any
}
// String implements the Stringer interface.
func (nc *PathContext) String() string {
	var sb strings.Builder
	sb.WriteString("\n--------------- NodeContext ------------------\n")
	// Parent information is only available for non-root contexts.
	if nc.Parent != nil {
		sb.WriteString(fmt.Sprintf("Parent.Node=\n%s\n", nc.Parent.Node))
		sb.WriteString(fmt.Sprintf("KeyToChild=%v\n", nc.Parent.KeyToChild))
	}
	sb.WriteString(fmt.Sprintf("Node=\n%s\n", nc.Node))
	sb.WriteString("----------------------------------------------\n")
	return sb.String()
}
// GetPathContext returns the PathContext for the Node which has the given path from root.
// It returns false and no error if the given path is not found, or an error code in other error situations, like
// a malformed path.
// It also creates a tree of PathContexts during the traversal so that Parent nodes can be updated if required. This is
// required when (say) appending to a list, where the parent list itself must be updated.
func GetPathContext(root any, path util.Path, createMissing bool) (*PathContext, bool, error) {
	// Wrap the root in a parentless context and start the recursive descent.
	rootCtx := &PathContext{Node: root}
	return getPathContext(rootCtx, path, path, createMissing)
}
// WritePathContext writes the given value to the Node in the given PathContext.
// A nil value is interpreted as a request to delete the node at the context's path.
func WritePathContext(nc *PathContext, value any, merge bool) error {
	scope.Debugf("WritePathContext PathContext=%s, value=%v", nc, value)
	if !util.IsValueNil(value) {
		return setPathContext(nc, value, merge)
	}
	// Nil value: delete the node from its parent container.
	scope.Debug("delete")
	if nc.Parent == nil {
		return errors.New("cannot delete root element")
	}
	switch {
	case isSliceOrPtrInterface(nc.Parent.Node):
		if err := util.DeleteFromSlicePtr(nc.Parent.Node, nc.Parent.KeyToChild.(int)); err != nil {
			return err
		}
		// Guard: writing the shortened list back requires a grandparent map. Without this
		// check, deleting a list entry whose parent is the root dereferenced a nil pointer.
		if nc.Parent.Parent == nil {
			return fmt.Errorf("cannot delete path: no parent node above list parent")
		}
		if isMapOrInterface(nc.Parent.Parent.Node) {
			return util.InsertIntoMap(nc.Parent.Parent.Node, nc.Parent.Parent.KeyToChild, nc.Parent.Node)
		}
		// TODO: The case of deleting a list.list.node element is not currently supported.
		return fmt.Errorf("cannot delete path: unsupported parent.parent type %T for delete", nc.Parent.Parent.Node)
	case util.IsMap(nc.Parent.Node):
		return util.DeleteFromMap(nc.Parent.Node, nc.Parent.KeyToChild)
	default:
	}
	return fmt.Errorf("cannot delete path: unsupported parent type %T for delete", nc.Parent.Node)
}
// WriteNode writes value to the tree in root at the given path, creating any required missing internal nodes in path.
func WriteNode(root any, path util.Path, value any) error {
	// Resolve (and create, if needed) the context for the target path, then write without merging.
	ctx, _, err := getPathContext(&PathContext{Node: root}, path, path, true)
	if err != nil {
		return err
	}
	return WritePathContext(ctx, value, false)
}
// MergeNode merges value to the tree in root at the given path, creating any required missing internal nodes in path.
func MergeNode(root any, path util.Path, value any) error {
	// Resolve (and create, if needed) the context for the target path, then write with merge semantics.
	ctx, _, err := getPathContext(&PathContext{Node: root}, path, path, true)
	if err != nil {
		return err
	}
	return WritePathContext(ctx, value, true)
}
// Find returns the value at path from the given tree, or false if the path does not exist.
// It behaves differently from GetPathContext in that it never creates map entries at the leaf and does not provide
// a way to mutate the parent of the found node.
func Find(inputTree map[string]any, path util.Path) (any, bool, error) {
	scope.Debugf("Find path=%s", path)
	// An empty path cannot address anything in the tree.
	if len(path) == 0 {
		return nil, false, fmt.Errorf("path is empty")
	}
	result, ok := find(inputTree, path)
	return result, ok, nil
}
// Delete sets value at path of input untyped tree to nil
func Delete(root map[string]any, path util.Path) (bool, error) {
	// Look up the path without creating missing nodes; a nil write deletes the node.
	ctx, _, err := getPathContext(&PathContext{Node: root}, path, path, false)
	if err != nil {
		return false, err
	}
	return true, WritePathContext(ctx, nil, false)
}
// getPathContext is the internal implementation of GetPathContext.
// If createMissing is true, it creates any missing map (but NOT list) path entries in root.
func getPathContext(nc *PathContext, fullPath, remainPath util.Path, createMissing bool) (*PathContext, bool, error) {
	scope.Debugf("getPathContext remainPath=%s, Node=%v", remainPath, nc.Node)
	// Terminal case: the full path has been consumed.
	if len(remainPath) == 0 {
		return nc, true, nil
	}
	pe := remainPath[0]
	if nc.Node == nil {
		if !createMissing {
			return nil, false, fmt.Errorf("node %s is zero", pe)
		}
		// Materialize a node of the type implied by the next path element:
		// [N] or [key:value] implies a list, otherwise a map.
		if util.IsNPathElement(pe) || util.IsKVPathElement(pe) {
			nc.Node = []any{}
		} else {
			nc.Node = make(map[string]any)
		}
	}
	// Unwrap pointers/interfaces so the type switches below see the concrete node.
	v := reflect.ValueOf(nc.Node)
	if v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	ncNode := v.Interface()
	// For list types, we need a key to identify the selected list item. This can be either a value key of the
	// form :matching_value in the case of a leaf list, or a matching key:value in the case of a non-leaf list.
	if lst, ok := ncNode.([]any); ok {
		scope.Debug("list type")
		// If the path element has the form [N], a list element is being selected by index. Return the element at index
		// N if it exists.
		if util.IsNPathElement(pe) {
			idx, err := util.PathN(pe)
			if err != nil {
				return nil, false, fmt.Errorf("path %s, index %s: %s", fullPath, pe, err)
			}
			var foundNode any
			if idx >= len(lst) || idx < 0 {
				if !createMissing {
					return nil, false, fmt.Errorf("index %d exceeds list length %d at path %s", idx, len(lst), remainPath)
				}
				// Out-of-range index with createMissing: append a fresh map at the end.
				idx = len(lst)
				foundNode = make(map[string]any)
			} else {
				foundNode = lst[idx]
			}
			nn := &PathContext{
				Parent: nc,
				Node:   foundNode,
			}
			nc.KeyToChild = idx
			return getPathContext(nn, fullPath, remainPath[1:], createMissing)
		}
		// Otherwise the path element must have form [key:value]. In this case, go through all list elements, which
		// must have map type, and try to find one which has a matching key:value.
		for idx, le := range lst {
			// non-leaf list, expect to match item by key:value.
			if lm, ok := le.(map[any]any); ok {
				k, v, err := util.PathKV(pe)
				if err != nil {
					return nil, false, fmt.Errorf("path %s: %s", fullPath, err)
				}
				if stringsEqual(lm[k], v) {
					scope.Debugf("found matching kv %v:%v", k, v)
					nn := &PathContext{
						Parent: nc,
						Node:   lm,
					}
					nc.KeyToChild = idx
					nn.KeyToChild = k
					if len(remainPath) == 1 {
						scope.Debug("KV terminate")
						return nn, true, nil
					}
					return getPathContext(nn, fullPath, remainPath[1:], createMissing)
				}
				continue
			}
			// repeat of the block above for the case where tree unmarshals to map[string]interface{}. There doesn't
			// seem to be a way to merge this case into the above block.
			if lm, ok := le.(map[string]any); ok {
				k, v, err := util.PathKV(pe)
				if err != nil {
					return nil, false, fmt.Errorf("path %s: %s", fullPath, err)
				}
				if stringsEqual(lm[k], v) {
					scope.Debugf("found matching kv %v:%v", k, v)
					nn := &PathContext{
						Parent: nc,
						Node:   lm,
					}
					nc.KeyToChild = idx
					nn.KeyToChild = k
					if len(remainPath) == 1 {
						scope.Debug("KV terminate")
						return nn, true, nil
					}
					return getPathContext(nn, fullPath, remainPath[1:], createMissing)
				}
				continue
			}
			// leaf list, expect path element [V], match based on value V.
			v, err := util.PathV(pe)
			if err != nil {
				return nil, false, fmt.Errorf("path %s: %s", fullPath, err)
			}
			if matchesRegex(v, le) {
				scope.Debugf("found matching key %v, index %d", le, idx)
				nn := &PathContext{
					Parent: nc,
					Node:   le,
				}
				nc.KeyToChild = idx
				return getPathContext(nn, fullPath, remainPath[1:], createMissing)
			}
		}
		return nil, false, fmt.Errorf("path %s: element %s not found", fullPath, pe)
	}
	if util.IsMap(ncNode) {
		scope.Debug("map type")
		var nn any
		if m, ok := ncNode.(map[any]any); ok {
			nn, ok = m[pe]
			if !ok {
				// remainPath == 1 means the patch is creation of a new leaf.
				if createMissing || len(remainPath) == 1 {
					m[pe] = make(map[any]any)
					nn = m[pe]
				} else {
					return nil, false, fmt.Errorf("path not found at element %s in path %s", pe, fullPath)
				}
			}
		}
		// A nil typed map cannot hold entries; replace it with a fresh one.
		if reflect.ValueOf(ncNode).IsNil() {
			ncNode = make(map[string]any)
			nc.Node = ncNode
		}
		if m, ok := ncNode.(map[string]any); ok {
			nn, ok = m[pe]
			if !ok {
				// remainPath == 1 means the patch is creation of a new leaf.
				if createMissing || len(remainPath) == 1 {
					// Create a slice child when the next path element is an index, else a map child.
					nextElementNPath := len(remainPath) > 1 && util.IsNPathElement(remainPath[1])
					if nextElementNPath {
						scope.Debug("map type, slice child")
						m[pe] = make([]any, 0)
					} else {
						scope.Debug("map type, map child")
						m[pe] = make(map[string]any)
					}
					nn = m[pe]
				} else {
					return nil, false, fmt.Errorf("path not found at element %s in path %s", pe, fullPath)
				}
			}
		}
		npc := &PathContext{
			Parent: nc,
			Node:   nn,
		}
		// for slices, use the address so that the slice can be mutated.
		if util.IsSlice(nn) {
			npc.Node = &nn
		}
		nc.KeyToChild = pe
		return getPathContext(npc, fullPath, remainPath[1:], createMissing)
	}
	return nil, false, fmt.Errorf("leaf type %T in non-leaf Node %s", nc.Node, remainPath)
}
// setPathContext writes the given value to the Node in the given PathContext,
// enlarging all PathContext lists to ensure all indexes are valid.
func setPathContext(nc *PathContext, value any, merge bool) error {
	processParent, err := setValueContext(nc, value, merge)
	if err != nil {
		return err
	}
	if !processParent {
		return nil
	}
	// If the path included insertions, process them now
	if nc.Parent.Parent == nil {
		return nil
	}
	return setPathContext(nc.Parent, nc.Parent.Node, false) // note: tail recursive
}
// setValueContext writes the given value to the Node in the given PathContext.
// If setting the value requires growing the final slice, grows it.
// The bool return indicates whether the caller should continue propagating the
// write up through the parent contexts.
func setValueContext(nc *PathContext, value any, merge bool) (bool, error) {
	// Root node has no parent to write into; nothing to do.
	if nc.Parent == nil {
		return false, nil
	}
	// A string value may itself encode a YAML map; detect that up front.
	vv, mapFromString := tryToUnmarshalStringToYAML(value)
	switch parentNode := nc.Parent.Node.(type) {
	case *any:
		switch vParentNode := (*parentNode).(type) {
		case []any:
			idx := nc.Parent.KeyToChild.(int)
			if idx == -1 {
				// Treat -1 as insert-at-end of list
				idx = len(vParentNode)
			}
			// Grow the slice (padding with nils) so idx is addressable.
			if idx >= len(vParentNode) {
				newElements := make([]any, idx-len(vParentNode)+1)
				vParentNode = append(vParentNode, newElements...)
				*parentNode = vParentNode
			}
			merged, err := mergeConditional(vv, nc.Node, merge)
			if err != nil {
				return false, err
			}
			vParentNode[idx] = merged
			nc.Node = merged
		default:
			return false, fmt.Errorf("don't know about vtype %T", vParentNode)
		}
	case map[string]any:
		key := nc.Parent.KeyToChild.(string)
		// Update is treated differently depending on whether the value is a scalar or map type. If scalar,
		// insert a new element into the terminal node, otherwise replace the terminal node with the new subtree.
		if ncNode, ok := nc.Node.(*any); ok && !mapFromString {
			switch vNcNode := (*ncNode).(type) {
			case []any:
				switch vv.(type) {
				case map[string]any:
					// the vv is a map, and the node is a slice
					mergedValue := append(vNcNode, vv)
					parentNode[key] = mergedValue
				case *any:
					merged, err := mergeConditional(vv, vNcNode, merge)
					if err != nil {
						return false, err
					}
					parentNode[key] = merged
					nc.Node = merged
				default:
					// vv is a basic JSON type (int, float, string, bool); append it to the slice.
					vv = append(vNcNode, vv)
					parentNode[key] = vv
					nc.Node = vv
				}
			default:
				return false, fmt.Errorf("don't know about vnc type %T", vNcNode)
			}
		} else {
			// For map passed as string type, the root is the new key.
			if mapFromString {
				if err := util.DeleteFromMap(nc.Parent.Node, nc.Parent.KeyToChild); err != nil {
					return false, err
				}
				vm := vv.(map[string]any)
				newKey := getTreeRoot(vm)
				return false, util.InsertIntoMap(nc.Parent.Node, newKey, vm[newKey])
			}
			parentNode[key] = vv
			nc.Node = vv
		}
	// TODO `map[interface{}]interface{}` is used by tests in operator/cmd/mesh, we should add our own tests
	case map[any]any:
		key := nc.Parent.KeyToChild.(string)
		parentNode[key] = vv
		nc.Node = vv
	default:
		return false, fmt.Errorf("don't know about type %T", parentNode)
	}
	return true, nil
}
// mergeConditional returns a merge of newVal and originalVal if merge is true, otherwise it returns newVal.
func mergeConditional(newVal, originalVal any, merge bool) (any, error) {
	// Without merge semantics (or with nothing to merge into), the new value wins outright.
	if !merge || util.IsValueNilOrDefault(originalVal) {
		return newVal, nil
	}
	newYAML, err := yaml.Marshal(newVal)
	if err != nil {
		return nil, err
	}
	if util.IsYAMLEmpty(string(newYAML)) {
		return originalVal, nil
	}
	originalYAML, err := yaml.Marshal(originalVal)
	if err != nil {
		return nil, err
	}
	if util.IsYAMLEmpty(string(originalYAML)) {
		return newVal, nil
	}
	// Overlay the new YAML on top of the original YAML.
	mergedYAML, err := util.OverlayYAML(string(originalYAML), string(newYAML))
	if err != nil {
		return nil, err
	}
	if util.IsMap(originalVal) {
		// For JSON compatibility
		merged := make(map[string]any)
		if err := yaml.Unmarshal([]byte(mergedYAML), &merged); err != nil {
			return nil, err
		}
		return merged, nil
	}
	// For scalars and slices, copy the type
	merged := originalVal
	if err := yaml.Unmarshal([]byte(mergedYAML), &merged); err != nil {
		return nil, err
	}
	return merged, nil
}
// find returns the value at path from the given tree, or false if the path does not exist.
func find(treeNode any, path util.Path) (any, bool) {
	if len(path) == 0 || treeNode == nil {
		return nil, false
	}
	var val any
	switch nt := treeNode.(type) {
	case map[any]any:
		val = nt[path[0]]
	case map[string]any:
		val = nt[path[0]]
	case []any:
		// Slices are addressed by numeric index; the terminal check below is
		// deliberately skipped here (a path ending at a slice index is not found),
		// matching the recursive call with the shortened path.
		idx, err := strconv.Atoi(path[0])
		if err != nil || idx >= len(nt) {
			return nil, false
		}
		return find(nt[idx], path[1:])
	default:
		// Leaf value but path remains: not found.
		return nil, false
	}
	if val == nil {
		return nil, false
	}
	if len(path) == 1 {
		return val, true
	}
	return find(val, path[1:])
}
// stringsEqual reports whether the string representations of a and b are equal. a and b may have different types.
func stringsEqual(a, b any) bool {
	as, bs := fmt.Sprint(a), fmt.Sprint(b)
	return as == bs
}
// matchesRegex reports whether str regex matches pattern.
func matchesRegex(pattern, str any) bool {
	ps, ss := fmt.Sprint(pattern), fmt.Sprint(str)
	match, err := regexp.MatchString(ps, ss)
	if err != nil {
		// An invalid pattern never matches; log and report no match.
		log.Errorf("bad regex expression %s", ps)
		return false
	}
	scope.Debugf("%v regex %v? %v\n", pattern, str, match)
	return match
}
// isSliceOrPtrInterface reports whether v is a slice, a ptr to slice or interface to slice.
func isSliceOrPtrInterface(v any) bool {
	val := reflect.ValueOf(v)
	// Unwrap at most one pointer, then at most one interface (in that order),
	// mirroring the shapes this helper is expected to see.
	if val.Kind() == reflect.Ptr {
		val = val.Elem()
	}
	if val.Kind() == reflect.Interface {
		val = val.Elem()
	}
	return val.Kind() == reflect.Slice
}
// isMapOrInterface reports whether v is a map, or interface to a map.
func isMapOrInterface(v any) bool {
	val := reflect.ValueOf(v)
	// Unwrap a single interface layer before checking the kind.
	if val.Kind() == reflect.Interface {
		val = val.Elem()
	}
	return val.Kind() == reflect.Map
}
// tryToUnmarshalStringToYAML tries to unmarshal something that may be a YAML list or map into a structure. If not
// possible, returns original scalar value. The bool result reports whether a map was produced.
func tryToUnmarshalStringToYAML(s any) (any, bool) {
	// If value type is a string it could either be a literal string or a map type passed as a string. Try to unmarshal
	// to discover it's the latter.
	vv := s
	if reflect.TypeOf(vv).Kind() != reflect.String {
		// Non-string values are returned unchanged.
		return vv, false
	}
	str := vv.(string)
	lines := strings.Split(str, "\n")
	// Need to be careful not to transform string literals into maps unless they really are maps, since scalar handling
	// is different for inserts.
	looksLikeMap := (len(lines) == 1 && strings.Contains(str, ": ")) ||
		(len(lines) > 1 && strings.Contains(str, ":"))
	if looksLikeMap {
		parsed := make(map[string]any)
		if err := json.Unmarshal([]byte(str), &parsed); err == nil {
			// treat JSON as string
			return vv, false
		}
		if err := yaml2.Unmarshal([]byte(str), &parsed); err == nil {
			return parsed, true
		}
	}
	// looks like a literal or failed unmarshal, return original type.
	return vv, false
}
// getTreeRoot returns the first key found in m. It assumes a single root tree.
func getTreeRoot(m map[string]any) string {
	var root string
	// Only one iteration is needed: a single-root tree has exactly one top-level key.
	for k := range m {
		root = k
		break
	}
	return root
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
util.go contains utility function for dealing with trees.
*/
package tpath
import (
"gopkg.in/yaml.v2"
yaml2 "sigs.k8s.io/yaml"
"istio.io/istio/operator/pkg/util"
)
// AddSpecRoot adds a root node called "spec" to the given tree and returns the resulting tree.
func AddSpecRoot(tree string) (string, error) {
	inner := make(map[string]any)
	if err := yaml.Unmarshal([]byte(tree), &inner); err != nil {
		return "", err
	}
	// Wrap the parsed tree under a single "spec" key and re-serialize.
	wrapped := map[string]any{"spec": inner}
	out, err := yaml.Marshal(wrapped)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// GetSpecSubtree returns the subtree under "spec".
// It is a convenience wrapper around GetConfigSubtree with a fixed "spec" path.
func GetSpecSubtree(yml string) (string, error) {
	return GetConfigSubtree(yml, "spec")
}
// GetConfigSubtree returns the subtree at the given path.
func GetConfigSubtree(manifest, path string) (string, error) {
	tree := make(map[string]any)
	if err := yaml2.Unmarshal([]byte(manifest), &tree); err != nil {
		return "", err
	}
	// Resolve the path without creating missing nodes; an unresolvable path is an error.
	pc, _, err := GetPathContext(tree, util.PathFromString(path), false)
	if err != nil {
		return "", err
	}
	subtree, err := yaml2.Marshal(pc.Node)
	if err != nil {
		return "", err
	}
	return string(subtree), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package translate defines translations from installer proto to values.yaml.
package translate
import (
"encoding/json"
"fmt"
"reflect"
"sort"
"strings"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/structpb"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/apis/istio"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/version"
oversion "istio.io/istio/operator/version"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
const (
	// HelmValuesEnabledSubpath is the subpath from the component root to the enabled parameter.
	HelmValuesEnabledSubpath = "enabled"
	// HelmValuesNamespaceSubpath is the subpath from the component root to the namespace parameter.
	HelmValuesNamespaceSubpath = "namespace"
	// HelmValuesHubSubpath is the subpath from the component root to the hub parameter.
	HelmValuesHubSubpath = "hub"
	// HelmValuesTagSubpath is the subpath from the component root to the tag parameter.
	HelmValuesTagSubpath = "tag"
	// defaultIngressGWName is the default ingress gateway resource name.
	defaultIngressGWName = "istio-ingressgateway"
	// defaultEgressGWName is the default egress gateway resource name.
	defaultEgressGWName = "istio-egressgateway"
)

// scope is the logging scope used by the translate package.
var scope = log.RegisterScope("translator", "API translator")
// Translator is a set of mappings to translate between API paths, charts, values.yaml and k8s paths.
type Translator struct {
	// Version of the translations; translations remain the same within a minor version.
	Version version.MinorVersion
	// APIMapping is a mapping between an API path and the corresponding values.yaml path using longest prefix
	// match. If the path is a non-leaf node, the output path is the matching portion of the path, plus any remaining
	// output path.
	APIMapping map[string]*Translation `yaml:"apiMapping"`
	// KubernetesMapping defines mappings from an IstioOperator API paths to k8s resource paths.
	KubernetesMapping map[string]*Translation `yaml:"kubernetesMapping"`
	// GlobalNamespaces maps feature namespaces to Helm global namespace definitions.
	GlobalNamespaces map[name.ComponentName]string `yaml:"globalNamespaces"`
	// ComponentMaps is a set of mappings for each Istio component.
	ComponentMaps map[name.ComponentName]*ComponentMaps `yaml:"componentMaps"`
	// checkedDeprecatedAutoscalingFields records whether the translator has already checked the deprecated
	// autoscaling fields, so different components do not need to rerun the translation logic.
	checkedDeprecatedAutoscalingFields bool
}
// ComponentMaps is a set of mappings for an Istio component.
type ComponentMaps struct {
	// ResourceType maps a ComponentName to the type of the rendered k8s resource.
	ResourceType string
	// ResourceName maps a ComponentName to the name of the rendered k8s resource.
	ResourceName string
	// ContainerName maps a ComponentName to the name of the container in a Deployment.
	ContainerName string
	// HelmSubdir is a mapping between a component name and the subdirectory of the component Chart.
	HelmSubdir string
	// ToHelmValuesTreeRoot is the tree root in values YAML files for the component.
	ToHelmValuesTreeRoot string
	// SkipReverseTranslate defines whether reverse translation of this component should be skipped.
	SkipReverseTranslate bool
	// FlattenValues, if true, means the component expects values not prefixed with ToHelmValuesTreeRoot
	// For example `.name=foo` instead of `.component.name=foo`.
	FlattenValues bool
}
// TranslationFunc maps a yamlStr API path into a YAML values tree.
type TranslationFunc func(t *Translation, root map[string]any, valuesPath string, value any) error

// Translation is a mapping to an output path using a translation function.
type Translation struct {
	// OutPath defines the position in the yaml file
	OutPath string `yaml:"outPath"`
	// translationFunc performs the actual value translation for this mapping, if set.
	translationFunc TranslationFunc
}
// NewTranslator creates a new translator for minorVersion and returns a ptr to it.
func NewTranslator() *Translator {
	t := &Translator{
		Version: oversion.OperatorBinaryVersion.MinorVersion,
		// APIMapping maps top-level IstioOperatorSpec field paths to Helm values paths.
		APIMapping: map[string]*Translation{
			"hub":                  {OutPath: "global.hub"},
			"tag":                  {OutPath: "global.tag"},
			"revision":             {OutPath: "revision"},
			"meshConfig":           {OutPath: "meshConfig"},
			"compatibilityVersion": {OutPath: "compatibilityVersion"},
		},
		// GlobalNamespaces maps a component to the global values key that carries its namespace.
		GlobalNamespaces: map[name.ComponentName]string{
			name.PilotComponentName: "istioNamespace",
		},
		// ComponentMaps describes, per component, the Helm chart location, the workload
		// resource it renders (used for K8s settings overlays), and the values subtree root.
		ComponentMaps: map[name.ComponentName]*ComponentMaps{
			name.IstioBaseComponentName: {
				HelmSubdir:           "base",
				ToHelmValuesTreeRoot: "global",
				SkipReverseTranslate: true,
			},
			name.PilotComponentName: {
				ResourceType:         "Deployment",
				ResourceName:         "istiod",
				ContainerName:        "discovery",
				HelmSubdir:           "istio-control/istio-discovery",
				ToHelmValuesTreeRoot: "pilot",
			},
			name.IngressComponentName: {
				ResourceType:         "Deployment",
				ResourceName:         "istio-ingressgateway",
				ContainerName:        "istio-proxy",
				HelmSubdir:           "gateways/istio-ingress",
				ToHelmValuesTreeRoot: "gateways.istio-ingressgateway",
			},
			name.EgressComponentName: {
				ResourceType:         "Deployment",
				ResourceName:         "istio-egressgateway",
				ContainerName:        "istio-proxy",
				HelmSubdir:           "gateways/istio-egress",
				ToHelmValuesTreeRoot: "gateways.istio-egressgateway",
			},
			name.CNIComponentName: {
				ResourceType:         "DaemonSet",
				ResourceName:         "istio-cni-node",
				ContainerName:        "install-cni",
				HelmSubdir:           "istio-cni",
				ToHelmValuesTreeRoot: "cni",
			},
			name.IstiodRemoteComponentName: {
				HelmSubdir:           "istiod-remote",
				ToHelmValuesTreeRoot: "global",
				SkipReverseTranslate: true,
			},
			name.ZtunnelComponentName: {
				ResourceType:         "DaemonSet",
				ResourceName:         "ztunnel",
				HelmSubdir:           "ztunnel",
				ToHelmValuesTreeRoot: "ztunnel",
				ContainerName:        "istio-proxy",
				// FlattenValues hoists the component subtree and globals to the top level
				// of the chart values (see TranslateHelmValues).
				FlattenValues: true,
			},
		},
		// KubernetesMapping maps templated K8S settings paths in IstioOperatorSpec to
		// templated output paths into the rendered manifest objects; both sides are
		// rendered per component/resource before use (see OverlayK8sSettings).
		// nolint: lll
		KubernetesMapping: map[string]*Translation{
			"Components.{{.ComponentName}}.K8S.Affinity":            {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.affinity"},
			"Components.{{.ComponentName}}.K8S.Env":                 {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.containers.[name:{{.ContainerName}}].env"},
			"Components.{{.ComponentName}}.K8S.HpaSpec":             {OutPath: "[HorizontalPodAutoscaler:{{.ResourceName}}].spec"},
			"Components.{{.ComponentName}}.K8S.ImagePullPolicy":     {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.containers.[name:{{.ContainerName}}].imagePullPolicy"},
			"Components.{{.ComponentName}}.K8S.NodeSelector":        {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.nodeSelector"},
			"Components.{{.ComponentName}}.K8S.PodDisruptionBudget": {OutPath: "[PodDisruptionBudget:{{.ResourceName}}].spec"},
			"Components.{{.ComponentName}}.K8S.PodAnnotations":      {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.metadata.annotations"},
			"Components.{{.ComponentName}}.K8S.PriorityClassName":   {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.priorityClassName."},
			"Components.{{.ComponentName}}.K8S.ReadinessProbe":      {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.containers.[name:{{.ContainerName}}].readinessProbe"},
			"Components.{{.ComponentName}}.K8S.ReplicaCount":        {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.replicas"},
			"Components.{{.ComponentName}}.K8S.Resources":           {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.containers.[name:{{.ContainerName}}].resources"},
			"Components.{{.ComponentName}}.K8S.Strategy":            {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.strategy"},
			"Components.{{.ComponentName}}.K8S.Tolerations":         {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.tolerations"},
			"Components.{{.ComponentName}}.K8S.ServiceAnnotations":  {OutPath: "[Service:{{.ResourceName}}].metadata.annotations"},
			"Components.{{.ComponentName}}.K8S.Service":             {OutPath: "[Service:{{.ResourceName}}].spec"},
			"Components.{{.ComponentName}}.K8S.SecurityContext":     {OutPath: "[{{.ResourceType}}:{{.ResourceName}}].spec.template.spec.securityContext"},
		},
	}
	return t
}
// OverlayK8sSettings overlays k8s settings from iop over the manifest objects, based on t's translation mappings.
// yml is the rendered manifest for the component; resourceName overrides the default resource name from the
// component map when non-empty; index selects the gateway instance when componentName is a gateway.
// It returns the (possibly re-serialized) manifest YAML.
func (t *Translator) OverlayK8sSettings(yml string, iop *v1alpha1.IstioOperatorSpec, componentName name.ComponentName,
	resourceName string, index int) (string, error,
) {
	// om is a map of kind:name string to Object ptr.
	// This is lazy loaded to avoid parsing when there are no overlays
	var om map[string]*object.K8sObject
	var objects object.K8sObjects

	for inPath, v := range t.KubernetesMapping {
		// Render the {{.ComponentName}} placeholder in the spec-side path.
		inPath, err := renderFeatureComponentPathTemplate(inPath, componentName)
		if err != nil {
			return "", err
		}
		// Gateways are lists in the spec; inject the list index after "...gressGateways.".
		renderedInPath := strings.Replace(inPath, "gressGateways.", "gressGateways."+fmt.Sprint(index)+".", 1)
		scope.Debugf("Checking for path %s in IstioOperatorSpec", renderedInPath)

		m, found, err := tpath.GetFromStructPath(iop, renderedInPath)
		if err != nil {
			return "", err
		}
		if !found {
			scope.Debugf("path %s not found in IstioOperatorSpec, skip mapping.", renderedInPath)
			continue
		}
		if mstr, ok := m.(string); ok && mstr == "" {
			scope.Debugf("path %s is empty string, skip mapping.", renderedInPath)
			continue
		}
		// Zero int values are due to proto3 compiling to scalars rather than ptrs. Skip these because values of 0 are
		// the default in destination fields and need not be set explicitly.
		if mint, ok := util.ToIntValue(m); ok && mint == 0 {
			scope.Debugf("path %s is int 0, skip mapping.", renderedInPath)
			continue
		}
		// The base component renders no workload resources, so K8s settings cannot be applied to it.
		if componentName == name.IstioBaseComponentName {
			return "", fmt.Errorf("base component can only have k8s.overlays, not other K8s settings")
		}
		inPathParts := strings.Split(inPath, ".")
		// Render {{.ResourceType}}/{{.ResourceName}}/{{.ContainerName}} in the manifest-side path.
		outPath, err := t.renderResourceComponentPathTemplate(v.OutPath, componentName, resourceName, iop.Revision)
		if err != nil {
			return "", err
		}
		scope.Debugf("path has value in IstioOperatorSpec, mapping to output path %s", outPath)
		path := util.PathFromString(outPath)
		pe := path[0]
		// Output path must start with [kind:name], which is used to map to the object to overlay.
		if !util.IsKVPathElement(pe) {
			return "", fmt.Errorf("path %s has an unexpected first element %s in OverlayK8sSettings", path, pe)
		}
		// We need to apply overlay, lazy load om
		if om == nil {
			objects, err = object.ParseK8sObjectsFromYAMLManifest(yml)
			if err != nil {
				return "", err
			}
			if scope.DebugEnabled() {
				scope.Debugf("Manifest contains the following objects:")
				for _, o := range objects {
					scope.Debugf("%s", o.HashNameKind())
				}
			}
			om = objects.ToNameKindMap()
		}
		// After brackets are removed, the remaining "kind:name" is the same format as the keys in om.
		pe, _ = util.RemoveBrackets(pe)
		oo, ok := om[pe]
		if !ok {
			// skip to overlay the K8s settings if the corresponding resource doesn't exist.
			scope.Infof("resource Kind:name %s doesn't exist in the output manifest, skip overlay.", pe)
			continue
		}
		// When autoscale is enabled we should not overwrite replica count, consider following scenario:
		// 0. Set values.pilot.autoscaleEnabled=true, components.pilot.k8s.replicaCount=1
		// 1. In istio operator it "caches" the generated manifests (with istiod.replicas=1)
		// 2. HPA autoscales our pilot replicas to 3
		// 3. Set values.pilot.autoscaleEnabled=false
		// 4. The generated manifests (with istiod.replicas=1) is same as istio operator "cache",
		//    the deployment will not get updated unless istio operator is restarted.
		if inPathParts[len(inPathParts)-1] == "ReplicaCount" {
			if skipReplicaCountWithAutoscaleEnabled(iop, componentName) {
				continue
			}
		}
		// strategic merge overlay m to the base object oo
		mergedObj, err := MergeK8sObject(oo, m, path[1:])
		if err != nil {
			return "", err
		}
		// Apply the workaround for merging service ports with (port,protocol) composite
		// keys instead of just the merging by port.
		if inPathParts[len(inPathParts)-1] == "Service" {
			if msvc, ok := m.(*v1alpha1.ServiceSpec); ok {
				mergedObj, err = t.fixMergedObjectWithCustomServicePortOverlay(oo, msvc, mergedObj)
				if err != nil {
					return "", err
				}
			}
		}
		// Update the original object in objects slice, since the output should be ordered.
		*(om[pe]) = *mergedObj
	}
	// Only re-serialize if at least one overlay was applied; otherwise return the input unchanged.
	if objects != nil {
		return objects.YAMLManifest()
	}
	return yml, nil
}
// componentToAutoScaleEnabledPath maps a component to the path of its HPA
// enablement flag in the Helm values tree. Components absent from this map
// never have their ReplicaCount overlay skipped
// (see skipReplicaCountWithAutoscaleEnabled).
var componentToAutoScaleEnabledPath = map[name.ComponentName]string{
	name.PilotComponentName:   "pilot.autoscaleEnabled",
	name.IngressComponentName: "gateways.istio-ingressgateway.autoscaleEnabled",
	name.EgressComponentName:  "gateways.istio-egressgateway.autoscaleEnabled",
}
// checkDeprecatedHPAFields is a helper function to check for the deprecated fields usage in HorizontalPodAutoscalerSpec.
// It inspects the HPA specs of pilot and the default ingress/egress gateways and reports true if any of them
// uses a field that only exists in autoscaling/v2beta1 (MetricName, TargetValue, TargetAverageValue, etc.).
func checkDeprecatedHPAFields(iop *v1alpha1.IstioOperatorSpec) bool {
	// Collect candidate HPA specs: pilot plus the default-named gateways only.
	hpaSpecs := []*v1alpha1.HorizontalPodAutoscalerSpec{}
	if iop.GetComponents().GetPilot().GetK8S().GetHpaSpec() != nil {
		hpaSpecs = append(hpaSpecs, iop.GetComponents().GetPilot().GetK8S().GetHpaSpec())
	}
	for _, gwSpec := range iop.GetComponents().GetIngressGateways() {
		if gwSpec.Name == defaultIngressGWName && gwSpec.GetK8S().GetHpaSpec() != nil {
			hpaSpecs = append(hpaSpecs, gwSpec.GetK8S().GetHpaSpec())
		}
	}
	for _, gwSpec := range iop.GetComponents().GetEgressGateways() {
		if gwSpec.Name == defaultEgressGWName && gwSpec.GetK8S().GetHpaSpec() != nil {
			hpaSpecs = append(hpaSpecs, gwSpec.GetK8S().GetHpaSpec())
		}
	}
	// The checked getters below are deprecated (v2beta1-only); the nolint
	// directives suppress staticcheck's deprecation warnings.
	for _, hpaSpec := range hpaSpecs {
		if hpaSpec.GetMetrics() != nil {
			for _, me := range hpaSpec.GetMetrics() {
				// nolint: staticcheck
				if me.GetObject().GetMetricName() != "" || me.GetObject().GetAverageValue() != nil ||
					// nolint: staticcheck
					me.GetObject().GetSelector() != nil || me.GetObject().GetTargetValue() != nil {
					return true
				}

				// nolint: staticcheck
				if me.GetPods().GetMetricName() != "" || me.GetPods().GetSelector() != nil ||
					// nolint: staticcheck
					me.GetPods().GetTargetAverageValue() != nil {
					return true
				}

				// nolint: staticcheck
				if me.GetResource().GetTargetAverageValue() != nil || me.GetResource().GetTargetAverageUtilization() != 0 {
					return true
				}

				// nolint: staticcheck
				if me.GetExternal().GetTargetAverageValue() != nil || me.GetExternal().GetTargetValue() != nil ||
					// nolint: staticcheck
					me.GetExternal().GetMetricName() != "" || me.GetExternal().GetMetricSelector() != nil {
					return true
				}
			}
		}
	}
	return false
}
// translateDeprecatedAutoscalingFields checks for existence of deprecated HPA fields, if found, set values.global.autoscalingv2API to false
// It only needs to run the logic for the first component because we are setting the values.global field instead of per component ones.
// we do not set per component values because we may want to avoid mixture of v2 and v2beta1 autoscaling templates usage
func (t *Translator) translateDeprecatedAutoscalingFields(values map[string]any, iop *v1alpha1.IstioOperatorSpec) error {
	// Once deprecated fields were detected (flag set below), keep writing
	// global.autoscalingv2API=false on subsequent calls without re-scanning.
	if t.checkedDeprecatedAutoscalingFields || checkDeprecatedHPAFields(iop) {
		path := util.PathFromString("global.autoscalingv2API")
		if err := tpath.WriteNode(values, path, false); err != nil {
			return fmt.Errorf("failed to set autoscalingv2API path: %v", err)
		}
		t.checkedDeprecatedAutoscalingFields = true
	}
	return nil
}
// skipReplicaCountWithAutoscaleEnabled reports whether the ReplicaCount overlay
// for componentName should be skipped because autoscaling is enabled for that
// component in the IstioOperator values tree.
func skipReplicaCountWithAutoscaleEnabled(iop *v1alpha1.IstioOperatorSpec, componentName name.ComponentName) bool {
	flagPath, known := componentToAutoScaleEnabledPath[componentName]
	if !known {
		// Component has no autoscale flag; never skip.
		return false
	}
	raw, found, err := tpath.GetFromStructPath(iop.GetValues().AsMap(), flagPath)
	if err != nil || !found {
		return false
	}
	flag, isBool := raw.(bool)
	return isBool && flag
}
// fixMergedObjectWithCustomServicePortOverlay re-merges Service ports using the
// (port, protocol) composite key (see strategicMergePorts) and writes the result
// into mergedObj's spec.ports, working around the port-number-only merge key of
// Kubernetes strategic merge. oo is the base object, msvc the overlay service spec.
func (t *Translator) fixMergedObjectWithCustomServicePortOverlay(oo *object.K8sObject,
	msvc *v1alpha1.ServiceSpec, mergedObj *object.K8sObject,
) (*object.K8sObject, error) {
	// Extract the base spec.ports as typed ServicePorts via a JSON round-trip.
	var basePorts []*v1.ServicePort
	bps, _, err := unstructured.NestedSlice(oo.Unstructured(), "spec", "ports")
	if err != nil {
		return nil, err
	}
	bby, err := json.Marshal(bps)
	if err != nil {
		return nil, err
	}
	if err = json.Unmarshal(bby, &basePorts); err != nil {
		return nil, err
	}
	// Convert the overlay's operator-API ports into core/v1 ServicePorts.
	overlayPorts := make([]*v1.ServicePort, 0, len(msvc.GetPorts()))
	for _, p := range msvc.GetPorts() {
		var pr v1.Protocol
		// Anything other than "udp" (case-insensitive) is treated as TCP.
		switch strings.ToLower(p.GetProtocol()) {
		case "udp":
			pr = v1.ProtocolUDP
		default:
			pr = v1.ProtocolTCP
		}
		port := &v1.ServicePort{
			Name:     p.GetName(),
			Protocol: pr,
			Port:     p.GetPort(),
			NodePort: p.GetNodePort(),
		}
		if p.GetAppProtocol() != "" {
			ap := p.AppProtocol
			port.AppProtocol = &ap
		}
		if p.TargetPort != nil {
			port.TargetPort = p.TargetPort.ToKubernetes()
		}
		overlayPorts = append(overlayPorts, port)
	}
	mergedPorts := strategicMergePorts(basePorts, overlayPorts)
	// Round-trip the merged ports back to []any so they can be set on the unstructured object.
	mpby, err := json.Marshal(mergedPorts)
	if err != nil {
		return nil, err
	}
	var mergedPortSlice []any
	if err = json.Unmarshal(mpby, &mergedPortSlice); err != nil {
		return nil, err
	}
	if err = unstructured.SetNestedSlice(mergedObj.Unstructured(), mergedPortSlice, "spec", "ports"); err != nil {
		return nil, err
	}
	// Now fix the merged object: rebuild the K8sObject so its cached JSON/YAML
	// representations reflect the updated unstructured content.
	mjsonby, err := json.Marshal(mergedObj.Unstructured())
	if err != nil {
		return nil, err
	}
	if mergedObj, err = object.ParseJSONToK8sObject(mjsonby); err != nil {
		return nil, err
	}
	return mergedObj, nil
}
// portWithProtocol is the composite merge key used by strategicMergePorts:
// two ServicePorts are considered the same entry only when both the port
// number and the protocol match.
type portWithProtocol struct {
	port     int32
	protocol v1.Protocol
}
// portIndexOf returns the position of element within data, or len(data)
// when element is not present (so absent entries sort last).
func portIndexOf(element portWithProtocol, data []portWithProtocol) int {
	for i := range data {
		if data[i] == element {
			return i
		}
	}
	return len(data)
}
// strategicMergePorts merges the base with the given overlay considering both
// port and the protocol as the merge keys. This is a workaround for the strategic
// merge patch in Kubernetes which only uses port number as the key. This causes
// an issue when we have to expose the same port with different protocols.
// See - https://github.com/kubernetes/kubernetes/issues/103544
// TODO(su225): Remove this once the above issue is addressed in Kubernetes
//
// NOTE: elements of base and overlay may be mutated (empty Protocol is
// normalized to TCP in place).
func strategicMergePorts(base, overlay []*v1.ServicePort) []*v1.ServicePort {
	// We want to keep the original port order with base first and then the newly
	// added ports through the overlay. This is because there are some cases where
	// port order actually matters. For instance, some cloud load balancers use the
	// first port for health-checking (in Istio it is 15021). So we must keep maintain
	// it in order not to break the users
	// See - https://github.com/istio/istio/issues/12503 for more information
	//
	// Or changing port order might generate weird diffs while upgrading or changing
	// IstioOperator spec. It is annoying. So better maintain original order while
	// appending newly added ports through overlay.
	//
	// portPriority records first-seen order of each (port, protocol) key; it is
	// consulted by the sort below to restore that order after the map-based merge.
	portPriority := make([]portWithProtocol, 0, len(base)+len(overlay))
	for _, p := range base {
		if p.Protocol == "" {
			p.Protocol = v1.ProtocolTCP
		}
		portPriority = append(portPriority, portWithProtocol{port: p.Port, protocol: p.Protocol})
	}
	for _, p := range overlay {
		if p.Protocol == "" {
			p.Protocol = v1.ProtocolTCP
		}
		portPriority = append(portPriority, portWithProtocol{port: p.Port, protocol: p.Protocol})
	}
	sortFn := func(ps []*v1.ServicePort) func(int, int) bool {
		return func(i, j int) bool {
			pi := portIndexOf(portWithProtocol{port: ps[i].Port, protocol: ps[i].Protocol}, portPriority)
			pj := portIndexOf(portWithProtocol{port: ps[j].Port, protocol: ps[j].Protocol}, portPriority)
			return pi < pj
		}
	}
	// With only one side present there is nothing to merge; just restore order.
	if overlay == nil {
		sort.Slice(base, sortFn(base))
		return base
	}
	if base == nil {
		sort.Slice(overlay, sortFn(overlay))
		return overlay
	}
	// first add the base and then replace appropriate
	// keys with the items in the overlay list
	merged := make(map[portWithProtocol]*v1.ServicePort)
	for _, p := range base {
		key := portWithProtocol{port: p.Port, protocol: p.Protocol}
		merged[key] = p
	}
	for _, p := range overlay {
		key := portWithProtocol{port: p.Port, protocol: p.Protocol}
		merged[key] = p
	}
	res := make([]*v1.ServicePort, 0, len(merged))
	for _, pv := range merged {
		res = append(res, pv)
	}
	// Map iteration order is random; sort by first-seen priority for determinism.
	sort.Slice(res, sortFn(res))
	return res
}
// ProtoToValues traverses the supplied IstioOperatorSpec and returns a values.yaml translation from it.
func (t *Translator) ProtoToValues(ii *v1alpha1.IstioOperatorSpec) (string, error) {
	root, err := t.ProtoToHelmValues2(ii)
	if err != nil {
		return "", err
	}

	// Apply special handling that the generic mapping rules cannot express.
	if err := t.setComponentProperties(root, ii); err != nil {
		return "", err
	}

	// Handle legacy autoscaling/v2beta1 field usage.
	if err := t.translateDeprecatedAutoscalingFields(root, ii); err != nil {
		return "", err
	}

	// An empty tree is represented as a blank string rather than "{}".
	if len(root) == 0 {
		return "", nil
	}

	out, err := yaml.Marshal(root)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// Fields, beyond 'global', that apply to each chart at the top level of values.yaml.
// Used by TranslateHelmValues when flattening component values (FlattenValues).
var topLevelFields = sets.New(
	"ownerName",
	"revision",
	"compatibilityVersion",
	"profile",
)
// TranslateHelmValues creates a Helm values.yaml config data tree from iop using the given translator.
// The precedence (lowest to highest) is: API-translated values, then IstioOperatorSpec.Values,
// then IstioOperatorSpec.UnvalidatedValues. For components with FlattenValues set (e.g. ztunnel),
// the global and component subtrees are hoisted to the top level of the result.
func (t *Translator) TranslateHelmValues(iop *v1alpha1.IstioOperatorSpec, componentsSpec any, componentName name.ComponentName) (string, error) {
	apiVals := make(map[string]any)

	// First, translate the IstioOperator API to helm Values.
	apiValsStr, err := t.ProtoToValues(iop)
	if err != nil {
		return "", err
	}
	err = yaml.Unmarshal([]byte(apiValsStr), &apiVals)
	if err != nil {
		return "", err
	}

	scope.Debugf("Values translated from IstioOperator API:\n%s", apiValsStr)

	// Add global overlay from IstioOperatorSpec.Values/UnvalidatedValues.
	globalVals := iop.GetValues().AsMap()
	globalUnvalidatedVals := iop.GetUnvalidatedValues().AsMap()

	if scope.DebugEnabled() {
		scope.Debugf("Values from IstioOperatorSpec.Values:\n%s", util.ToYAML(globalVals))
		scope.Debugf("Values from IstioOperatorSpec.UnvalidatedValues:\n%s", util.ToYAML(globalUnvalidatedVals))
	}

	// Overlay user values on top of the API-derived values; unvalidated values win last.
	mergedVals, err := util.OverlayTrees(apiVals, globalVals)
	if err != nil {
		return "", err
	}
	mergedVals, err = util.OverlayTrees(mergedVals, globalUnvalidatedVals)
	if err != nil {
		return "", err
	}
	c, f := t.ComponentMaps[componentName]
	if f && c.FlattenValues {
		globals, ok := mergedVals["global"].(map[string]any)
		if !ok {
			return "", fmt.Errorf("global value isn't a map")
		}
		components, ok := mergedVals[c.ToHelmValuesTreeRoot].(map[string]any)
		if !ok {
			return "", fmt.Errorf("component value isn't a map")
		}
		finalVals := map[string]any{}
		// strip out anything from the original apiVals which are a map[string]any but populate other top-level fields
		for k, v := range apiVals {
			_, isMap := v.(map[string]any)
			if !isMap {
				finalVals[k] = v
			}
		}
		// Carry over whitelisted top-level fields (revision, profile, ...).
		for k := range topLevelFields {
			if v, f := mergedVals[k]; f {
				finalVals[k] = v
			}
		}
		// Hoist globals and the component subtree to the top level; component wins on conflict.
		for k, v := range globals {
			finalVals[k] = v
		}
		for k, v := range components {
			finalVals[k] = v
		}
		mergedVals = finalVals
	}

	mergedYAML, err := yaml.Marshal(mergedVals)
	if err != nil {
		return "", err
	}

	// Inject gateway-specific values (name, labels, ports) for gateway components.
	mergedYAML, err = applyGatewayTranslations(mergedYAML, componentName, componentsSpec)
	if err != nil {
		return "", err
	}

	return string(mergedYAML), err
}
// applyGatewayTranslations writes gateway name gwName at the appropriate values path in iop and maps k8s.service.ports
// to values. It returns the resulting YAML tree.
// iop is the serialized values tree; componentSpec must be a *v1alpha1.GatewaySpec for gateway components.
func applyGatewayTranslations(iop []byte, componentName name.ComponentName, componentSpec any) ([]byte, error) {
	// Non-gateway components need no translation; pass through unchanged.
	if !componentName.IsGateway() {
		return iop, nil
	}
	iopt := make(map[string]any)
	if err := yaml.Unmarshal(iop, &iopt); err != nil {
		return nil, err
	}
	// Use a checked assertion: the previous unchecked form panicked on a
	// mismatched componentSpec type instead of returning an error.
	gwSpec, ok := componentSpec.(*v1alpha1.GatewaySpec)
	if !ok {
		return nil, fmt.Errorf("applyGatewayTranslations for %s: expected *v1alpha1.GatewaySpec, got %T", componentName, componentSpec)
	}
	k8s := gwSpec.K8S
	switch componentName {
	case name.IngressComponentName:
		setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-ingressgateway.name"), gwSpec.Name)
		if len(gwSpec.Label) != 0 {
			setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-ingressgateway.labels"), gwSpec.Label)
		}
		if k8s != nil && k8s.Service != nil && k8s.Service.Ports != nil {
			setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-ingressgateway.ports"), k8s.Service.Ports)
		}
	case name.EgressComponentName:
		setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-egressgateway.name"), gwSpec.Name)
		if len(gwSpec.Label) != 0 {
			setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-egressgateway.labels"), gwSpec.Label)
		}
		if k8s != nil && k8s.Service != nil && k8s.Service.Ports != nil {
			setYAMLNodeByMapPath(iopt, util.PathFromString("gateways.istio-egressgateway.ports"), k8s.Service.Ports)
		}
	}
	return yaml.Marshal(iopt)
}
// setYAMLNodeByMapPath sets the value at the given path to val in treeNode. The path cannot traverse lists and
// treeNode must be a YAML tree unmarshaled into a plain map data structure. Missing intermediate
// nodes are not created: if any path element is absent, the call is a silent no-op.
func setYAMLNodeByMapPath(treeNode any, path util.Path, val any) {
	if treeNode == nil || len(path) == 0 {
		return
	}
	key := path[0]
	switch node := treeNode.(type) {
	case map[any]any:
		if len(path) == 1 {
			node[key] = val
		} else if child := node[key]; child != nil {
			setYAMLNodeByMapPath(child, path[1:], val)
		}
	case map[string]any:
		if len(path) == 1 {
			node[key] = val
		} else if child := node[key]; child != nil {
			setYAMLNodeByMapPath(child, path[1:], val)
		}
	}
}
// ComponentMap returns a ComponentMaps struct ptr for the given component name if one exists.
// If the name of the component is lower case, the function will use the capitalized version
// of the name.
func (t *Translator) ComponentMap(cns string) *ComponentMaps {
	return t.ComponentMaps[name.TitleCase(name.ComponentName(cns))]
}
// ProtoToHelmValues2 converts ii into a plain map tree via a JSON round-trip,
// then runs the recursive ProtoToHelmValues translation over it.
func (t *Translator) ProtoToHelmValues2(ii *v1alpha1.IstioOperatorSpec) (map[string]any, error) {
	raw, err := json.Marshal(ii)
	if err != nil {
		return nil, err
	}
	decoded := map[string]any{}
	if err := json.Unmarshal(raw, &decoded); err != nil {
		return nil, err
	}
	out := map[string]any{}
	errs := t.ProtoToHelmValues(decoded, out, nil)
	return out, errs.ToError()
}
// ProtoToHelmValues function below is used by third party for integrations and has to be public
// ProtoToHelmValues takes an interface which must be a struct ptr and recursively iterates through all its fields.
// For each leaf, if looks for a mapping from the struct data path to the corresponding YAML path and if one is
// found, it calls the associated mapping function if one is defined to populate the values YAML path.
// If no mapping function is defined, it uses the default mapping function.
func (t *Translator) ProtoToHelmValues(node any, root map[string]any, path util.Path) (errs util.Errors) {
	scope.Debugf("ProtoToHelmValues with path %s, %v (%T)", path, node, node)
	if util.IsValueNil(node) {
		return nil
	}
	vv := reflect.ValueOf(node)
	vt := reflect.TypeOf(node)
	switch vt.Kind() {
	case reflect.Ptr:
		if !util.IsNilOrInvalidValue(vv.Elem()) {
			errs = util.AppendErrs(errs, t.ProtoToHelmValues(vv.Elem().Interface(), root, path))
		}
	case reflect.Struct:
		scope.Debug("Struct")
		for i := 0; i < vv.NumField(); i++ {
			fieldName := vv.Type().Field(i).Name
			fieldValue := vv.Field(i)
			scope.Debugf("Checking field %s", fieldName)
			// Fields explicitly excluded from JSON are not translated.
			if a, ok := vv.Type().Field(i).Tag.Lookup("json"); ok && a == "-" {
				continue
			}
			if !fieldValue.CanInterface() {
				continue
			}
			// Full slice expression forces append to allocate: appending to the
			// shared backing array of `path` could otherwise let sibling
			// iterations clobber each other's trailing path element.
			errs = util.AppendErrs(errs, t.ProtoToHelmValues(fieldValue.Interface(), root, append(path[:len(path):len(path)], fieldName)))
		}
	case reflect.Map:
		scope.Debug("Map")
		for _, key := range vv.MapKeys() {
			// Same aliasing concern as the Struct case: copy before extending.
			nnp := append(path[:len(path):len(path)], key.String())
			errs = util.AppendErrs(errs, t.insertLeaf(root, nnp, vv.MapIndex(key)))
		}
	case reflect.Slice:
		scope.Debug("Slice")
		for i := 0; i < vv.Len(); i++ {
			errs = util.AppendErrs(errs, t.ProtoToHelmValues(vv.Index(i).Interface(), root, path))
		}
	default:
		// Must be a leaf
		scope.Debugf("field has kind %s", vt.Kind())
		if vv.CanInterface() {
			errs = util.AppendErrs(errs, t.insertLeaf(root, path, vv))
		}
	}
	return errs
}
// setComponentProperties translates properties (e.g., enablement and namespace)
// in the baseYAML values tree, based on feature/component inheritance relationship.
// For each mapped component (gateways excluded) it writes enabled/namespace/hub/tag
// under the component's values subtree, then writes global namespace keys.
func (t *Translator) setComponentProperties(root map[string]any, iop *v1alpha1.IstioOperatorSpec) error {
	// Gateways are handled separately (applyGatewayTranslations), so skip them here.
	var keys []string
	for k := range t.ComponentMaps {
		if k != name.IngressComponentName && k != name.EgressComponentName {
			keys = append(keys, string(k))
		}
	}
	// Sort for deterministic iteration; process in reverse sorted order.
	sort.Strings(keys)
	l := len(keys)
	for i := l - 1; i >= 0; i-- {
		cn := name.ComponentName(keys[i])
		c := t.ComponentMaps[cn]
		e, err := t.IsComponentEnabled(cn, iop)
		if err != nil {
			return err
		}

		enablementPath := c.ToHelmValuesTreeRoot
		// CNI calls itself "cni" in the chart but "istio_cni" for enablement outside of the chart.
		if cn == name.CNIComponentName {
			enablementPath = "istio_cni"
		}
		if err := tpath.WriteNode(root, util.PathFromString(enablementPath+"."+HelmValuesEnabledSubpath), e); err != nil {
			return err
		}

		ns, err := name.Namespace(cn, iop)
		if err != nil {
			return err
		}
		if err := tpath.WriteNode(root, util.PathFromString(c.ToHelmValuesTreeRoot+"."+HelmValuesNamespaceSubpath), ns); err != nil {
			return err
		}

		hub, found, _ := tpath.GetFromStructPath(iop, "Components."+string(cn)+".Hub")
		// Unmarshal unfortunately creates struct fields with "" for unset values. Skip these cases to avoid
		// overwriting current value with an empty string.
		hubStr, ok := hub.(string)
		if found && !(ok && hubStr == "") {
			if err := tpath.WriteNode(root, util.PathFromString(c.ToHelmValuesTreeRoot+"."+HelmValuesHubSubpath), hub); err != nil {
				return err
			}
		}

		// Tag arrives as a *structpb.Value; skip empty strings for the same reason as hub.
		tag, found, _ := tpath.GetFromStructPath(iop, "Components."+string(cn)+".Tag")
		tagv, ok := tag.(*structpb.Value)
		if found && !(ok && util.ValueString(tagv) == "") {
			if err := tpath.WriteNode(root, util.PathFromString(c.ToHelmValuesTreeRoot+"."+HelmValuesTagSubpath), util.ValueString(tagv)); err != nil {
				return err
			}
		}
	}

	// Write component namespaces under global (e.g. global.istioNamespace for pilot).
	for cn, gns := range t.GlobalNamespaces {
		ns, err := name.Namespace(cn, iop)
		if err != nil {
			return err
		}
		if err := tpath.WriteNode(root, util.PathFromString("global."+gns), ns); err != nil {
			return err
		}
	}

	return nil
}
// IsComponentEnabled reports whether the component with name cn is enabled, according to the translations in t,
// and the contents of ocp. Components without a mapping are treated as disabled.
func (t *Translator) IsComponentEnabled(cn name.ComponentName, iop *v1alpha1.IstioOperatorSpec) (bool, error) {
	if cm := t.ComponentMaps[cn]; cm == nil {
		return false, nil
	}
	return IsComponentEnabledInSpec(cn, iop)
}
// insertLeaf inserts a leaf with value into root at path, which is first mapped using t.APIMapping.
// Paths without a mapping are silently ignored.
func (t *Translator) insertLeaf(root map[string]any, path util.Path, value reflect.Value) (errs util.Errors) {
	// Must be a scalar leaf. See if we have a mapping.
	valuesPath, m := getValuesPathMapping(t.APIMapping, path)

	// Dereference pointer leaves to their underlying value.
	var leaf any
	if value.Kind() == reflect.Ptr {
		leaf = value.Elem().Interface()
	} else {
		leaf = value.Interface()
	}

	if m == nil {
		// No mapping for this path; nothing to insert.
		return errs
	}
	// Fall back to the default translation (plain tree write) when the
	// mapping defines no custom function.
	fn := m.translationFunc
	if fn == nil {
		fn = defaultTranslationFunc
	}
	return util.AppendErr(errs, fn(m, root, valuesPath, leaf))
}
// getValuesPathMapping tries to map path against the passed in mappings with a longest prefix match. If a matching prefix
// is found, it returns the translated YAML path and the corresponding translation.
// e.g. for mapping "a.b" -> "1.2", the input path "a.b.c.d" would yield "1.2.c.d".
func getValuesPathMapping(mappings map[string]*Translation, path util.Path) (string, *Translation) {
	// Shrink the candidate prefix one element at a time until a mapping matches.
	prefix := path
	for len(prefix) > 0 {
		if m := mappings[prefix.String()]; m != nil {
			if m.OutPath == "" {
				// Mapped, but with no output location.
				return "", m
			}
			out := m.OutPath + "." + path[len(prefix):].String()
			scope.Debugf("translating %s to %s", path, out)
			return out, m
		}
		prefix = prefix[:len(prefix)-1]
	}
	// No prefix of path is mapped.
	return "", nil
}
// renderFeatureComponentPathTemplate renders a template of the form <path>{{.ComponentName}}<path> with
// the supplied parameters.
func renderFeatureComponentPathTemplate(tmpl string, componentName name.ComponentName) (string, error) {
	data := struct {
		ComponentName name.ComponentName
	}{
		ComponentName: componentName,
	}
	return util.RenderTemplate(tmpl, data)
}
// renderResourceComponentPathTemplate renders a template of the form <path>{{.ResourceName}}<path>{{.ContainerName}}<path> with
// the supplied parameters. When resourceName is empty, the component map's default
// resource name is used; a revisioned istiod gets the "-<revision>" suffix.
func (t *Translator) renderResourceComponentPathTemplate(tmpl string, componentName name.ComponentName,
	resourceName, revision string,
) (string, error) {
	cn := string(componentName)
	cmp := t.ComponentMap(cn)
	if cmp == nil {
		return "", fmt.Errorf("component: %s does not exist in the componentMap", cn)
	}

	name := resourceName
	if name == "" {
		name = cmp.ResourceName
	}
	// The istiod resource will be istiod-<REVISION>, so we need to append the revision suffix
	if revision != "" && name == "istiod" {
		name += "-" + revision
	}

	data := struct {
		ResourceType  string
		ResourceName  string
		ContainerName string
	}{
		ResourceType:  cmp.ResourceType,
		ResourceName:  name,
		ContainerName: cmp.ContainerName,
	}
	return util.RenderTemplate(tmpl, data)
}
// defaultTranslationFunc is the default translation to values. It maps a Go data path into a YAML path.
// Empty string values and unmapped (empty) values paths are skipped without error.
func defaultTranslationFunc(m *Translation, root map[string]any, valuesPath string, value any) error {
	if util.IsEmptyString(value) {
		scope.Debugf("Skip empty string value for path %s", m.OutPath)
		return nil
	}
	if valuesPath == "" {
		scope.Debugf("Not mapping to values, resources path is %s", m.OutPath)
		return nil
	}

	// Values keys are lowerCamelCase; downcase the first letter of each segment.
	segments := util.PathFromString(valuesPath)
	path := make([]string, 0, len(segments))
	for _, seg := range segments {
		path = append(path, firstCharToLower(seg))
	}
	return tpath.WriteNode(root, path, value)
}
// firstCharToLower returns s with its first byte lowercased. The empty string
// is returned unchanged (the previous implementation panicked on "").
func firstCharToLower(s string) string {
	if s == "" {
		return s
	}
	return strings.ToLower(s[:1]) + s[1:]
}
// MergeK8sObject function below is used by third party for integrations and has to be public
// MergeK8sObject does strategic merge for overlayNode on the base object.
// path locates overlayNode within the object (see createPatchObjectFromPath);
// the merge honors patchStrategy tags of the scheme-registered versioned type.
func MergeK8sObject(base *object.K8sObject, overlayNode any, path util.Path) (*object.K8sObject, error) {
	// Build a sparse patch object placing overlayNode at path.
	overlay, err := createPatchObjectFromPath(overlayNode, path)
	if err != nil {
		return nil, err
	}
	overlayYAML, err := yaml.Marshal(overlay)
	if err != nil {
		return nil, err
	}
	overlayJSON, err := yaml.YAMLToJSON(overlayYAML)
	if err != nil {
		return nil, fmt.Errorf("yamlToJSON error in overlayYAML: %s\n%s", err, overlayYAML)
	}
	baseJSON, err := base.JSON()
	if err != nil {
		return nil, err
	}

	// get a versioned object from the scheme, we can use the strategic patching mechanism
	// (i.e. take advantage of patchStrategy in the type)
	versionedObject, err := scheme.Scheme.New(base.GroupVersionKind())
	if err != nil {
		return nil, err
	}
	// strategic merge patch
	newBytes, err := strategicpatch.StrategicMergePatch(baseJSON, overlayJSON, versionedObject)
	if err != nil {
		return nil, fmt.Errorf("get error: %s to merge patch:\n%s for base:\n%s", err, overlayJSON, baseJSON)
	}

	newObj, err := object.ParseJSONToK8sObject(newBytes)
	if err != nil {
		return nil, err
	}

	// Resolve any conflicts the merge introduced (e.g. mutually exclusive fields).
	return newObj.ResolveK8sConflict(), nil
}
// createPatchObjectFromPath constructs patch object for node with path, returns nil object and error if the path is invalid.
// e.g. node:
// - name: NEW_VAR
//   value: new_value
//
// and path:
//
//	spec.template.spec.containers.[name:discovery].env
// will construct the following patch object:
//	spec:
//	  template:
//	    spec:
//	      containers:
//	      - name: discovery
//	        env:
//	        - name: NEW_VAR
//	          value: new_value
func createPatchObjectFromPath(node any, path util.Path) (map[string]any, error) {
	if len(path) == 0 {
		return nil, fmt.Errorf("empty path %s", path)
	}
	// A KV element ([k:v]) cannot begin a patch object (there is no list to select into yet).
	if util.IsKVPathElement(path[0]) {
		return nil, fmt.Errorf("path %s has an unexpected first element %s", path, path[0])
	}
	length := len(path)
	// The final element must be a plain key; node is assigned under it.
	if util.IsKVPathElement(path[length-1]) {
		return nil, fmt.Errorf("path %s has an unexpected last element %s", path, path[length-1])
	}

	// Walk the path, building nested maps (for plain keys) and single-element
	// lists (for KV selectors). currentNode/nextNode track the build position;
	// the := assertions below deliberately shadow currentNode with its typed form.
	patchObj := make(map[string]any)
	var currentNode, nextNode any
	nextNode = patchObj
	for i, pe := range path {
		currentNode = nextNode
		// last path element
		if i == length-1 {
			currentNode, ok := currentNode.(map[string]any)
			if !ok {
				return nil, fmt.Errorf("path %s has an unexpected non KV element %s", path, pe)
			}
			currentNode[pe] = node
			break
		}
		if util.IsKVPathElement(pe) {
			// KV selector: fill the single-slot list created by the previous
			// iteration with {k: v} and descend into it.
			currentNode, ok := currentNode.([]any)
			if !ok {
				return nil, fmt.Errorf("path %s has an unexpected KV element %s", path, pe)
			}
			k, v, err := util.PathKV(pe)
			if err != nil {
				return nil, err
			}
			if k == "" || v == "" {
				return nil, fmt.Errorf("path %s has an invalid KV element %s", path, pe)
			}
			currentNode[0] = map[string]any{k: v}
			nextNode = currentNode[0]
			continue
		}
		currentNode, ok := currentNode.(map[string]any)
		if !ok {
			return nil, fmt.Errorf("path %s has an unexpected non KV element %s", path, pe)
		}
		// next path element determines the next node type
		if util.IsKVPathElement(path[i+1]) {
			currentNode[pe] = make([]any, 1)
		} else {
			currentNode[pe] = make(map[string]any)
		}
		nextNode = currentNode[pe]
	}
	return patchObj, nil
}
// IOPStoIOP takes an IstioOperatorSpec and returns a corresponding IstioOperator with the given name and namespace.
func IOPStoIOP(iops proto.Message, name, namespace string) (*iopv1alpha1.IstioOperator, error) {
	// Serialize the spec to a full IstioOperator YAML document first.
	yml, err := IOPStoIOPstr(iops, name, namespace)
	if err != nil {
		return nil, err
	}
	iop, err := istio.UnmarshalIstioOperator(yml, false)
	if err != nil {
		return nil, err
	}
	return iop, nil
}
// IOPStoIOPstr takes an IstioOperatorSpec and returns a corresponding IstioOperator string with the given name and namespace.
func IOPStoIOPstr(iops proto.Message, name, namespace string) (string, error) {
	iopsStr, err := util.MarshalWithJSONPB(iops)
	if err != nil {
		return "", err
	}
	spec, err := tpath.AddSpecRoot(iopsStr)
	if err != nil {
		return "", err
	}

	tmpl := `
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
metadata:
  namespace: {{ .Namespace }}
  name: {{ .Name }}
`
	// Passing into template causes reformatting, use simple concatenation instead.
	tmpl += spec

	data := struct {
		Namespace string
		Name      string
	}{
		Namespace: namespace,
		Name:      name,
	}
	return util.RenderTemplate(tmpl, data)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package translate
import (
"fmt"
"github.com/golang/protobuf/ptypes/wrappers"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
)
// IsComponentEnabledInSpec reports whether the given component is enabled in the given spec.
// IsComponentEnabledInSpec assumes that controlPlaneSpec has been validated.
// A missing or nil Enabled node is treated as disabled.
// TODO: remove extra validations when comfort level is high enough.
func IsComponentEnabledInSpec(componentName name.ComponentName, controlPlaneSpec *v1alpha1.IstioOperatorSpec) (bool, error) {
	componentNodeI, found, err := tpath.GetFromStructPath(controlPlaneSpec, "Components."+string(componentName)+".Enabled")
	if err != nil {
		return false, fmt.Errorf("error in IsComponentEnabledInSpec GetFromStructPath componentEnabled for component=%s: %s",
			componentName, err)
	}
	if !found || componentNodeI == nil {
		return false, nil
	}
	componentNode, ok := componentNodeI.(*wrappers.BoolValue)
	if !ok {
		// Fixed: the message previously named *v1alpha1.BoolValueForPB, which is not
		// the type asserted above; report the type we actually expect.
		return false, fmt.Errorf("component %s enabled has bad type %T, expect *wrappers.BoolValue", componentName, componentNodeI)
	}
	// Guard against a typed-nil *wrappers.BoolValue stored in the interface.
	if componentNode == nil {
		return false, nil
	}
	return componentNode.Value, nil
}
// IsComponentEnabledFromValue get whether component is enabled in helm value.yaml tree.
// valuePath points to component path in the values tree.
// Returns (enabled, pathExist, err): pathExist reports whether the ".enabled"
// node itself was present; when it is absent, the component is treated as
// enabled if its root subtree exists.
func IsComponentEnabledFromValue(cn name.ComponentName, valueSpec map[string]any) (enabled bool, pathExist bool, err error) {
	t := NewTranslator()
	cnMap, ok := t.ComponentMaps[cn]
	if !ok {
		// Unknown component: not enabled, path not present.
		return false, false, nil
	}
	valuePath := cnMap.ToHelmValuesTreeRoot
	enabledPath := valuePath + ".enabled"
	enableNodeI, found, err := tpath.Find(valueSpec, util.ToYAMLPath(enabledPath))
	if err != nil {
		// Fixed: include the underlying error instead of silently dropping it.
		return false, false, fmt.Errorf("error finding component enablement path %s in helm value.yaml tree: %v", enabledPath, err)
	}
	if !found {
		// Some components do not specify enablement should be treated as enabled if the root node in the component subtree exists.
		_, found, err := tpath.Find(valueSpec, util.ToYAMLPath(valuePath))
		if err != nil {
			return false, false, err
		}
		if found {
			return true, false, nil
		}
		return false, false, nil
	}
	enableNode, ok := enableNodeI.(bool)
	if !ok {
		return false, true, fmt.Errorf("node at valuePath %s has bad type %T, expect bool", enabledPath, enableNodeI)
	}
	return enableNode, true, nil
}
// OverlayValuesEnablement overlays any enablement in values path from the user file overlay or set flag overlay.
// The overlay is translated from values to the corresponding addonComponents enablement paths.
func OverlayValuesEnablement(baseYAML, fileOverlayYAML, setOverlayYAML string) (string, error) {
	// Merge the file overlay and the --set overlay; the set overlay wins.
	mergedOverlay, err := util.OverlayYAML(fileOverlayYAML, setOverlayYAML)
	if err != nil {
		return "", fmt.Errorf("could not overlay user config over base: %s", err)
	}
	// Translate the enablement values onto the base via the known path map.
	return YAMLTree(mergedOverlay, baseYAML, name.ValuesEnablementPathMap)
}
// GetEnabledComponents get all the enabled components from the given istio operator spec
func GetEnabledComponents(iopSpec *v1alpha1.IstioOperatorSpec) ([]string, error) {
	var enabledComponents []string
	if iopSpec.Components == nil {
		return enabledComponents, nil
	}
	// Core components are checked individually via the spec enablement flags.
	for _, c := range name.AllCoreComponentNames {
		enabled, err := IsComponentEnabledInSpec(c, iopSpec)
		if err != nil {
			return nil, fmt.Errorf("failed to check if component: %s is enabled or not: %v", string(c), err)
		}
		if enabled {
			enabledComponents = append(enabledComponents, string(c))
		}
	}
	// One enabled gateway of a kind is enough to mark that kind as enabled.
	for _, gw := range iopSpec.Components.IngressGateways {
		if gw != nil && gw.Enabled.GetValue() {
			enabledComponents = append(enabledComponents, string(name.IngressComponentName))
			break
		}
	}
	for _, gw := range iopSpec.Components.EgressGateways {
		if gw != nil && gw.Enabled.GetValue() {
			enabledComponents = append(enabledComponents, string(name.EgressComponentName))
			break
		}
	}
	return enabledComponents, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package translate
import (
"fmt"
"sort"
"strings"
"sigs.k8s.io/yaml"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/version"
oversion "istio.io/istio/operator/version"
)
// ReverseTranslator is a set of mappings to translate between values.yaml and API paths, charts, k8s paths.
// Mappings are keyed by values.yaml paths; KubernetesMapping and GatewayKubernetesMapping
// are regenerated from KubernetesPatternMapping before each translation (see initK8SMapping).
type ReverseTranslator struct {
	// Version is the operator minor version this translator targets.
	Version version.MinorVersion
	// APIMapping is Values.yaml path to API path mapping using longest prefix match. If the path is a non-leaf node,
	// the output path is the matching portion of the path, plus any remaining output path.
	APIMapping map[string]*Translation `yaml:"apiMapping,omitempty"`
	// KubernetesPatternMapping defines mapping patterns from k8s resource paths to IstioOperator API paths.
	KubernetesPatternMapping map[string]string `yaml:"kubernetesPatternMapping,omitempty"`
	// KubernetesMapping defines actual k8s mappings generated from KubernetesPatternMapping before each translation.
	KubernetesMapping map[string]*Translation `yaml:"kubernetesMapping,omitempty"`
	// GatewayKubernetesMapping defines actual k8s mappings for gateway components generated from KubernetesPatternMapping before each translation.
	GatewayKubernetesMapping gatewayKubernetesMapping `yaml:"GatewayKubernetesMapping,omitempty"`
	// ValuesToComponentName defines mapping from value path to component name in API paths.
	ValuesToComponentName map[string]name.ComponentName `yaml:"valuesToComponentName,omitempty"`
}
// gatewayKubernetesMapping holds the k8s settings mappings for the two gateway
// component kinds, keyed by values.yaml path.
type gatewayKubernetesMapping struct {
	// IngressMapping maps ingress gateway values paths to API path translations.
	IngressMapping map[string]*Translation
	// EgressMapping maps egress gateway values paths to API path translations.
	EgressMapping map[string]*Translation
}
var (
	// componentEnablementPattern is the IOP path template for a component's enablement flag.
	// Component enablement mapping. Ex "{{.ValueComponent}}.enabled": Components.{{.ComponentName}}.enabled}", nil},
	componentEnablementPattern = "Components.{{.ComponentName}}.Enabled"
	// specialComponentPath lists cases of component path of values.yaml we need to have special treatment.
	specialComponentPath = map[string]bool{
		"gateways":                      true,
		"gateways.istio-ingressgateway": true,
		"gateways.istio-egressgateway":  true,
	}
	// skipTranslate lists component names excluded from reverse translation.
	skipTranslate = map[name.ComponentName]bool{
		name.IstioBaseComponentName:          true,
		name.IstioOperatorComponentName:      true,
		name.IstioOperatorCustomResourceName: true,
		name.CNIComponentName:                true,
		name.IstiodRemoteComponentName:       true,
		name.ZtunnelComponentName:            true,
	}
	// gatewayPathMapping maps values.yaml gateway subtree roots to their IOP component names.
	gatewayPathMapping = map[string]name.ComponentName{
		"gateways.istio-ingressgateway": name.IngressComponentName,
		"gateways.istio-egressgateway":  name.EgressComponentName,
	}
)
// initAPIAndComponentMapping generates the reverse API and component-name
// mappings from the forward translator's mappings.
func (t *ReverseTranslator) initAPIAndComponentMapping() {
	forward := NewTranslator()
	t.APIMapping = make(map[string]*Translation)
	t.KubernetesMapping = make(map[string]*Translation)
	t.ValuesToComponentName = make(map[string]name.ComponentName)
	// Invert the forward API mapping: output path -> values path.
	for valKey, outVal := range forward.APIMapping {
		t.APIMapping[outVal.OutPath] = &Translation{valKey, nil}
	}
	for cn, cm := range forward.ComponentMaps {
		// we use dedicated translateGateway for gateway instead
		if skipTranslate[cn] || cm.SkipReverseTranslate || cn.IsGateway() {
			continue
		}
		t.ValuesToComponentName[cm.ToHelmValuesTreeRoot] = cn
	}
}
// initK8SMapping generates the k8s settings mapping for components that are enabled based on templates.
func (t *ReverseTranslator) initK8SMapping() error {
	componentMapping := make(map[string]*Translation)
	for valKey, componentName := range t.ValuesToComponentName {
		for tmplKey, outPathTmpl := range t.KubernetesPatternMapping {
			renderedKey, err := renderComponentName(tmplKey, valKey)
			if err != nil {
				return err
			}
			renderedPath, err := renderFeatureComponentPathTemplate(outPathTmpl, componentName)
			if err != nil {
				return err
			}
			componentMapping[renderedKey] = &Translation{renderedPath, nil}
		}
	}
	t.KubernetesMapping = componentMapping

	ingressMapping := make(map[string]*Translation)
	egressMapping := make(map[string]*Translation)
	for valKey, componentName := range gatewayPathMapping {
		target := ingressMapping
		if componentName == name.EgressComponentName {
			target = egressMapping
		}
		for tmplKey, outPathTmpl := range t.KubernetesPatternMapping {
			renderedKey, err := renderComponentName(tmplKey, valKey)
			if err != nil {
				return err
			}
			// Gateways keep only the trailing two path elements (e.g. "K8s.<setting>").
			outPath := util.PathFromString(outPathTmpl)
			target[renderedKey] = &Translation{outPath[len(outPath)-2:].String(), nil}
		}
	}
	t.GatewayKubernetesMapping = gatewayKubernetesMapping{IngressMapping: ingressMapping, EgressMapping: egressMapping}
	return nil
}
// NewReverseTranslator creates a new ReverseTranslator for minorVersion and returns a ptr to it.
// The pattern mapping below is expanded per component by initK8SMapping; note that both
// rollingMaxSurge and rollingMaxUnavailable map into K8s.Strategy (see translateStrategy).
func NewReverseTranslator() *ReverseTranslator {
	rt := &ReverseTranslator{
		KubernetesPatternMapping: map[string]string{
			"{{.ValueComponentName}}.env":                   "Components.{{.ComponentName}}.K8s.Env",
			"{{.ValueComponentName}}.autoscaleEnabled":      "Components.{{.ComponentName}}.K8s.HpaSpec",
			"{{.ValueComponentName}}.imagePullPolicy":       "Components.{{.ComponentName}}.K8s.ImagePullPolicy",
			"{{.ValueComponentName}}.nodeSelector":          "Components.{{.ComponentName}}.K8s.NodeSelector",
			"{{.ValueComponentName}}.tolerations":           "Components.{{.ComponentName}}.K8s.Tolerations",
			"{{.ValueComponentName}}.podDisruptionBudget":   "Components.{{.ComponentName}}.K8s.PodDisruptionBudget",
			"{{.ValueComponentName}}.podAnnotations":        "Components.{{.ComponentName}}.K8s.PodAnnotations",
			"{{.ValueComponentName}}.priorityClassName":     "Components.{{.ComponentName}}.K8s.PriorityClassName",
			"{{.ValueComponentName}}.readinessProbe":        "Components.{{.ComponentName}}.K8s.ReadinessProbe",
			"{{.ValueComponentName}}.replicaCount":          "Components.{{.ComponentName}}.K8s.ReplicaCount",
			"{{.ValueComponentName}}.resources":             "Components.{{.ComponentName}}.K8s.Resources",
			"{{.ValueComponentName}}.rollingMaxSurge":       "Components.{{.ComponentName}}.K8s.Strategy",
			"{{.ValueComponentName}}.rollingMaxUnavailable": "Components.{{.ComponentName}}.K8s.Strategy",
			"{{.ValueComponentName}}.serviceAnnotations":    "Components.{{.ComponentName}}.K8s.ServiceAnnotations",
		},
	}
	rt.initAPIAndComponentMapping()
	rt.Version = oversion.OperatorBinaryVersion.MinorVersion
	return rt
}
// TranslateFromValueToSpec translates from values.yaml value to IstioOperatorSpec.
// The values bytes are unmarshalled into an untyped tree, translated via
// TranslateTree, and finally unmarshalled into the typed spec; force controls
// whether unknown fields are tolerated in the final unmarshal.
func (t *ReverseTranslator) TranslateFromValueToSpec(values []byte, force bool) (controlPlaneSpec *v1alpha1.IstioOperatorSpec, err error) {
	yamlTree := make(map[string]any)
	err = yaml.Unmarshal(values, &yamlTree)
	if err != nil {
		// Fixed typo in the error message: "untype" -> "untyped".
		return nil, fmt.Errorf("error when unmarshalling into untyped tree %v", err)
	}
	outputTree := make(map[string]any)
	err = t.TranslateTree(yamlTree, outputTree, nil)
	if err != nil {
		return nil, err
	}
	outputVal, err := yaml.Marshal(outputTree)
	if err != nil {
		return nil, err
	}
	cpSpec := &v1alpha1.IstioOperatorSpec{}
	err = util.UnmarshalWithJSONPB(string(outputVal), cpSpec, force)
	if err != nil {
		return nil, fmt.Errorf("error when unmarshalling into control plane spec %v, \nyaml:\n %s", err, outputVal)
	}
	return cpSpec, nil
}
// TranslateTree translates input value.yaml Tree to ControlPlaneSpec Tree.
// Translation proceeds in stages: enablement, API mapping, k8s settings,
// gateways, then all remaining untranslated values paths.
func (t *ReverseTranslator) TranslateTree(valueTree map[string]any, cpSpecTree map[string]any, path util.Path) error {
	// translate enablement and namespace
	if err := t.setEnablementFromValue(valueTree, cpSpecTree); err != nil {
		return fmt.Errorf("error when translating enablement and namespace from value.yaml tree: %v", err)
	}
	// translate with api mapping
	if err := t.translateAPI(valueTree, cpSpecTree); err != nil {
		return fmt.Errorf("error when translating value.yaml tree with global mapping: %v", err)
	}
	// translate with k8s mapping
	if err := t.TranslateK8S(valueTree, cpSpecTree); err != nil {
		return err
	}
	if err := t.translateGateway(valueTree, cpSpecTree); err != nil {
		return fmt.Errorf("error when translating gateway with kubernetes mapping: %v", err.Error())
	}
	// translate remaining untranslated paths into component values
	if err := t.translateRemainingPaths(valueTree, cpSpecTree, nil); err != nil {
		return fmt.Errorf("error when translating remaining path: %v", err)
	}
	return nil
}
// TranslateK8S is a helper function to translate k8s settings from values.yaml to IstioOperator, except for gateways.
func (t *ReverseTranslator) TranslateK8S(valueTree map[string]any, cpSpecTree map[string]any) error {
	// The mapping must be regenerated first: it depends on ValuesToComponentName.
	err := t.initK8SMapping()
	if err != nil {
		return fmt.Errorf("error when initiating k8s mapping: %v", err)
	}
	err = t.translateK8sTree(valueTree, cpSpecTree, t.KubernetesMapping)
	if err != nil {
		return fmt.Errorf("error when translating value.yaml tree with kubernetes mapping: %v", err)
	}
	return nil
}
// setEnablementFromValue translates the enablement value of components in the values.yaml
// tree, based on feature/component inheritance relationship.
func (t *ReverseTranslator) setEnablementFromValue(valueSpec map[string]any, root map[string]any) error {
	for _, componentName := range t.ValuesToComponentName {
		enabled, pathExist, err := IsComponentEnabledFromValue(componentName, valueSpec)
		if err != nil {
			return err
		}
		// Nothing to write when the values tree does not mention the component.
		if !pathExist {
			continue
		}
		enablementPath, err := renderFeatureComponentPathTemplate(componentEnablementPattern, componentName)
		if err != nil {
			return err
		}
		// set component enablement
		if err := tpath.WriteNode(root, util.ToYAMLPath(enablementPath), enabled); err != nil {
			return err
		}
	}
	return nil
}
// WarningForGatewayK8SSettings creates deprecated warning messages
// when user try to set kubernetes settings for gateways via values api.
// Returns an empty string when no deprecated paths are present in valuesOverlay.
func (t *ReverseTranslator) WarningForGatewayK8SSettings(valuesOverlay string) (string, error) {
	gwOverlay, err := tpath.GetConfigSubtree(valuesOverlay, "gateways")
	if err != nil {
		return "", fmt.Errorf("error getting gateways overlay from valuesOverlayYaml %v", err)
	}
	if gwOverlay == "" {
		return "", nil
	}
	var deprecatedFields []string
	// Ingress and egress mappings are scanned identically; previously this was
	// two copy-pasted loops.
	for _, mapping := range []map[string]*Translation{
		t.GatewayKubernetesMapping.IngressMapping,
		t.GatewayKubernetesMapping.EgressMapping,
	} {
		for inPath := range mapping {
			_, found, err := tpath.GetPathContext(valuesOverlay, util.ToYAMLPath(inPath), false)
			if err != nil {
				// Lookup failures are non-fatal; just log and keep scanning.
				scope.Debug(err.Error())
				continue
			}
			if found {
				deprecatedFields = append(deprecatedFields, inPath)
			}
		}
	}
	if len(deprecatedFields) == 0 {
		return "", nil
	}
	warningMessage := fmt.Sprintf("using deprecated values api paths: %s.\n"+
		" please use k8s spec of gateway components instead\n", strings.Join(deprecatedFields, ","))
	return warningMessage, nil
}
// translateGateway handles translation for gateways specific configuration
func (t *ReverseTranslator) translateGateway(valueSpec map[string]any, root map[string]any) error {
	for inPath, componentName := range gatewayPathMapping {
		enabled, pathExist, err := IsComponentEnabledFromValue(componentName, valueSpec)
		if err != nil {
			return err
		}
		if !pathExist && !enabled {
			continue
		}
		// Build a single gateway spec entry; its name is the second element of
		// the values path (e.g. "istio-ingressgateway").
		gwSpec := map[string]any{
			"enabled": enabled,
			"name":    util.ToYAMLPath(inPath)[1],
		}
		gwSpecs := []map[string]any{gwSpec}
		if enabled {
			mapping := t.GatewayKubernetesMapping.IngressMapping
			if componentName == name.EgressComponentName {
				mapping = t.GatewayKubernetesMapping.EgressMapping
			}
			if err := t.translateK8sTree(valueSpec, gwSpec, mapping); err != nil {
				return err
			}
		}
		if err := tpath.WriteNode(root, util.ToYAMLPath("Components."+string(componentName)), gwSpecs); err != nil {
			return err
		}
	}
	return nil
}
// TranslateK8SfromValueToIOP use reverse translation to convert k8s settings defined in values API to IOP API.
// this ensures that user overlays that set k8s through spec.values
// are not overridden by spec.components.X.k8s settings in the base profiles
func (t *ReverseTranslator) TranslateK8SfromValueToIOP(userOverlayYaml string) (string, error) {
	valuesOverlay, err := tpath.GetConfigSubtree(userOverlayYaml, "spec.values")
	if err != nil {
		// No spec.values section means there is nothing to reverse translate.
		scope.Debugf("no spec.values section from userOverlayYaml %v", err)
		return "", nil
	}
	valuesOverlayTree := make(map[string]any)
	err = yaml.Unmarshal([]byte(valuesOverlay), &valuesOverlayTree)
	if err != nil {
		return "", fmt.Errorf("error unmarshalling values overlay yaml into untype tree %v", err)
	}
	iopSpecTree := make(map[string]any)
	iopSpecOverlay, err := tpath.GetConfigSubtree(userOverlayYaml, "spec")
	if err != nil {
		return "", fmt.Errorf("error getting iop spec subtree from overlay yaml %v", err)
	}
	err = yaml.Unmarshal([]byte(iopSpecOverlay), &iopSpecTree)
	if err != nil {
		return "", fmt.Errorf("error unmarshalling spec overlay yaml into tree %v", err)
	}
	if err = t.TranslateK8S(valuesOverlayTree, iopSpecTree); err != nil {
		return "", err
	}
	warning, err := t.WarningForGatewayK8SSettings(valuesOverlay)
	if err != nil {
		return "", fmt.Errorf("error handling values gateway k8s settings: %v", err)
	}
	if warning != "" {
		// Fixed: fmt.Errorf(warning) used a non-constant format string (go vet
		// printf violation); a '%' in the warning would corrupt the message.
		return "", fmt.Errorf("%s", warning)
	}
	iopSpecTreeYAML, err := yaml.Marshal(iopSpecTree)
	if err != nil {
		return "", fmt.Errorf("error marshaling reverse translated tree %v", err)
	}
	iopTreeYAML, err := tpath.AddSpecRoot(string(iopSpecTreeYAML))
	if err != nil {
		return "", fmt.Errorf("error adding spec root: %v", err)
	}
	// overlay the reverse translated iopTreeYAML back to userOverlayYaml
	finalYAML, err := util.OverlayYAML(userOverlayYaml, iopTreeYAML)
	if err != nil {
		return "", fmt.Errorf("failed to overlay the reverse translated iopTreeYAML: %v", err)
	}
	return finalYAML, err
}
// translateStrategy translates Deployment Strategy related configurations from helm values.yaml tree.
// fieldName must be rollingMaxSurge or rollingMaxUnavailable; value is written to
// outPath + ".rollingUpdate.<maxSurge|maxUnavailable>" in cpSpecTree.
func translateStrategy(fieldName string, outPath string, value any, cpSpecTree map[string]any) error {
	fieldMap := map[string]string{
		"rollingMaxSurge":       "maxSurge",
		"rollingMaxUnavailable": "maxUnavailable",
	}
	newFieldName, ok := fieldMap[fieldName]
	if !ok {
		// Fixed message: this branch fires for an *unexpected* field name, the
		// old text said "expected field name found".
		return fmt.Errorf("unexpected field name found in values.yaml: %s", fieldName)
	}
	outPath += ".rollingUpdate." + newFieldName
	scope.Debugf("path has value in helm Value.yaml tree, mapping to output path %s", outPath)
	if err := tpath.WriteNode(cpSpecTree, util.ToYAMLPath(outPath), value); err != nil {
		return err
	}
	return nil
}
// sortedEnvKeys returns the keys of envMap in ascending order so that the
// generated env list is deterministic across map iterations.
func sortedEnvKeys(envMap map[string]any) []string {
	keys := make([]string, 0, len(envMap))
	for k := range envMap {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// translateEnv translates env value from helm values.yaml tree.
// When the destination has no env settings, the whole list is written; otherwise
// each entry is merged individually into the existing list.
func translateEnv(outPath string, value any, cpSpecTree map[string]any) error {
	envMap, ok := value.(map[string]any)
	if !ok {
		return fmt.Errorf("expect env node type to be map[string]interface{} but got: %T", value)
	}
	if len(envMap) == 0 {
		return nil
	}
	scope.Debugf("path has value in helm Value.yaml tree, mapping to output path %s", outPath)
	nc, found, _ := tpath.GetPathContext(cpSpecTree, util.ToYAMLPath(outPath), false)
	var envValStr []byte
	if nc != nil {
		envValStr, _ = yaml.Marshal(nc.Node)
	}
	if !found || strings.TrimSpace(string(envValStr)) == "{}" {
		// Destination has no env settings: overwrite with the translated list.
		scope.Debugf("path doesn't have value in k8s setting with output path %s, override with helm Value.yaml tree", outPath)
		keys := sortedEnvKeys(envMap)
		outEnv := make([]map[string]any, len(keys))
		for i, k := range keys {
			outEnv[i] = map[string]any{
				"name": k,
				// Values are stringified; k8s env vars are strings.
				"value": fmt.Sprintf("%v", envMap[k]),
			}
		}
		return tpath.WriteNode(cpSpecTree, util.ToYAMLPath(outPath), outEnv)
	}
	// Destination already has env settings: merge each entry individually.
	scope.Debugf("path has value in k8s setting with output path %s, merge it with helm Value.yaml tree", outPath)
	for _, k := range sortedEnvKeys(envMap) {
		outEnv := map[string]any{
			"name":  k,
			"value": fmt.Sprintf("%v", envMap[k]),
		}
		if err := tpath.MergeNode(cpSpecTree, util.ToYAMLPath(outPath), outEnv); err != nil {
			return err
		}
	}
	return nil
}
// translateK8sTree is internal method for translating K8s configurations from value.yaml tree.
// For every mapping entry found in valueTree, the value is written (or merged, for env and
// strategy settings) into cpSpecTree and then deleted from valueTree so that
// translateRemainingPaths does not process it again.
func (t *ReverseTranslator) translateK8sTree(valueTree map[string]any,
	cpSpecTree map[string]any, mapping map[string]*Translation,
) error {
	for inPath, v := range mapping {
		scope.Debugf("Checking for k8s path %s in helm Value.yaml tree", inPath)
		path := util.PathFromString(inPath)
		// The last path element names the k8s setting (e.g. "env", "resources").
		k8sSettingName := ""
		if len(path) != 0 {
			k8sSettingName = path[len(path)-1]
		}
		// autoscaleEnabled is skipped here — presumably handled through the
		// HpaSpec mapping elsewhere; confirm against the forward translator.
		if k8sSettingName == "autoscaleEnabled" {
			continue
		}
		m, found, err := tpath.Find(valueTree, util.ToYAMLPath(inPath))
		if err != nil {
			return err
		}
		if !found {
			scope.Debugf("path %s not found in helm Value.yaml tree, skip mapping.", inPath)
			continue
		}
		// Empty strings carry no information for the destination; skip.
		if mstr, ok := m.(string); ok && mstr == "" {
			scope.Debugf("path %s is empty string, skip mapping.", inPath)
			continue
		}
		// Zero int values are due to proto3 compiling to scalars rather than ptrs. Skip these because values of 0 are
		// the default in destination fields and need not be set explicitly.
		if mint, ok := util.ToIntValue(m); ok && mint == 0 {
			scope.Debugf("path %s is int 0, skip mapping.", inPath)
			continue
		}
		switch k8sSettingName {
		case "env":
			// Env lists need list-aware merging, not a plain overwrite.
			err := translateEnv(v.OutPath, m, cpSpecTree)
			if err != nil {
				return fmt.Errorf("error in translating k8s Env: %s", err)
			}
		case "rollingMaxSurge", "rollingMaxUnavailable":
			// Both values map into the single K8s.Strategy rollingUpdate field.
			err := translateStrategy(k8sSettingName, v.OutPath, m, cpSpecTree)
			if err != nil {
				return fmt.Errorf("error in translating k8s Strategy: %s", err)
			}
		default:
			if util.IsValueNilOrDefault(m) {
				continue
			}
			output := util.ToYAMLPath(v.OutPath)
			scope.Debugf("path has value in helm Value.yaml tree, mapping to output path %s", output)
			if err := tpath.WriteNode(cpSpecTree, output, m); err != nil {
				return err
			}
		}
		metrics.LegacyPathTranslationTotal.Increment()
		// Remove the translated path so later passes do not re-translate it.
		if _, err := tpath.Delete(valueTree, util.ToYAMLPath(inPath)); err != nil {
			return err
		}
	}
	return nil
}
// translateRemainingPaths translates remaining paths that are not available in existing mappings.
func (t *ReverseTranslator) translateRemainingPaths(valueTree map[string]any,
	cpSpecTree map[string]any, path util.Path,
) error {
	for key, val := range valueTree {
		// NOTE(review): append may share path's backing array across iterations;
		// safe today because each newPath is fully consumed before the next
		// iteration, but copy it before retaining it anywhere.
		newPath := append(path, key)
		// value set to nil means no translation needed or being translated already.
		if val == nil {
			continue
		}
		switch typedVal := val.(type) {
		case map[string]any:
			// Descend into subtrees recursively.
			if err := t.translateRemainingPaths(typedVal, cpSpecTree, newPath); err != nil {
				return err
			}
		case []any:
			if err := tpath.WriteNode(cpSpecTree, util.ToYAMLPath("Values."+newPath.String()), typedVal); err != nil {
				return err
			}
		default:
			// remaining leaf need to be put into root.values
			if t.isEnablementPath(newPath) {
				continue
			}
			if err := tpath.WriteNode(cpSpecTree, util.ToYAMLPath("Values."+newPath.String()), val); err != nil {
				return err
			}
		}
	}
	return nil
}
// translateAPI is internal method for translating value.yaml tree based on API mapping.
func (t *ReverseTranslator) translateAPI(valueTree map[string]any,
	cpSpecTree map[string]any,
) error {
	for inPath, translation := range t.APIMapping {
		scope.Debugf("Checking for path %s in helm Value.yaml tree", inPath)
		m, found, err := tpath.Find(valueTree, util.ToYAMLPath(inPath))
		if err != nil {
			return err
		}
		if !found {
			scope.Debugf("path %s not found in helm Value.yaml tree, skip mapping.", inPath)
			continue
		}
		if s, ok := m.(string); ok && s == "" {
			scope.Debugf("path %s is empty string, skip mapping.", inPath)
			continue
		}
		// Zero int values are due to proto3 compiling to scalars rather than ptrs. Skip these because values of 0 are
		// the default in destination fields and need not be set explicitly.
		if n, ok := util.ToIntValue(m); ok && n == 0 {
			scope.Debugf("path %s is int 0, skip mapping.", inPath)
			continue
		}
		outPath := util.ToYAMLPath(translation.OutPath)
		scope.Debugf("path has value in helm Value.yaml tree, mapping to output path %s", outPath)
		metrics.LegacyPathTranslationTotal.
			With(metrics.ResourceKindLabel.Value(inPath)).Increment()
		if err := tpath.WriteNode(cpSpecTree, outPath, m); err != nil {
			return err
		}
		// Delete the source path so later passes do not re-translate it.
		if _, err := tpath.Delete(valueTree, util.ToYAMLPath(inPath)); err != nil {
			return err
		}
	}
	return nil
}
// isEnablementPath is helper function to check whether paths represent enablement of components in values.yaml
func (t *ReverseTranslator) isEnablementPath(path util.Path) bool {
	// An enablement path is "<component root>.enabled" with at least two elements.
	if len(path) < 2 {
		return false
	}
	if path[len(path)-1] != "enabled" {
		return false
	}
	prefix := path[:len(path)-1].String()
	if specialComponentPath[prefix] {
		return true
	}
	_, isComponent := t.ValuesToComponentName[prefix]
	return isComponent
}
// renderComponentName renders a template of the form <path>{{.ComponentName}}<path> with
// the supplied parameters.
func renderComponentName(tmpl string, componentName string) (string, error) {
	data := struct {
		ValueComponentName string
	}{ValueComponentName: componentName}
	return util.RenderTemplate(tmpl, data)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package translate
import (
"gopkg.in/yaml.v2"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
)
// YAMLTree takes an input tree inTreeStr, a partially constructed output tree outTreeStr, and a map of
// translations of source-path:dest-path in pkg/tpath format. It returns an output tree with paths from the input
// tree, translated and overlaid on the output tree.
func YAMLTree(inTreeStr, outTreeStr string, translations map[string]string) (string, error) {
	srcTree := make(map[string]any)
	if err := yaml.Unmarshal([]byte(inTreeStr), &srcTree); err != nil {
		return "", err
	}
	dstTree := make(map[string]any)
	if err := yaml.Unmarshal([]byte(outTreeStr), &dstTree); err != nil {
		return "", err
	}
	for srcPath, dstPath := range translations {
		node, found, err := tpath.Find(srcTree, util.PathFromString(srcPath))
		if err != nil {
			return "", err
		}
		// Source paths absent from the input tree are simply skipped.
		if !found {
			continue
		}
		if err := tpath.MergeNode(dstTree, util.PathFromString(dstPath), node); err != nil {
			return "", err
		}
	}
	out, err := yaml.Marshal(dstTree)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clog
import (
"fmt"
"io"
"os"
"istio.io/istio/pkg/log"
)
// Logger provides optional log taps for console and test buffer outputs.
type Logger interface {
	// LogAndPrint writes v to the console output stream and logs it.
	LogAndPrint(v ...any)
	// LogAndError writes v to the console error stream and logs it.
	LogAndError(v ...any)
	// LogAndFatal writes a to the console error stream, logs it, then exits the process.
	LogAndFatal(a ...any)
	// LogAndPrintf formats, writes to the console output stream and logs.
	LogAndPrintf(format string, a ...any)
	// LogAndErrorf formats, writes to the console error stream and logs.
	LogAndErrorf(format string, a ...any)
	// LogAndFatalf formats, writes to the console error stream, logs, then exits the process.
	LogAndFatalf(format string, a ...any)
	// Print writes s verbatim to the console output stream.
	Print(s string)
	// PrintErr writes s verbatim to the console error stream.
	PrintErr(s string)
}
// ConsoleLogger is the struct used for mesh command
type ConsoleLogger struct {
	stdOut io.Writer  // destination for normal console output
	stdErr io.Writer  // destination for error console output
	scope  *log.Scope // logging scope all console messages are mirrored to
}
// NewConsoleLogger creates a new logger and returns a pointer to it.
// stdOut and stdErr can be used to capture output for testing. If scope is nil, the default scope is used.
func NewConsoleLogger(stdOut, stdErr io.Writer, scope *log.Scope) *ConsoleLogger {
	if scope == nil {
		scope = log.RegisterScope(log.DefaultScopeName, log.DefaultScopeName)
	}
	return &ConsoleLogger{
		stdOut: stdOut,
		stdErr: stdErr,
		scope:  scope,
	}
}
// NewDefaultLogger creates a new logger that outputs to stdout/stderr at default scope.
func NewDefaultLogger() *ConsoleLogger {
	return NewConsoleLogger(os.Stdout, os.Stderr, nil)
}
// LogAndPrint writes v to the console output stream and logs it at info level.
// No-op when called with no arguments.
func (l *ConsoleLogger) LogAndPrint(v ...any) {
	if len(v) == 0 {
		return
	}
	s := fmt.Sprint(v...)
	l.Print(s + "\n")
	// Fixed: Infof(s) treated s as a format string, so a '%' in the message
	// would be misinterpreted as a verb (go vet printf violation).
	l.scope.Infof("%s", s)
}
// LogAndError writes v to the console error stream and logs it at info level.
// No-op when called with no arguments.
func (l *ConsoleLogger) LogAndError(v ...any) {
	if len(v) == 0 {
		return
	}
	s := fmt.Sprint(v...)
	l.PrintErr(s + "\n")
	// Fixed: Infof(s) treated s as a format string; pass it as data instead.
	l.scope.Infof("%s", s)
}
// LogAndFatal writes a to the console error stream, logs it, then exits the
// process with a non-zero status.
func (l *ConsoleLogger) LogAndFatal(a ...any) {
	l.LogAndError(a...)
	// os.Exit(-1) is reported to the OS as exit status 255.
	os.Exit(-1)
}
// LogAndPrintf formats the arguments, writes the result to the console output
// stream and logs it at info level.
func (l *ConsoleLogger) LogAndPrintf(format string, a ...any) {
	s := fmt.Sprintf(format, a...)
	l.Print(s + "\n")
	// Fixed: Infof(s) re-interpreted the already-formatted string as a format
	// string; any literal '%' produced by the first Sprintf would corrupt the log.
	l.scope.Infof("%s", s)
}
// LogAndErrorf formats the arguments, writes the result to the console error
// stream and logs it at info level.
func (l *ConsoleLogger) LogAndErrorf(format string, a ...any) {
	s := fmt.Sprintf(format, a...)
	l.PrintErr(s + "\n")
	// Fixed: Infof(s) re-interpreted the already-formatted string as a format string.
	l.scope.Infof("%s", s)
}
// LogAndFatalf formats the arguments, writes the result to the console error
// stream, logs it, then exits the process with a non-zero status.
func (l *ConsoleLogger) LogAndFatalf(format string, a ...any) {
	l.LogAndErrorf(format, a...)
	// os.Exit(-1) is reported to the OS as exit status 255.
	os.Exit(-1)
}
// Print writes s verbatim to the configured stdout writer; write errors are ignored.
func (l *ConsoleLogger) Print(s string) {
	_, _ = l.stdOut.Write([]byte(s))
}
// PrintErr writes s verbatim to the configured stderr writer; write errors are ignored.
func (l *ConsoleLogger) PrintErr(s string) {
	_, _ = l.stdErr.Write([]byte(s))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"net/url"
"strings"
"istio.io/istio/pkg/log"
)
// scope is the logging scope used by this package.
var scope = log.RegisterScope("util", "util")
// IsFilePath reports whether the given URL is a local file path.
// The heuristic: anything containing a slash or a dot is treated as a path.
func IsFilePath(path string) bool {
	return strings.ContainsAny(path, "/.")
}
// IsHTTPURL checks whether the given URL is a HTTP URL.
// A string that merely starts with "http" but does not parse into a valid
// http(s) URL with a host is reported as an error.
func IsHTTPURL(path string) (bool, error) {
	u, err := url.Parse(path)
	isValid := err == nil && u.Host != "" && (u.Scheme == "http" || u.Scheme == "https")
	if isValid {
		return true, nil
	}
	if strings.HasPrefix(path, "http") {
		return false, fmt.Errorf("%s starts with http but is not a valid URL: %s", path, err)
	}
	return false, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
)
const (
	// defaultSeparator is placed between error messages when an Errors value
	// is rendered as a single string.
	defaultSeparator = ", "
)
// Errors is a slice of error. A nil or empty Errors represents no error
// (see ToError).
type Errors []error
// Error implements the error#Error method.
// Nil entries are skipped and the remaining messages are joined with ", ".
func (e Errors) Error() string {
	return ToString(e, defaultSeparator)
}
// String implements the stringer#String method.
// It returns the same representation as Error.
func (e Errors) String() string {
	return e.Error()
}
// ToError returns an error from Errors.
// An empty (or nil) Errors yields a nil error.
func (e Errors) ToError() error {
	if len(e) != 0 {
		return fmt.Errorf("%s", e)
	}
	return nil
}
// Dedup removes any duplicated errors. Each distinct message appears once in
// the result, annotated with "(repeated N times)" when it occurred more than
// once. Nil entries are dropped.
func (e Errors) Dedup() Errors {
	logCountMap := make(map[string]int)
	for _, ee := range e {
		if ee == nil {
			continue
		}
		logCountMap[ee.Error()]++
	}
	var out Errors
	for _, ee := range e {
		if ee == nil {
			// Fixed: the original second pass called ee.Error() without this
			// guard, panicking on any nil entry the first pass skipped.
			continue
		}
		item := ee.Error()
		count := logCountMap[item]
		if count == 0 {
			continue
		}
		times := ""
		if count > 1 {
			times = fmt.Sprintf(" (repeated %d times)", count)
		}
		out = AppendErr(out, fmt.Errorf("%s%s", ee, times))
		// reset seen log count
		logCountMap[item] = 0
	}
	return out
}
// NewErrs returns a slice of error with a single element err.
// If err is nil, returns nil.
func NewErrs(err error) Errors {
	if err != nil {
		return Errors{err}
	}
	return nil
}
// AppendErr appends err to errors if it is not nil and returns the result.
// If err is nil, nothing is appended; an already-empty input additionally
// collapses to nil.
func AppendErr(errors []error, err error) Errors {
	if err != nil {
		return append(errors, err)
	}
	if len(errors) == 0 {
		return nil
	}
	return errors
}
// AppendErrs appends newErrs to errors and returns the result.
// If newErrs is empty, nothing is appended. A result that ends up with no
// elements collapses to nil.
func AppendErrs(errors []error, newErrs []error) Errors {
	if len(newErrs) == 0 {
		return errors
	}
	out := errors
	for _, e := range newErrs {
		out = AppendErr(out, e)
	}
	if len(out) == 0 {
		return nil
	}
	return out
}
// ToString returns a string representation of errors, with elements separated by separator string. Any nil errors in the
// slice are skipped.
func ToString(errors []error, separator string) string {
	var out string
	for _, e := range errors {
		if e == nil {
			continue
		}
		// Separate only after something has been written. The original keyed
		// this on the slice index (i != 0), which produced a leading separator
		// whenever the first element was nil.
		if out != "" {
			out += separator
		}
		out += e.Error()
	}
	return out
}
// EqualErrors reports whether a and b contain the same errors (compared by
// their Error() strings), regardless of ordering. Duplicates are significant:
// each occurrence in a must match a distinct occurrence in b. (The original
// set-based comparison reported [x,x] equal to [x,y].) Nil elements match
// nil elements.
func EqualErrors(a, b Errors) bool {
	if len(a) != len(b) {
		return false
	}
	m := make(map[string]int, len(b))
	nils := 0
	for _, e := range b {
		if e == nil {
			nils++
			continue
		}
		m[e.Error()]++
	}
	for _, ea := range a {
		if ea == nil {
			nils--
			if nils < 0 {
				return false
			}
			continue
		}
		k := ea.Error()
		m[k]--
		if m[k] < 0 {
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"context"
"fmt"
"strconv"
"github.com/prometheus/prometheus/util/strutil"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"istio.io/api/label"
iopv1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/kube"
)
// JWTPolicy is the service-account token policy supported by a cluster,
// as detected by DetectSupportedJWTPolicy.
type JWTPolicy string

const (
	// FirstPartyJWT is returned when the cluster does not expose the
	// "serviceaccounts/token" (TokenRequest) API resource.
	FirstPartyJWT JWTPolicy = "first-party-jwt"
	// ThirdPartyJWT is returned when the TokenRequest API resource is present.
	ThirdPartyJWT JWTPolicy = "third-party-jwt"
)
// DetectSupportedJWTPolicy queries the api-server to detect whether it has TokenRequest support
// (the "serviceaccounts/token" subresource). It returns ThirdPartyJWT if the
// subresource is found and FirstPartyJWT otherwise.
func DetectSupportedJWTPolicy(client kubernetes.Interface) (JWTPolicy, error) {
	_, s, err := client.Discovery().ServerGroupsAndResources()
	// This may fail if any api service is down. We should only fail if the specific API we care about failed
	if err != nil {
		if discovery.IsGroupDiscoveryFailedError(err) {
			// Type assertion is safe: IsGroupDiscoveryFailedError just
			// confirmed the concrete type.
			derr := err.(*discovery.ErrGroupDiscoveryFailed)
			// Only propagate the error if discovery of authentication.k8s.io/v1
			// itself failed — that is the group we are probing.
			if _, f := derr.Groups[schema.GroupVersion{Group: "authentication.k8s.io", Version: "v1"}]; f {
				return "", err
			}
		} else {
			return "", err
		}
	}
	for _, res := range s {
		for _, api := range res.APIResources {
			// Appearance of this API indicates we do support third party jwt token
			if api.Name == "serviceaccounts/token" {
				return ThirdPartyJWT, nil
			}
		}
	}
	return FirstPartyJWT, nil
}
// GKString differs from the default representation of GroupKind by rendering
// it as "group/kind".
// The parameter was previously named "gvk", which misleadingly suggested a
// GroupVersionKind; it is a GroupKind.
func GKString(gk schema.GroupKind) string {
	return fmt.Sprintf("%s/%s", gk.Group, gk.Kind)
}
// ValidateIOPCAConfig validates if the IstioOperator CA configs are applicable to the K8s cluster.
// Specifically, pilotCertProvider=kubernetes relies on the legacy cluster
// signer, which is rejected for Kubernetes >= 1.22.
func ValidateIOPCAConfig(client kube.Client, iop *iopv1alpha1.IstioOperator) error {
	globalI := iop.Spec.Values.AsMap()["global"]
	global, ok := globalI.(map[string]any)
	if !ok {
		// This means no explicit global configuration. Still okay
		return nil
	}
	ca, ok := global["pilotCertProvider"].(string)
	if !ok {
		// This means the default pilotCertProvider is being used
		return nil
	}
	if ca == "kubernetes" {
		ver, err := client.GetKubernetesVersion()
		if err != nil {
			return fmt.Errorf("failed to determine support for K8s legacy signer. Use the --force flag to ignore this: %v", err)
		}
		if kube.IsAtLeastVersion(client, 22) {
			// Note: a space was added between the two joined sentences; the
			// original rendered "...Kubernetes v1.22.Please pick...".
			return fmt.Errorf("configuration PILOT_CERT_PROVIDER=%s not supported in Kubernetes %v. "+
				"Please pick another value for PILOT_CERT_PROVIDER", ca, ver.String())
		}
	}
	return nil
}
// CreateNamespace creates a namespace using the given k8s interface.
// An empty namespace defaults to the Istio system namespace. A pre-existing
// namespace (with or without the network label) is left untouched.
func CreateNamespace(cs kubernetes.Interface, namespace string, network string, dryRun bool) error {
	if dryRun {
		scope.Infof("Not applying Namespace %s because of dry run.", namespace)
		return nil
	}
	if namespace == "" {
		// Setup default namespace
		namespace = constants.IstioSystemNamespace
	}
	// check if the namespace already exists. If yes, do nothing. If no, create a new one.
	_, err := cs.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
	if err == nil {
		return nil
	}
	if !errors.IsNotFound(err) {
		return fmt.Errorf("failed to check if namespace %v exists: %v", namespace, err)
	}
	labels := map[string]string{}
	if network != "" {
		labels[label.TopologyNetwork.Name] = network
	}
	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{
		Name:   namespace,
		Labels: labels,
	}}
	if _, err := cs.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("failed to create namespace %v: %v", namespace, err)
	}
	return nil
}
// PrometheusPathAndPort returns the metrics path and port configured on the
// pod via prometheus.io annotations, defaulting to "/metrics" and 9090 when
// no annotation is present. A non-numeric port annotation is an error.
func PrometheusPathAndPort(pod *v1.Pod) (string, int, error) {
	path, port := "/metrics", 9090
	for key, val := range pod.ObjectMeta.Annotations {
		name := strutil.SanitizeLabelName(key)
		if name == "prometheus_io_path" {
			path = val
			continue
		}
		if name != "prometheus_io_port" {
			continue
		}
		p, err := strconv.Atoi(val)
		if err != nil {
			return "", 0, fmt.Errorf("failed to parse port from annotation: %v", err)
		}
		port = p
	}
	return path, port, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
)
// SetLabel is a helper function which sets the specified label and value on the specified object.
// Objects without any labels get a fresh label map created for them.
func SetLabel(resource runtime.Object, label, value string) error {
	accessor, err := meta.Accessor(resource)
	if err != nil {
		return err
	}
	existing := accessor.GetLabels()
	if existing == nil {
		existing = make(map[string]string)
	}
	existing[label] = value
	accessor.SetLabels(existing)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"strings"
"google.golang.org/protobuf/types/known/durationpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/strategicpatch"
yaml2 "sigs.k8s.io/yaml"
v1alpha13 "istio.io/api/mesh/v1alpha1"
"istio.io/api/networking/v1alpha3"
"istio.io/api/operator/v1alpha1"
v1alpha12 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
)
// Partially mirrored from istio/api and operator/pkg/api (for values).
// Struct tags are required to use k8s strategic merge library. It would be possible
// to add these to source protos but because the values field is defined as
// map[string]interface{} here (and similar for MeshConfig in v1alpha1.Values)
// that alone would not be sufficient.
// Only non-scalar types require tags, therefore most fields are omitted here.

// iopMergeStructType is the top-level schema handed to the strategic merge
// library: an IstioOperator resource reduced to the fields that need
// patchStrategy/patchMergeKey metadata.
type iopMergeStructType struct {
	metav1.ObjectMeta `json:"metadata" patchStrategy:"merge"`
	Spec              istioOperatorSpec `json:"spec" patchStrategy:"merge"`
}

// istioOperatorSpec mirrors the mergeable fields of IstioOperatorSpec.
type istioOperatorSpec struct {
	MeshConfig *meshConfig            `json:"meshConfig" patchStrategy:"merge"`
	Components *istioComponentSetSpec `json:"components" patchStrategy:"merge"`
	Values     *values                `json:"values" patchStrategy:"merge"`
}

// istioComponentSetSpec mirrors per-component configuration. Gateway lists
// merge element-wise, keyed on "name".
type istioComponentSetSpec struct {
	Base  *baseComponentSpec `json:"base" patchStrategy:"merge"`
	Pilot *componentSpec     `json:"pilot" patchStrategy:"merge"`
	Cni   *componentSpec     `json:"cni" patchStrategy:"merge"`
	// NOTE(review): field name "Ztunel" looks like a typo for "Ztunnel". Only
	// the json tag is used by the merge library, so the field name is
	// cosmetic — confirm no other code references it before renaming.
	Ztunel          *componentSpec `json:"ztunnel" patchStrategy:"merge"`
	IstiodRemote    *componentSpec `json:"istiodRemote" patchStrategy:"merge"`
	IngressGateways []*gatewaySpec `json:"ingressGateways" patchStrategy:"merge" patchMergeKey:"name"`
	EgressGateways  []*gatewaySpec `json:"egressGateways" patchStrategy:"merge" patchMergeKey:"name"`
}

// baseComponentSpec carries Kubernetes overrides for the base component.
type baseComponentSpec struct {
	K8S *v1alpha1.KubernetesResourcesSpec `json:"k8s" patchStrategy:"merge"`
}

// componentSpec carries Kubernetes overrides for a regular component.
type componentSpec struct {
	K8S *v1alpha1.KubernetesResourcesSpec `json:"k8s" patchStrategy:"merge"`
}

// gatewaySpec carries Kubernetes overrides and labels for a gateway component.
type gatewaySpec struct {
	Label map[string]string                 `json:"label" patchStrategy:"merge"`
	K8S   *v1alpha1.KubernetesResourcesSpec `json:"k8s" patchStrategy:"merge"`
}
// values mirrors the Helm values overlay fields that need merge metadata.
type values struct {
	Cni                    *v1alpha12.CNIConfig             `json:"cni" patchStrategy:"merge"`
	Gateways               *gatewaysConfig                  `json:"gateways" patchStrategy:"merge"`
	Global                 *v1alpha12.GlobalConfig          `json:"global" patchStrategy:"merge"`
	Pilot                  *v1alpha12.PilotConfig           `json:"pilot" patchStrategy:"merge"`
	Telemetry              *telemetryConfig                 `json:"telemetry" patchStrategy:"merge"`
	SidecarInjectorWebhook *v1alpha12.SidecarInjectorConfig `json:"sidecarInjectorWebhook" patchStrategy:"merge"`
	IstioCni               *v1alpha12.CNIConfig             `json:"istio_cni" patchStrategy:"merge"`
	MeshConfig             *meshConfig                      `json:"meshConfig" patchStrategy:"merge"`
	Base                   *v1alpha12.BaseConfig            `json:"base" patchStrategy:"merge"`
	IstiodRemote           *v1alpha12.IstiodRemoteConfig    `json:"istiodRemote" patchStrategy:"merge"`
	Ztunnel                map[string]any                   `json:"ztunnel" patchStrategy:"merge"`
}

// gatewaysConfig holds the two built-in gateway value blocks.
type gatewaysConfig struct {
	IstioEgressgateway  *egressGatewayConfig  `json:"istio-egressgateway" patchStrategy:"merge"`
	IstioIngressgateway *ingressGatewayConfig `json:"istio-ingressgateway" patchStrategy:"merge"`
}

// Configuration for an ingress gateway.
// Fields tagged patchStrategy:"replace" are overwritten wholesale by an
// overlay; lists tagged "merge" with a patchMergeKey merge element-wise on
// that key.
type ingressGatewayConfig struct {
	Env                              map[string]any                      `json:"env" patchStrategy:"merge"`
	Labels                           map[string]string                   `json:"labels" patchStrategy:"merge"`
	CPU                              *v1alpha12.TargetUtilizationConfig  `json:"cpu" patchStrategy:"replace"`
	Memory                           *v1alpha12.TargetUtilizationConfig  `json:"memory" patchStrategy:"replace"`
	LoadBalancerSourceRanges         []string                            `json:"loadBalancerSourceRanges" patchStrategy:"replace"`
	NodeSelector                     map[string]any                      `json:"nodeSelector" patchStrategy:"merge"`
	PodAntiAffinityLabelSelector     []map[string]any                    `json:"podAntiAffinityLabelSelector" patchStrategy:"replace"`
	PodAntiAffinityTermLabelSelector []map[string]any                    `json:"podAntiAffinityTermLabelSelector" patchStrategy:"replace"`
	PodAnnotations                   map[string]any                      `json:"podAnnotations" patchStrategy:"merge"`
	MeshExpansionPorts               []*v1alpha12.PortsConfig            `json:"meshExpansionPorts" patchStrategy:"merge" patchMergeKey:"name"`
	Ports                            []*v1alpha12.PortsConfig            `json:"ports" patchStrategy:"merge" patchMergeKey:"name"`
	Resources                        *resources                          `json:"resources" patchStrategy:"merge"`
	SecretVolumes                    []*v1alpha12.SecretVolume           `json:"secretVolumes" patchStrategy:"merge" patchMergeKey:"name"`
	ServiceAnnotations               map[string]any                      `json:"serviceAnnotations" patchStrategy:"merge"`
	Tolerations                      []map[string]any                    `json:"tolerations" patchStrategy:"replace"`
	IngressPorts                     []map[string]any                    `json:"ingressPorts" patchStrategy:"replace"`
	AdditionalContainers             []map[string]any                    `json:"additionalContainers" patchStrategy:"replace"`
	ConfigVolumes                    []map[string]any                    `json:"configVolumes" patchStrategy:"replace"`
	Zvpn                             *v1alpha12.IngressGatewayZvpnConfig `json:"zvpn" patchStrategy:"merge"`
}

// resources mirrors the Kubernetes resource requests/limits maps.
type resources struct {
	Limits   map[string]string `json:"limits" patchStrategy:"merge"`
	Requests map[string]string `json:"requests" patchStrategy:"merge"`
}

// egressGatewayConfig is the egress counterpart of ingressGatewayConfig.
type egressGatewayConfig struct {
	Env                              map[string]any            `json:"env" patchStrategy:"merge"`
	Labels                           map[string]string         `json:"labels" patchStrategy:"merge"`
	NodeSelector                     map[string]any            `json:"nodeSelector" patchStrategy:"merge"`
	PodAntiAffinityLabelSelector     []map[string]any          `json:"podAntiAffinityLabelSelector" patchStrategy:"replace"`
	PodAntiAffinityTermLabelSelector []map[string]any          `json:"podAntiAffinityTermLabelSelector" patchStrategy:"replace"`
	PodAnnotations                   map[string]any            `json:"podAnnotations" patchStrategy:"merge"`
	Ports                            []*v1alpha12.PortsConfig  `json:"ports" patchStrategy:"merge" patchMergeKey:"name"`
	Resources                        *resources                `json:"resources" patchStrategy:"merge"`
	SecretVolumes                    []*v1alpha12.SecretVolume `json:"secretVolumes" patchStrategy:"merge" patchMergeKey:"name"`
	Tolerations                      []map[string]any          `json:"tolerations" patchStrategy:"replace"`
	ConfigVolumes                    []map[string]any          `json:"configVolumes" patchStrategy:"replace"`
	AdditionalContainers             []map[string]any          `json:"additionalContainers" patchStrategy:"replace"`
	Zvpn                             *v1alpha12.ZeroVPNConfig  `json:"zvpn" patchStrategy:"replace"`
}
// meshConfig mirrors MeshConfig fields that need merge metadata. Durations and
// wrapper scalars are replaced; message-typed fields merge recursively; keyed
// lists merge on their patchMergeKey.
type meshConfig struct {
	ConnectTimeout                 *durationpb.Duration                                      `json:"connectTimeout" patchStrategy:"replace"`
	ProtocolDetectionTimeout       *durationpb.Duration                                      `json:"protocolDetectionTimeout" patchStrategy:"replace"`
	RdsRefreshDelay                *durationpb.Duration                                      `json:"rdsRefreshDelay" patchStrategy:"replace"`
	EnableAutoMtls                 *wrappers.BoolValue                                       `json:"enableAutoMtls" patchStrategy:"replace"`
	EnablePrometheusMerge          *wrappers.BoolValue                                       `json:"enablePrometheusMerge" patchStrategy:"replace"`
	OutboundTrafficPolicy          *v1alpha13.MeshConfig_OutboundTrafficPolicy               `json:"outboundTrafficPolicy" patchStrategy:"merge"`
	InboundTrafficPolicy           *v1alpha13.MeshConfig_InboundTrafficPolicy                `json:"inboundTrafficPolicy" patchStrategy:"merge"`
	TCPKeepalive                   *v1alpha3.ConnectionPoolSettings_TCPSettings_TcpKeepalive `json:"tcpKeepalive" patchStrategy:"merge"`
	DefaultConfig                  *proxyConfig                                              `json:"defaultConfig" patchStrategy:"merge"`
	ConfigSources                  []*v1alpha13.ConfigSource                                 `json:"configSources" patchStrategy:"merge" patchMergeKey:"address"`
	TrustDomainAliases             []string                                                  `json:"trustDomainAliases" patchStrategy:"merge"`
	DefaultServiceExportTo         []string                                                  `json:"defaultServiceExportTo" patchStrategy:"merge"`
	DefaultVirtualServiceExportTo  []string                                                  `json:"defaultVirtualServiceExportTo" patchStrategy:"merge"`
	DefaultDestinationRuleExportTo []string                                                  `json:"defaultDestinationRuleExportTo" patchStrategy:"merge"`
	LocalityLbSetting              *v1alpha3.LocalityLoadBalancerSetting                     `json:"localityLbSetting" patchStrategy:"merge"`
	DNSRefreshRate                 *durationpb.Duration                                      `json:"dnsRefreshRate" patchStrategy:"replace"`
	Certificates                   []*v1alpha13.Certificate                                  `json:"certificates" patchStrategy:"merge" patchMergeKey:"secretName"`
	ServiceSettings                []*meshConfigServiceSettings                              `json:"serviceSettings" patchStrategy:"replace"`
	DefaultProviders               *meshConfigDefaultProviders                               `json:"defaultProviders" patchStrategy:"merge"`
	ExtensionProviders             []*meshConfigExtensionProvider                            `json:"extensionProviders" patchStrategy:"merge" patchMergeKey:"name"`
}
type (
	// meshConfigDefaultProviders mirrors MeshConfig.DefaultProviders. Only the
	// field names matter for merge metadata, hence the empty struct elements.
	meshConfigDefaultProviders struct {
		AccessLogging []struct{} `json:"accessLogging"`
		Tracing       []struct{} `json:"tracing"`
		Metrics       []struct{} `json:"metrics"`
	}
	// meshConfigExtensionProvider mirrors entries of
	// MeshConfig.ExtensionProviders, which merge keyed on "name".
	meshConfigExtensionProvider struct {
		// Fixed: the json tag was `json:"string"`, a typo for `json:"name"`.
		// ExtensionProviders declares patchMergeKey:"name", which should
		// correspond to this field's JSON name.
		Name               string   `json:"name"`
		EnvoyOtelAls       struct{} `json:"envoyOtelAls"`
		Prometheus         struct{} `json:"prometheus"`
		EnvoyFileAccessLog struct{} `json:"envoyFileAccessLog"`
		Stackdriver        struct{} `json:"stackdriver"`
		EnvoyExtAuthzHTTP  struct{} `json:"envoyExtAuthzHttp"`
		EnvoyExtAuthzGrpc  struct{} `json:"envoyExtAuthzGrpc"`
		Zipkin             struct{} `json:"zipkin"`
		Lightstep          struct{} `json:"lightstep"`
		Datadog            struct{} `json:"datadog"`
		Opencensus         struct{} `json:"opencensus"`
		Skywalking         struct{} `json:"skywalking"`
		EnvoyHTTPAls       struct{} `json:"envoyHttpAls"`
		EnvoyTCPAls        struct{} `json:"envoyTcpAls"`
		OpenTelemetry      struct{} `json:"opentelemetry"`
	}
	// clusterName mirrors the ProxyConfig cluster-name oneof.
	clusterName struct {
		ServiceCluster     *v1alpha13.ProxyConfig_ServiceCluster      `json:"serviceCluster,omitempty"`
		TracingServiceName *v1alpha13.ProxyConfig_TracingServiceName_ `json:"tracingServiceName,omitempty"`
	}
)
// proxyConfig mirrors the merge-relevant fields of MeshConfig.DefaultConfig
// (ProxyConfig).
type proxyConfig struct {
	DrainDuration                  *durationpb.Duration                    `json:"drainDuration" patchStrategy:"replace"`
	DiscoveryRefreshDelay          *durationpb.Duration                    `json:"discoveryRefreshDelay" patchStrategy:"replace"`
	TerminationDrainDuration       *durationpb.Duration                    `json:"terminationDrainDuration" patchStrategy:"replace"`
	Concurrency                    *wrappers.Int32Value                    `json:"concurrency" patchStrategy:"replace"`
	ConfigSources                  []*v1alpha13.ConfigSource               `json:"configSources" patchStrategy:"replace"`
	ClusterName                    *clusterName                            `json:"clusterName" patchStrategy:"replace"`
	TrustDomainAliases             []string                                `json:"trustDomainAliases" patchStrategy:"replace"`
	DefaultServiceExportTo         []string                                `json:"defaultServiceExportTo" patchStrategy:"replace"`
	DefaultVirtualServiceExportTo  []string                                `json:"defaultVirtualServiceExportTo" patchStrategy:"replace"`
	DefaultDestinationRuleExportTo []string                                `json:"defaultDestinationRuleExportTo" patchStrategy:"replace"`
	LocalityLbSetting              *v1alpha3.LocalityLoadBalancerSetting   `json:"localityLbSetting" patchStrategy:"merge"`
	DNSRefreshRate                 *durationpb.Duration                    `json:"dnsRefreshRate" patchStrategy:"replace"`
	Certificates                   []*v1alpha13.Certificate                `json:"certificates" patchStrategy:"replace"`
	ServiceSettings                []*v1alpha13.MeshConfig_ServiceSettings `json:"serviceSettings" patchStrategy:"replace"`
	Tracing                        *tracing                                `json:"tracing" patchStrategy:"replace"`
	Sds                            *v1alpha13.SDS                          `json:"sds" patchStrategy:"replace"`
	EnvoyAccessLogService          *v1alpha13.RemoteService                `json:"envoyAccessLogService" patchStrategy:"merge" patchMergeKey:"address"`
	EnvoyMetricsService            *v1alpha13.RemoteService                `json:"envoyMetricsService" patchStrategy:"merge" patchMergeKey:"address"`
	ProxyMetadata                  map[string]string                       `json:"proxyMetadata" patchStrategy:"merge"`
	ExtraStatTags                  []string                                `json:"extraStatTags" patchStrategy:"replace"`
	GatewayTopology                *v1alpha13.Topology                     `json:"gatewayTopology" patchStrategy:"replace"`
}

// tracing mirrors the ProxyConfig tracing TLS settings.
type tracing struct {
	TlSSettings *v1alpha3.ClientTLSSettings `json:"tlsSettings" patchStrategy:"merge"`
}

// meshConfigServiceSettings mirrors MeshConfig.ServiceSettings entries.
type meshConfigServiceSettings struct {
	Settings *v1alpha13.MeshConfig_ServiceSettings_Settings `json:"settings" patchStrategy:"merge"`
	Hosts    []string                                       `json:"hosts" patchStrategy:"merge"`
}

// telemetryConfig mirrors the telemetry values block.
type telemetryConfig struct {
	V2 *telemetryV2Config `json:"v2" patchStrategy:"merge"`
}

// telemetryV2Config mirrors the telemetry v2 values block.
type telemetryV2Config struct {
	Prometheus  *v1alpha12.TelemetryV2PrometheusConfig  `json:"prometheus" patchStrategy:"merge"`
	Stackdriver *v1alpha12.TelemetryV2StackDriverConfig `json:"stackdriver" patchStrategy:"merge"`
}
// iopMergeStruct supplies the patchStrategy/patchMergeKey metadata that drives
// the strategic merge in OverlayIOP.
var iopMergeStruct iopMergeStructType

// OverlayIOP overlays over base using JSON strategic merge.
// Both arguments are YAML documents of an IstioOperator resource; the result
// is the merged document as YAML. A whitespace-only base or overlay returns
// the other argument unchanged.
func OverlayIOP(base, overlay string) (string, error) {
	if strings.TrimSpace(base) == "" {
		return overlay, nil
	}
	if strings.TrimSpace(overlay) == "" {
		return base, nil
	}
	bj, err := yaml2.YAMLToJSON([]byte(base))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in base: %s\n%s", err, bj)
	}
	oj, err := yaml2.YAMLToJSON([]byte(overlay))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in overlay: %s\n%s", err, oj)
	}
	// The original re-checked `base == ""` / `overlay == ""` here and swapped
	// in "{}"; both checks were unreachable dead code, since an empty string
	// is whitespace-only and already returned above.
	merged, err := strategicpatch.StrategicMergePatch(bj, oj, &iopMergeStruct)
	if err != nil {
		return "", fmt.Errorf("json merge error (%s) for base object: \n%s\n override object: \n%s", err, bj, oj)
	}
	my, err := yaml2.JSONToYAML(merged)
	if err != nil {
		return "", fmt.Errorf("jsonToYAML error (%s) for merged object: \n%s", err, merged)
	}
	return string(my), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"path/filepath"
"regexp"
"strconv"
"strings"
)
const (
	// PathSeparator is the separator between path elements.
	PathSeparator = "."
	// kvSeparatorRune separates the key and value inside a [key:value] path element.
	kvSeparatorRune = ':'
	// InsertIndex is the index that means "insert" when setting values
	InsertIndex = -1
	// pathSeparatorRune is the separator between path elements, as a rune.
	pathSeparatorRune = '.'
	// EscapedPathSeparator is what to use when the path shouldn't separate
	EscapedPathSeparator = "\\" + PathSeparator
)

// ValidKeyRegex is a regex for a valid path key element.
var ValidKeyRegex = regexp.MustCompile("^[a-zA-Z0-9_-]*$")

// Path is a path in slice form.
type Path []string
// PathFromString converts a string path of form a.b.c to a string slice representation.
// Bracketed selectors become their own elements: "a.b[name:n]" yields
// ["a", "b", "[name:n]"]. Empty elements are dropped, and backslash-escaped
// separators ("a\.b") are preserved and unescaped.
func PathFromString(path string) Path {
	// NOTE(review): filepath.Clean operates on '/'-separated paths; for the
	// '.'-separated paths handled here it is largely a no-op — confirm intent.
	path = filepath.Clean(path)
	path = strings.TrimPrefix(path, PathSeparator)
	path = strings.TrimSuffix(path, PathSeparator)
	// Split on '.', honoring backslash-escaped separators.
	pv := splitEscaped(path, pathSeparatorRune)
	var r []string
	for _, str := range pv {
		if str != "" {
			str = strings.ReplaceAll(str, EscapedPathSeparator, PathSeparator)
			// Is str of the form node[expr], convert to "node", "[expr]"?
			nBracket := strings.IndexRune(str, '[')
			if nBracket > 0 {
				r = append(r, str[:nBracket], str[nBracket:])
			} else {
				// str is "[expr]" or "node"
				r = append(r, str)
			}
		}
	}
	return r
}
// String converts a string slice path representation of form ["a", "b", "c"] to a string representation like "a.b.c".
func (p Path) String() string {
	return strings.Join(p, PathSeparator)
}

// Equals reports whether p and p2 hold the same elements in the same order.
func (p Path) Equals(p2 Path) bool {
	if len(p) != len(p2) {
		return false
	}
	for i := range p {
		if p[i] != p2[i] {
			return false
		}
	}
	return true
}
// ToYAMLPath converts a path string to path such that the first letter of each path element is lower case.
func ToYAMLPath(path string) Path {
	p := PathFromString(path)
	for i := range p {
		// firstCharToLowerCase indexes s[0:1]; PathFromString never yields
		// empty elements, which keeps this safe.
		p[i] = firstCharToLowerCase(p[i])
	}
	return p
}

// ToYAMLPathString converts a path string such that the first letter of each path element is lower case.
func ToYAMLPathString(path string) string {
	return ToYAMLPath(path).String()
}
// IsValidPathElement reports whether pe is a valid path key element
// (alphanumerics, '_' and '-'; the empty string also matches).
func IsValidPathElement(pe string) bool {
	return ValidKeyRegex.MatchString(pe)
}

// IsKVPathElement report whether pe is a key/value path element of the form [k:v].
func IsKVPathElement(pe string) bool {
	pe, ok := RemoveBrackets(pe)
	if !ok {
		return false
	}
	// Both key and value must be non-empty, and the key must be a valid key element.
	kv := splitEscaped(pe, kvSeparatorRune)
	if len(kv) != 2 || len(kv[0]) == 0 || len(kv[1]) == 0 {
		return false
	}
	return IsValidPathElement(kv[0])
}

// IsVPathElement report whether pe is a value path element of the form [:v].
func IsVPathElement(pe string) bool {
	pe, ok := RemoveBrackets(pe)
	if !ok {
		return false
	}
	return len(pe) > 1 && pe[0] == ':'
}

// IsNPathElement report whether pe is an index path element of the form [N],
// where N >= InsertIndex (-1 means "insert").
func IsNPathElement(pe string) bool {
	pe, ok := RemoveBrackets(pe)
	if !ok {
		return false
	}
	n, err := strconv.Atoi(pe)
	return err == nil && n >= InsertIndex
}
// PathKV returns the key and value string parts of the entire key/value path element.
// It returns an error if pe is not a key/value path element.
func PathKV(pe string) (k, v string, err error) {
	if !IsKVPathElement(pe) {
		return "", "", fmt.Errorf("%s is not a valid key:value path element", pe)
	}
	// IsKVPathElement already guarantees brackets and exactly two parts.
	pe, _ = RemoveBrackets(pe)
	kv := splitEscaped(pe, kvSeparatorRune)
	return kv[0], kv[1], nil
}

// PathV returns the value string part of the entire value path element.
// It returns an error if pe is not a value path element.
func PathV(pe string) (string, error) {
	// For :val, return the value only
	if IsVPathElement(pe) {
		v, _ := RemoveBrackets(pe)
		return v[1:], nil
	}
	// For key:val, return the whole thing
	v, _ := RemoveBrackets(pe)
	if len(v) > 0 {
		return v, nil
	}
	return "", fmt.Errorf("%s is not a valid value path element", pe)
}

// PathN returns the index part of the entire value path element.
// It returns an error if pe is not an index path element.
func PathN(pe string) (int, error) {
	if !IsNPathElement(pe) {
		return -1, fmt.Errorf("%s is not a valid index path element", pe)
	}
	v, _ := RemoveBrackets(pe)
	return strconv.Atoi(v)
}
// RemoveBrackets removes the [] around pe and returns the resulting string. It returns false if pe is not surrounded
// by [].
func RemoveBrackets(pe string) (string, bool) {
	// Need at least "[]": an opening bracket and a closing bracket.
	if len(pe) < 2 || pe[0] != '[' || pe[len(pe)-1] != ']' {
		return "", false
	}
	return pe[1 : len(pe)-1], true
}
// splitEscaped splits a string using the rune r as a separator. It does not split on r if it's prefixed by \.
// An empty input yields an empty (non-nil) slice.
func splitEscaped(s string, r rune) []string {
	if s == "" {
		return []string{}
	}
	var (
		parts []string
		start int
		prev  rune // previous rune; the zero rune can never be '\\'
	)
	for i, c := range s {
		if c == r && prev != '\\' {
			parts = append(parts, s[start:i])
			start = i + 1
		}
		prev = c
	}
	return append(parts, s[start:])
}
// firstCharToLowerCase returns s with its first character lower-cased.
// The empty string is returned unchanged; the original indexed s[0:1]
// unconditionally and panicked on "".
func firstCharToLowerCase(s string) string {
	if s == "" {
		return s
	}
	return strings.ToLower(s[0:1]) + s[1:]
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package progress
import (
"fmt"
"io"
"sort"
"strings"
"sync"
"github.com/cheggaaa/pb/v3"
"istio.io/istio/operator/pkg/name"
)
// InstallState is the high-level phase of an install/uninstall operation,
// used by Log.SetState to pick the status line to render.
type InstallState int

const (
	// StateInstalling: component manifests are being applied.
	StateInstalling InstallState = iota
	// StatePruning: resources removed from the config are being pruned.
	StatePruning
	// StateComplete: installation finished successfully.
	StateComplete
	// StateUninstallComplete: uninstall finished successfully.
	StateUninstallComplete
)
// Log records the progress of an installation
// This aims to provide information about the install of multiple components in parallel, while working
// around the limitations of the pb library, which will only support single lines. To do this, we aggregate
// the current components into a single line, and as components complete their final state is persisted to a new line.
type Log struct {
	components map[string]*ManifestLog // in-flight components, keyed by component name
	bar        *pb.ProgressBar         // the current single-line bar
	template   string                  // last template written; used to suppress duplicate writes
	mu         sync.Mutex              // guards all fields above
	state      InstallState
}

// NewLog returns a Log ready to track component progress on a fresh bar.
func NewLog() *Log {
	return &Log{
		components: map[string]*ManifestLog{},
		bar:        createBar(),
	}
}

// inProgress is the bar template prefix for in-flight work.
// cycle will alternate between "-" and " ". "-" is given multiple times to avoid quick flashing back and forth.
const inProgress = `{{ yellow (cycle . "-" "-" "-" " ") }} `
// createStatus will return a string to report the current status.
// ex: - Processing resources for components. Waiting for foo, bar
// The message is truncated with "..." to fit within maxWidth columns.
// Callers must hold p.mu (reads p.components).
func (p *Log) createStatus(maxWidth int) string {
	comps := make([]string, 0, len(p.components))
	wait := make([]string, 0, len(p.components))
	for c, l := range p.components {
		comps = append(comps, name.UserFacingComponentName(name.ComponentName(c)))
		wait = append(wait, l.waitingResources()...)
	}
	sort.Strings(comps)
	sort.Strings(wait)
	msg := fmt.Sprintf(`Processing resources for %s.`, strings.Join(comps, ", "))
	if len(wait) > 0 {
		msg += fmt.Sprintf(` Waiting for %s`, strings.Join(wait, ", "))
	}
	prefix := inProgress
	if !p.bar.GetBool(pb.Terminal) {
		// If we aren't a terminal, no need to spam extra lines
		prefix = `{{ yellow "-" }} `
	}
	// reduce by 2 to allow for the "- " that will be added below
	maxWidth -= 2
	// Require maxWidth >= 3 before truncating: the original checked only
	// maxWidth > 0 and then sliced msg[:maxWidth-3], which panics with a
	// negative index for widths of 3-4 columns before the reduction above.
	if maxWidth >= 3 && len(msg) > maxWidth {
		return prefix + msg[:maxWidth-3] + "..."
	}
	return prefix + msg
}
// For testing only: when non-nil, newly created bars write here instead of the default output.
var testWriter *io.Writer

// createBar constructs and starts a single-line progress bar with manual
// (Static) write control.
func createBar() *pb.ProgressBar {
	// Don't set a total and use Static so we can explicitly control when we write. This is needed
	// for handling the multiline issues.
	bar := pb.New(0)
	bar.Set(pb.Static, true)
	if testWriter != nil {
		bar.SetWriter(*testWriter)
	}
	bar.Start()
	// if we aren't a terminal, we will return a new line for each new message
	if !bar.GetBool(pb.Terminal) {
		bar.Set(pb.ReturnSymbol, "\n")
	}
	return bar
}
// reportProgress will report an update for a given component
// Because the bar library does not support multiple lines/bars at once, we need to aggregate current
// progress into a single line. For example "Waiting for x, y, z". Once a component completes, we want
// a new line created so the information is not lost. To do this, we spin up a new bar with the remaining components
// on a new line, and create a new bar. For example, this becomes "x succeeded", "waiting for y, z".
func (p *Log) reportProgress(component string) func() {
	return func() {
		cliName := name.UserFacingComponentName(name.ComponentName(component))
		// Lock ordering: the Log lock is always taken before the component lock.
		p.mu.Lock()
		defer p.mu.Unlock()
		cmp := p.components[component]
		// The component has completed
		// Snapshot component state under its own lock, then release before
		// touching the bar.
		cmp.mu.Lock()
		finished := cmp.finished
		cmpErr := cmp.err
		cmp.mu.Unlock()
		if finished || cmpErr != "" {
			if finished {
				p.SetMessage(fmt.Sprintf(`{{ green "✔" }} %s installed`, cliName), true)
			} else {
				p.SetMessage(fmt.Sprintf(`{{ red "✘" }} %s encountered an error: %s`, cliName, cmpErr), true)
			}
			// Close the bar out, outputting a new line
			delete(p.components, component)
			// Now we create a new bar, which will have the remaining components
			p.bar = createBar()
			return
		}
		p.SetMessage(p.createStatus(p.bar.Width()), false)
	}
}
// SetState transitions the log to the given InstallState and writes the
// corresponding status line. StateInstalling writes nothing here; its status
// is driven by per-component progress reports.
func (p *Log) SetState(state InstallState) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.state = state
	switch p.state {
	case StatePruning:
		p.bar.SetTemplateString(inProgress + `Pruning removed resources`)
		p.bar.Write()
	case StateComplete:
		p.bar.SetTemplateString(`{{ green "✔" }} Installation complete`)
		p.bar.Write()
	case StateUninstallComplete:
		p.bar.SetTemplateString(`{{ green "✔" }} Uninstall complete`)
		p.bar.Write()
	}
}
// NewComponent registers a component with the log and returns the ManifestLog
// that should receive its progress reports.
func (p *Log) NewComponent(component string) *ManifestLog {
	p.mu.Lock()
	defer p.mu.Unlock()
	ml := &ManifestLog{report: p.reportProgress(component)}
	p.components[component] = ml
	return ml
}
// SetMessage renders status as the bar's template, optionally finalizing the
// bar (emitting a persistent line). Callers must hold p.mu.
func (p *Log) SetMessage(status string, finish bool) {
	// if we are not a terminal and there is no change, do not write
	// This avoids redundant lines
	if !p.bar.GetBool(pb.Terminal) && status == p.template {
		return
	}
	p.template = status
	p.bar.SetTemplateString(p.template)
	if finish {
		p.bar.Finish()
	}
	p.bar.Write()
}
// ManifestLog records progress for a single component
type ManifestLog struct {
	report   func()     // pushes this component's current state to the parent Log
	err      string     // non-empty once an error has been reported
	finished bool       // true once the component completed successfully
	waiting  []string   // resources still being waited on
	mu       sync.Mutex // guards err, finished and waiting
}
// ReportProgress pushes this component's current state to the parent Log.
// A nil receiver is a no-op, which lets callers disable progress reporting.
func (p *ManifestLog) ReportProgress() {
	if p == nil {
		return
	}
	p.report()
}

// ReportError records that this component hit an error, then reports it.
// A nil receiver is a no-op.
func (p *ManifestLog) ReportError(err string) {
	if p == nil {
		return
	}
	p.mu.Lock()
	p.err = err
	p.mu.Unlock()
	p.report()
}

// ReportFinished records that this component completed successfully, then
// reports it. A nil receiver is a no-op.
func (p *ManifestLog) ReportFinished() {
	if p == nil {
		return
	}
	p.mu.Lock()
	p.finished = true
	p.mu.Unlock()
	p.report()
}
// ReportWaiting records the resources this component is still waiting on,
// then reports progress. A nil receiver is a no-op.
func (p *ManifestLog) ReportWaiting(resources []string) {
	if p == nil {
		return
	}
	p.mu.Lock()
	// Copy the slice: storing the caller's slice directly let later caller
	// mutations race with readers despite the mutex on the field itself.
	p.waiting = append([]string(nil), resources...)
	p.mu.Unlock()
	p.report()
}

// waitingResources returns a snapshot of the resources this component is
// waiting on. A copy is returned so callers may use it after the lock is
// released without aliasing the guarded slice.
func (p *ManifestLog) waitingResources() []string {
	p.mu.Lock()
	defer p.mu.Unlock()
	return append([]string(nil), p.waiting...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"reflect"
)
// kindOf returns the reflection Kind that represents the dynamic type of value.
// If value is a nil interface value, kindOf returns reflect.Invalid.
func kindOf(value any) reflect.Kind {
	t := reflect.TypeOf(value)
	if t == nil {
		// reflect.TypeOf returns nil for a nil interface.
		return reflect.Invalid
	}
	return t.Kind()
}
// IsString reports whether value is a string type.
func IsString(value any) bool {
	t := reflect.TypeOf(value)
	return t != nil && t.Kind() == reflect.String
}

// IsPtr reports whether value is a ptr type.
func IsPtr(value any) bool {
	t := reflect.TypeOf(value)
	return t != nil && t.Kind() == reflect.Ptr
}

// IsMap reports whether value is a map type.
func IsMap(value any) bool {
	t := reflect.TypeOf(value)
	return t != nil && t.Kind() == reflect.Map
}
// IsMapPtr reports whether v is a map ptr type.
// A nil interface value returns false; previously reflect.TypeOf(nil)
// returned a nil Type and the Kind() call panicked.
func IsMapPtr(v any) bool {
	t := reflect.TypeOf(v)
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Map
}
// IsSlice reports whether value is a slice type.
func IsSlice(value any) bool {
	t := reflect.TypeOf(value)
	return t != nil && t.Kind() == reflect.Slice
}

// IsStruct reports whether value is a struct type
func IsStruct(value any) bool {
	t := reflect.TypeOf(value)
	return t != nil && t.Kind() == reflect.Struct
}
// IsSlicePtr reports whether v is a slice ptr type.
func IsSlicePtr(v any) bool {
	t := reflect.TypeOf(v)
	if t == nil || t.Kind() != reflect.Ptr {
		return false
	}
	return t.Elem().Kind() == reflect.Slice
}
// IsSliceInterfacePtr reports whether v is a pointer to an interface that
// currently holds a slice (i.e. *[]interface{} style containers).
func IsSliceInterfacePtr(v any) bool {
	// Must use ValueOf because Elem().Elem() type resolves dynamically.
	vv := reflect.ValueOf(v)
	if vv.Kind() != reflect.Ptr {
		return false
	}
	inner := vv.Elem()
	return inner.Kind() == reflect.Interface && inner.Elem().Kind() == reflect.Slice
}
// IsTypeStructPtr reports whether t is a pointer-to-struct type.
func IsTypeStructPtr(t reflect.Type) bool {
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
}

// IsTypeSlicePtr reports whether t is a pointer-to-slice type.
func IsTypeSlicePtr(t reflect.Type) bool {
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice
}

// IsTypeMap reports whether t is a map type.
func IsTypeMap(t reflect.Type) bool {
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Map
}

// IsTypeInterface reports whether t is an interface type.
func IsTypeInterface(t reflect.Type) bool {
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Interface
}

// IsTypeSliceOfInterface reports whether t is a slice-of-interface type.
func IsTypeSliceOfInterface(t reflect.Type) bool {
	if t == nil {
		return false
	}
	return t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Interface
}
// IsNilOrInvalidValue reports whether v is invalid, a nil pointer, or wraps
// a nil value of pointer/map/slice kind.
func IsNilOrInvalidValue(v reflect.Value) bool {
	if !v.IsValid() {
		return true
	}
	if v.Kind() == reflect.Ptr && v.IsNil() {
		return true
	}
	return IsValueNil(v.Interface())
}

// IsValueNil returns true if either value is nil, or has dynamic type {ptr,
// map, slice} with value nil.
func IsValueNil(value any) bool {
	if value == nil {
		return true
	}
	switch reflect.TypeOf(value).Kind() {
	case reflect.Slice, reflect.Ptr, reflect.Map:
		return reflect.ValueOf(value).IsNil()
	}
	return false
}
// IsValueNilOrDefault returns true if either IsValueNil(value) or value
// equals the zero value for its (scalar) type.
func IsValueNilOrDefault(value any) bool {
	switch {
	case IsValueNil(value):
		return true
	case !IsValueScalar(reflect.ValueOf(value)):
		// Non-scalar types have a nil default, which is handled above.
		return false
	}
	zero := reflect.New(reflect.TypeOf(value)).Elem().Interface()
	return value == zero
}
// IsValuePtr reports whether v is a ptr type.
func IsValuePtr(v reflect.Value) bool {
	return v.Kind() == reflect.Ptr
}

// IsValueInterface reports whether v is an interface type.
func IsValueInterface(v reflect.Value) bool {
	return v.Kind() == reflect.Interface
}

// IsValueStruct reports whether v is a struct type.
func IsValueStruct(v reflect.Value) bool {
	return v.Kind() == reflect.Struct
}

// IsValueStructPtr reports whether v is a struct ptr type.
func IsValueStructPtr(v reflect.Value) bool {
	return IsValuePtr(v) && IsValueStruct(v.Elem())
}

// IsValueMap reports whether v is a map type.
func IsValueMap(v reflect.Value) bool {
	return v.Kind() == reflect.Map
}

// IsValueSlice reports whether v is a slice type.
func IsValueSlice(v reflect.Value) bool {
	return v.Kind() == reflect.Slice
}
// IsValueScalar reports whether v is a scalar type: valid, non-nil, and
// (after dereferencing one pointer level) not a struct, map, or slice.
func IsValueScalar(v reflect.Value) bool {
	if IsNilOrInvalidValue(v) {
		return false
	}
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return false
		}
		v = v.Elem()
	}
	switch v.Kind() {
	case reflect.Struct, reflect.Map, reflect.Slice:
		return false
	}
	return true
}
// ValuesAreSameType returns true if v1 and v2 have the same reflect.Type,
// otherwise it returns false.
func ValuesAreSameType(v1 reflect.Value, v2 reflect.Value) bool {
	t1, t2 := v1.Type(), v2.Type()
	return t1 == t2
}
// IsEmptyString returns true if value is an empty string.
// A nil interface counts as empty. Named string types (dynamic type other
// than exactly string) return false, matching the original assertion-based
// behavior; the redundant reflection + double type assertion were removed.
func IsEmptyString(value any) bool {
	if value == nil {
		return true
	}
	s, ok := value.(string)
	return ok && s == ""
}
// DeleteFromSlicePtr deletes an entry at index from the parent, which must be a slice ptr.
func DeleteFromSlicePtr(parentSlice any, index int) error {
	scope.Debugf("DeleteFromSlicePtr index=%d, slice=\n%v", index, parentSlice)
	if !IsSliceInterfacePtr(parentSlice) {
		return fmt.Errorf("deleteFromSlicePtr parent type is %T, must be *[]interface{}", parentSlice)
	}
	pv := reflect.ValueOf(parentSlice)
	sl := pv.Elem()
	if sl.Kind() == reflect.Interface {
		sl = sl.Elem()
	}
	// Rebuild the slice without element index and store it back into the parent.
	pv.Elem().Set(reflect.AppendSlice(sl.Slice(0, index), sl.Slice(index+1, sl.Len())))
	return nil
}
// UpdateSlicePtr updates an entry at index in the parent, which must be a slice ptr, with the given value.
func UpdateSlicePtr(parentSlice any, index int, value any) error {
	scope.Debugf("UpdateSlicePtr parent=\n%v\n, index=%d, value=\n%v", parentSlice, index, value)
	if !IsSliceInterfacePtr(parentSlice) {
		return fmt.Errorf("updateSlicePtr parent type is %T, must be *[]interface{}", parentSlice)
	}
	target := reflect.ValueOf(parentSlice).Elem()
	if target.Kind() == reflect.Interface {
		// Unwrap the interface to reach the underlying slice.
		target = target.Elem()
	}
	target.Index(index).Set(reflect.ValueOf(value))
	return nil
}
// InsertIntoMap inserts value with key into parent which must be a map, map ptr, or interface to map.
func InsertIntoMap(parentMap any, key any, value any) error {
	scope.Debugf("InsertIntoMap key=%v, value=%v, map=\n%v", key, value, parentMap)
	mv := reflect.ValueOf(parentMap)
	// Unwrap one pointer level and one interface level to reach the map.
	if mv.Type().Kind() == reflect.Ptr {
		mv = mv.Elem()
	}
	if mv.Type().Kind() == reflect.Interface {
		mv = mv.Elem()
	}
	if mv.Type().Kind() != reflect.Map {
		scope.Debugf("error %v", mv.Type().Kind())
		return fmt.Errorf("insertIntoMap parent type is %T, must be map", parentMap)
	}
	mv.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value))
	return nil
}
// DeleteFromMap deletes the entry with the given key from parent, which must be a map.
func DeleteFromMap(parentMap any, key any) error {
	scope.Debugf("DeleteFromMap key=%s, parent:\n%v\n", key, parentMap)
	if !IsMap(parentMap) {
		return fmt.Errorf("deleteFromMap parent type is %T, must be map", parentMap)
	}
	// Setting a zero reflect.Value removes the key from the map.
	reflect.ValueOf(parentMap).SetMapIndex(reflect.ValueOf(key), reflect.Value{})
	return nil
}
// ToIntValue returns 0, false if val is not a number type, otherwise it returns the int value of val.
func ToIntValue(val any) (int, bool) {
	if IsValueNil(val) {
		return 0, false
	}
	v := reflect.ValueOf(val)
	k := v.Kind()
	switch {
	case IsIntKind(k):
		return int(v.Int()), true
	case IsUintKind(k):
		return int(v.Uint()), true
	default:
		return 0, false
	}
}
// IsIntKind reports whether k is a signed integer kind of any size.
func IsIntKind(k reflect.Kind) bool {
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return true
	default:
		return false
	}
}

// IsUintKind reports whether k is an unsigned integer kind of any size.
func IsUintKind(k reflect.Kind) bool {
	switch k {
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return true
	default:
		return false
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"text/template"
"google.golang.org/protobuf/types/known/structpb"
)
// FileFilter decides whether a given file name should be included.
type FileFilter func(fileName string) bool

// StringBoolMapToSlice creates and returns a slice of all the map keys with true.
func StringBoolMapToSlice(m map[string]bool) []string {
	keys := make([]string, 0, len(m))
	for k, enabled := range m {
		if enabled {
			keys = append(keys, k)
		}
	}
	return keys
}

// ReadFilesWithFilter reads files from path, for a directory it recursively reads files and filters the results
// for single file it directly reads the file. It returns a concatenated output of all matching files' content.
func ReadFilesWithFilter(path string, filter FileFilter) (string, error) {
	files, err := FindFiles(path, filter)
	if err != nil {
		return "", err
	}
	var out strings.Builder
	for _, name := range files {
		content, readErr := os.ReadFile(name)
		if readErr != nil {
			return "", readErr
		}
		// Each file's content is newline-terminated in the combined output.
		out.WriteString(string(content) + "\n")
	}
	return out.String(), nil
}

// FindFiles reads files from path, and returns the file names that match the filter.
func FindFiles(path string, filter FileFilter) ([]string, error) {
	info, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	if !info.IsDir() {
		// A single file is returned directly; the filter is not applied.
		return []string{path}, nil
	}
	var matches []string
	walkFn := func(p string, fi os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !fi.IsDir() && filter(p) {
			matches = append(matches, p)
		}
		return nil
	}
	if err := filepath.Walk(path, walkFn); err != nil {
		return nil, err
	}
	return matches, nil
}
// ParseValue parses string into a value, trying the narrowest interpretation
// first: int, then float64, then bool, finally falling back to the raw
// string with "\," unescaped to ",".
func ParseValue(valueStr string) any {
	if i, err := strconv.Atoi(valueStr); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(valueStr, 64); err == nil {
		return f
	}
	if b, err := strconv.ParseBool(valueStr); err == nil {
		return b
	}
	return strings.ReplaceAll(valueStr, "\\,", ",")
}
// ConsolidateLog is a helper function to dedup the log message.
// Each distinct non-empty line is emitted once, in order of first
// appearance, annotated with how many times it occurred.
func ConsolidateLog(logMessage string) string {
	lines := strings.Split(logMessage, "\n")
	counts := make(map[string]int, len(lines))
	for _, line := range lines {
		if line == "" {
			continue
		}
		// The map's zero value makes the previous exists-check redundant.
		counts[line]++
	}
	var sb strings.Builder
	for _, line := range lines {
		if counts[line] == 0 {
			// Empty line, or a duplicate that was already emitted.
			continue
		}
		sb.WriteString(fmt.Sprintf("%s (repeated %v times)\n", line, counts[line]))
		// Reset the count so later duplicates are skipped.
		counts[line] = 0
	}
	return sb.String()
}
// RenderTemplate is a helper method to render a template with the given values.
func RenderTemplate(tmpl string, ts any) (string, error) {
	parsed, err := template.New("").Parse(tmpl)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := parsed.Execute(&buf, ts); err != nil {
		return "", err
	}
	return buf.String(), nil
}
// ValueString renders a structpb Value as a plain string: string values are
// returned verbatim, numbers via fmt.Sprint, and everything else falls back
// to the proto String() representation.
func ValueString(v *structpb.Value) string {
	if s, ok := v.Kind.(*structpb.Value_StringValue); ok {
		return s.StringValue
	}
	if n, ok := v.Kind.(*structpb.Value_NumberValue); ok {
		return fmt.Sprint(n.NumberValue)
	}
	return v.String()
}
// MustStruct converts m to a structpb.Struct, discarding any conversion error.
// NOTE(review): despite the Must prefix this does not panic; on failure the
// error is silently dropped and nil is returned — confirm callers expect that.
func MustStruct(m map[string]any) *structpb.Struct {
	s, _ := structpb.NewStruct(m)
	return s
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bufio"
"bytes"
"fmt"
"io"
"reflect"
"strings"
jsonpatch "github.com/evanphx/json-patch/v5" // nolint: staticcheck
"github.com/kylelemons/godebug/diff"
"google.golang.org/protobuf/proto"
yaml3 "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
"istio.io/istio/pkg/util/protomarshal"
)
// ToYAMLGeneric marshals root to YAML bytes. proto.Message values go through
// protomarshal (jsonpb-aware); everything else uses plain yaml.Marshal.
// Fixed: the local variable no longer shadows the imported proto package.
func ToYAMLGeneric(root any) ([]byte, error) {
	if pm, ok := root.(proto.Message); ok {
		y, err := protomarshal.ToYAML(pm)
		if err != nil {
			return nil, err
		}
		return []byte(y), nil
	}
	y, err := yaml.Marshal(root)
	if err != nil {
		return nil, err
	}
	return y, nil
}
// MustToYAMLGeneric marshals root to a YAML string; on error it returns the
// error text in place of the document.
// Fixed: the local variable no longer shadows the imported proto package.
func MustToYAMLGeneric(root any) string {
	if pm, ok := root.(proto.Message); ok {
		y, err := protomarshal.ToYAML(pm)
		if err != nil {
			return err.Error()
		}
		return y
	}
	y, err := yaml.Marshal(root)
	if err != nil {
		return err.Error()
	}
	return string(y)
}
// ToYAML returns a YAML string representation of val, or the error string if an error occurs.
func ToYAML(val any) string {
	out, err := yaml.Marshal(val)
	if err != nil {
		return err.Error()
	}
	return string(out)
}
// ToYAMLWithJSONPB returns a YAML string representation of val (using jsonpb), or the error string if an error occurs.
// A nil message (or typed-nil pointer) renders as "null".
func ToYAMLWithJSONPB(val proto.Message) string {
	rv := reflect.ValueOf(val)
	if val == nil || (rv.Kind() == reflect.Ptr && rv.IsNil()) {
		return "null"
	}
	js, err := protomarshal.ToJSONWithOptions(val, "", true)
	if err != nil {
		return err.Error()
	}
	out, err := yaml.JSONToYAML([]byte(js))
	if err != nil {
		return err.Error()
	}
	return string(out)
}
// MarshalWithJSONPB returns a YAML string representation of val (using jsonpb).
// Thin wrapper over protomarshal.ToYAML, kept for API compatibility.
func MarshalWithJSONPB(val proto.Message) (string, error) {
	return protomarshal.ToYAML(val)
}
// UnmarshalWithJSONPB unmarshals YAML y into out using jsonpb semantics
// (required for many proto-defined structs). allowUnknownField controls
// whether unknown JSON fields are tolerated.
func UnmarshalWithJSONPB(y string, out proto.Message, allowUnknownField bool) error {
	// Treat nothing as nothing. If we called jsonpb.Unmarshaler it would return the same.
	if y == "" {
		return nil
	}
	jb, err := yaml.YAMLToJSON([]byte(y))
	if err != nil {
		return err
	}
	// Simplified: return the unmarshal result directly instead of the
	// redundant "if err != nil { return err }; return nil" tail.
	if allowUnknownField {
		return protomarshal.UnmarshalAllowUnknown(jb, out)
	}
	return protomarshal.Unmarshal(jb, out)
}
// OverlayTrees performs a sequential JSON merge of the overlays over base
// and returns the resulting tree.
func OverlayTrees(base map[string]any, overlays ...map[string]any) (map[string]any, error) {
	hasContent := false
	for _, o := range overlays {
		if len(o) > 0 {
			hasContent = true
			break
		}
	}
	if !hasContent {
		// Avoid the expensive marshal/merge round-trip when every overlay is empty.
		return base, nil
	}
	baseYAML, err := yaml.Marshal(base)
	if err != nil {
		return nil, err
	}
	merged := string(baseYAML)
	for _, o := range overlays {
		oy, err := yaml.Marshal(o)
		if err != nil {
			return nil, err
		}
		if merged, err = OverlayYAML(merged, string(oy)); err != nil {
			return nil, err
		}
	}
	out := make(map[string]any)
	if err := yaml.Unmarshal([]byte(merged), &out); err != nil {
		return nil, err
	}
	return out, nil
}
// OverlayYAML patches the overlay tree over the base tree and returns the result. All trees are expressed as YAML
// strings.
func OverlayYAML(base, overlay string) (string, error) {
	// A blank side short-circuits to the other side unchanged.
	if strings.TrimSpace(base) == "" {
		return overlay, nil
	}
	if strings.TrimSpace(overlay) == "" {
		return base, nil
	}
	bj, err := yaml.YAMLToJSON([]byte(base))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in base: %s\n%s", err, bj)
	}
	oj, err := yaml.YAMLToJSON([]byte(overlay))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in overlay: %s\n%s", err, oj)
	}
	// Removed the old `if base == ""` / `if overlay == ""` fallbacks: they
	// were unreachable because empty inputs already returned above.
	merged, err := jsonpatch.MergePatch(bj, oj)
	if err != nil {
		return "", fmt.Errorf("json merge error (%s) for base object: \n%s\n override object: \n%s", err, bj, oj)
	}
	my, err := yaml.JSONToYAML(merged)
	if err != nil {
		return "", fmt.Errorf("jsonToYAML error (%s) for merged object: \n%s", err, merged)
	}
	return string(my), nil
}
// yamlDiff diffs two single-document YAML strings after normalizing each
// through an unmarshal/marshal round trip (so key order doesn't matter).
func yamlDiff(a, b string) string {
	ao := make(map[string]any)
	bo := make(map[string]any)
	if err := yaml.Unmarshal([]byte(a), &ao); err != nil {
		return err.Error()
	}
	if err := yaml.Unmarshal([]byte(b), &bo); err != nil {
		return err.Error()
	}
	an, err := yaml.Marshal(ao)
	if err != nil {
		return err.Error()
	}
	bn, err := yaml.Marshal(bo)
	if err != nil {
		return err.Error()
	}
	return diff.Diff(string(an), string(bn))
}
// yamlStringsToList splits a multi-document YAML string into a list of
// trimmed per-document strings.
func yamlStringsToList(str string) []string {
	decoder := yaml3.NewYAMLReader(bufio.NewReader(strings.NewReader(str)))
	docs := make([]string, 0)
	for {
		doc, err := decoder.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			// Any other read error also ends the scan; documents collected
			// so far are returned.
			break
		}
		docs = append(docs, string(bytes.TrimSpace(doc)))
	}
	return docs
}
// multiYamlDiffOutput joins two diff fragments with a newline, returning the
// non-empty side when the other is empty.
func multiYamlDiffOutput(res, diff string) string {
	switch {
	case res == "":
		return diff
	case diff == "":
		return res
	default:
		return res + "\n" + diff
	}
}
// diffStringList diffs two lists of YAML documents element-wise; when the
// lists have different lengths, the extra elements of the longer list are
// diffed against the empty document.
func diffStringList(l1, l2 []string) string {
	longer, shorter := len(l1), len(l2)
	l1IsLonger := true
	if len(l1) <= len(l2) {
		longer, shorter = len(l2), len(l1)
		l1IsLonger = false
	}
	res := ""
	for i := 0; i < longer; i++ {
		var d string
		switch {
		case i < shorter:
			d = yamlDiff(l1[i], l2[i])
		case l1IsLonger:
			d = yamlDiff(l1[i], "")
		default:
			d = yamlDiff("", l2[i])
		}
		res = multiYamlDiffOutput(res, d)
	}
	return res
}
// YAMLDiff compares multiple YAML files and single YAML file
func YAMLDiff(a, b string) string {
al := yamlStringsToList(a)
bl := yamlStringsToList(b)
res := diffStringList(al, bl)
return res
}
// IsYAMLEqual reports whether the YAML in strings a and b are equal.
// Both being blank counts as equal; unparsable YAML compares unequal.
func IsYAMLEqual(a, b string) bool {
	if strings.TrimSpace(a) == "" && strings.TrimSpace(b) == "" {
		return true
	}
	aj, err := yaml.YAMLToJSON([]byte(a))
	if err != nil {
		scope.Debugf("bad YAML in isYAMLEqual:\n%s", a)
		return false
	}
	bj, err := yaml.YAMLToJSON([]byte(b))
	if err != nil {
		scope.Debugf("bad YAML in isYAMLEqual:\n%s", b)
		return false
	}
	return bytes.Equal(aj, bj)
}
// IsYAMLEmpty reports whether the YAML string y is logically empty: only
// comments, document separators, whitespace, or an empty mapping "{}".
func IsYAMLEmpty(y string) bool {
	var kept []string
	for _, line := range strings.Split(y, "\n") {
		trimmed := strings.TrimSpace(line)
		// Comment lines and document separators don't count as content.
		if strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, "---") {
			continue
		}
		kept = append(kept, line)
	}
	body := strings.TrimSpace(strings.Join(kept, "\n"))
	return body == "" || body == "{}"
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validate
import (
"fmt"
"net/netip"
"reflect"
"regexp"
"strconv"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/yaml"
"istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/log"
)
var (
	// scope is the logging scope used by all validation debug output in this package.
	scope = log.RegisterScope("validation", "API validation")

	// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allow one period, one or two underscore and multiple
	// dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// NameRegexp is the format for the name component of references. The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// ObjectNameRegexp is a legal name for a k8s object.
	ObjectNameRegexp = match(`[a-z0-9.-]{1,254}`)
)
// validateWithRegex checks whether the given value matches the regexp r.
func validateWithRegex(path util.Path, val any, r *regexp.Regexp) (errs util.Errors) {
	valStr := fmt.Sprint(val)
	// The match must cover the entire string, not just a substring.
	matched := r.FindString(valStr)
	if len(matched) != len(valStr) {
		errs = util.AppendErr(errs, fmt.Errorf("invalid value %s: %v", path, val))
		printError(errs.ToError())
	}
	return errs
}
// validateStringList returns a validator function that splits a string value
// on commas and applies the supplied ValidatorFunc vf to each element.
func validateStringList(vf ValidatorFunc) ValidatorFunc {
	return func(path util.Path, val any) util.Errors {
		msg := fmt.Sprintf("validateStringList %v", val)
		if !util.IsString(val) {
			err := fmt.Errorf("validateStringList %s got %T, want string", path, val)
			printError(err)
			return util.NewErrs(err)
		}
		var errs util.Errors
		for _, item := range strings.Split(val.(string), ",") {
			errs = util.AppendErrs(errs, vf(path, item))
			scope.Debugf("\nerrors(%d): %v", len(errs), errs)
			msg += fmt.Sprintf("\nerrors(%d): %v", len(errs), errs)
		}
		logWithError(errs.ToError(), msg)
		return errs
	}
}
// validatePortNumberString checks if val is a string with a valid port
// number; "*" and "" are accepted as wildcards.
func validatePortNumberString(path util.Path, val any) util.Errors {
	scope.Debugf("validatePortNumberString %v:", val)
	if !util.IsString(val) {
		return util.NewErrs(fmt.Errorf("validatePortNumberString(%s) bad type %T, want string", path, val))
	}
	s := val.(string)
	if s == "*" || s == "" {
		return nil
	}
	port, err := strconv.ParseInt(s, 10, 32)
	if err != nil {
		return util.NewErrs(fmt.Errorf("%s : %s", path, err))
	}
	return validatePortNumber(path, port)
}
// validatePortNumber checks whether val is an integer representing a valid port number.
// Delegates to validateIntRange with the full port range [0, 65535].
func validatePortNumber(path util.Path, val any) util.Errors {
	return validateIntRange(path, val, 0, 65535)
}
// validateIPRangesOrStar validates a comma-separated list of CIDR ranges,
// additionally allowing "*" or "" as wildcards, e.g. "1.1.0.0/16,2.2.0.0/16" or "*".
func validateIPRangesOrStar(path util.Path, val any) (errs util.Errors) {
	scope.Debugf("validateIPRangesOrStar at %v: %v", path, val)
	if !util.IsString(val) {
		err := fmt.Errorf("validateIPRangesOrStar %s got %T, want string", path, val)
		printError(err)
		return util.NewErrs(err)
	}
	s := val.(string)
	if s == "*" || s == "" {
		return errs
	}
	return validateStringList(validateCIDR)(path, val)
}
// validateIntRange checks whether val is an integer in [min, max].
func validateIntRange(path util.Path, val any, min, max int64) util.Errors {
	k := reflect.TypeOf(val).Kind()
	var err error
	switch {
	case util.IsIntKind(k):
		v := reflect.ValueOf(val).Int()
		if v < min || v > max {
			err = fmt.Errorf("value %s:%v falls outside range [%v, %v]", path, v, min, max)
		}
	case util.IsUintKind(k):
		v := reflect.ValueOf(val).Uint()
		// Fixed typo in the error message: "falls out side range" -> "falls outside range".
		if int64(v) < min || int64(v) > max {
			err = fmt.Errorf("value %s:%v falls outside range [%v, %v]", path, v, min, max)
		}
	default:
		err = fmt.Errorf("validateIntRange %s unexpected type %T, want int type", path, val)
	}
	logWithError(err, "validateIntRange %s:%v in [%d, %d]?: ", path, val, min, max)
	return util.NewErrs(err)
}
// validateCIDR checks whether val is a string with a valid CIDR.
func validateCIDR(path util.Path, val any) util.Errors {
	var err error
	if !util.IsString(val) {
		err = fmt.Errorf("validateCIDR %s got %T, want string", path, val)
	} else if _, perr := netip.ParsePrefix(val.(string)); perr != nil {
		err = fmt.Errorf("%s %s", path, perr)
	}
	logWithError(err, "validateCIDR (%s): ", val)
	return util.NewErrs(err)
}
// printError debug-logs err, or "OK" when err is nil.
func printError(err error) {
	if err != nil {
		scope.Debugf("%v", err)
		return
	}
	scope.Debug("OK")
}
// logWithError debug-logs the formatted message, suffixed with the error (or
// "OK" when err is nil).
func logWithError(err error, format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	if err != nil {
		msg += fmt.Sprintf(": %v\n", err)
	} else {
		msg += ": OK\n"
	}
	scope.Debug(msg)
}
// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters; it panics if the result is not a pure literal.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))
	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}
	return re
}

// expression concatenates the given regular expressions into one, in order.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var b strings.Builder
	for _, re := range res {
		b.WriteString(re.String())
	}
	return match(b.String())
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}
// ValidatorFunc validates a value.
// It receives the path at which the value was found and the value itself,
// and returns any validation errors (empty when the value is valid).
type ValidatorFunc func(path util.Path, i any) util.Errors
// UnmarshalIOP unmarshals a string containing IstioOperator as YAML.
func UnmarshalIOP(iopYAML string) (*v1alpha1.IstioOperator, error) {
	// Remove creationDate (util.UnmarshalWithJSONPB fails if present)
	raw := make(map[string]any)
	if err := yaml.Unmarshal([]byte(iopYAML), &raw); err != nil {
		return nil, err
	}
	// Don't bother trying to remove the timestamp if there are no fields.
	// This also preserves iopYAML if it is ""; we don't want iopYAML to be the string "null"
	if len(raw) > 0 {
		un := &unstructured.Unstructured{Object: raw}
		un.SetCreationTimestamp(metav1.Time{}) // UnmarshalIstioOperator chokes on these
		iopYAML = util.ToYAML(un)
	}
	iop := &v1alpha1.IstioOperator{}
	if err := yaml.UnmarshalStrict([]byte(iopYAML), iop); err != nil {
		return nil, fmt.Errorf("%s:\n\nYAML:\n%s", err, iopYAML)
	}
	return iop, nil
}
// ValidIOP validates the given IstioOperator object.
func ValidIOP(iop *v1alpha1.IstioOperator) error {
	return CheckIstioOperatorSpec(iop.Spec, false).ToError()
}
// indexPathForSlice composes the path for element i of the slice at path s,
// e.g. ("a.b", 2) -> "a.b[2]".
func indexPathForSlice(s string, i int) string {
	return s + "[" + strconv.Itoa(i) + "]"
}
// getValidationFuncForPath returns the validator registered for path, if any.
// Registered keys may contain "[...]" index segments, where "[*]" matches
// any index (see matchPathNode).
func getValidationFuncForPath(validations map[string]ValidatorFunc, path util.Path) (ValidatorFunc, bool) {
	pstr := path.String()
	// Fast path: no index patterns involved, so a direct map lookup suffices.
	if !strings.Contains(pstr, "[") && !strings.Contains(pstr, "]") {
		vf, ok := validations[pstr]
		return vf, ok
	}
	for pattern, vf := range validations {
		segs := strings.Split(pattern, ".")
		if len(segs) != len(path) {
			continue
		}
		matched := true
		for i, seg := range segs {
			if !matchPathNode(seg, path[i]) {
				matched = false
				break
			}
		}
		if matched {
			return vf, true
		}
	}
	return nil, false
}
// matchPathNode reports whether path node pn matches pattern.
// When pattern contains an index ("[...]"), only the bracketed parts are
// compared, and "[*]" matches any index.
func matchPathNode(pattern, pn string) bool {
	patternIndexed := strings.Contains(pattern, "[") || strings.Contains(pattern, "]")
	if !patternIndexed {
		return pattern == pn
	}
	if !strings.Contains(pn, "[") && !strings.Contains(pn, "]") {
		// Pattern expects an index but pn has none.
		return false
	}
	patIdx := pattern[strings.IndexByte(pattern, '[')+1 : strings.IndexByte(pattern, ']')]
	if patIdx == "*" {
		return true
	}
	pnIdx := pn[strings.IndexByte(pn, '[')+1 : strings.IndexByte(pn, ']')]
	return patIdx == pnIdx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validate
import (
"errors"
"fmt"
"reflect"
"google.golang.org/protobuf/types/known/structpb"
"istio.io/api/operator/v1alpha1"
operator_v1alpha1 "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/operator/pkg/metrics"
"istio.io/istio/operator/pkg/tpath"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/util/protomarshal"
)
var (
	// DefaultValidations maps a data path to a validation function.
	DefaultValidations = map[string]ValidatorFunc{
		// The Values subtree is validated as a whole rather than per-field.
		"Values": func(path util.Path, i any) util.Errors {
			return CheckValues(i)
		},
		"MeshConfig": validateMeshConfig,
		"Hub": validateHub,
		"Tag": validateTag,
		"Revision": validateRevision,
		"Components.IngressGateways": validateGatewayName,
		"Components.EgressGateways": validateGatewayName,
	}
	// requiredValues lists all the values that must be non-empty.
	// Currently empty: no values are mandatory.
	requiredValues = map[string]bool{}
)
// CheckIstioOperator validates the operator CR.
// A nil CR is considered valid.
func CheckIstioOperator(iop *operator_v1alpha1.IstioOperator, checkRequiredFields bool) error {
	if iop == nil {
		return nil
	}
	return CheckIstioOperatorSpec(iop.Spec, checkRequiredFields).ToError()
}
// CheckIstioOperatorSpec validates the values in the given Installer spec, using the field map DefaultValidations to
// call the appropriate validation function. checkRequiredFields determines whether missing mandatory fields generate
// errors.
// NOTE(review): checkRequiredFields is currently ignored — Validate2 performs
// no required-field checking; confirm whether that is intentional.
func CheckIstioOperatorSpec(is *v1alpha1.IstioOperatorSpec, checkRequiredFields bool) (errs util.Errors) {
	if is == nil {
		return util.Errors{}
	}
	return Validate2(DefaultValidations, is)
}
// Validate2 runs every registered validator against the value found at its
// data path inside the spec; paths that are absent in the spec are skipped.
func Validate2(validations map[string]ValidatorFunc, iop *v1alpha1.IstioOperatorSpec) (errs util.Errors) {
	for dataPath, validator := range validations {
		value, found, _ := tpath.GetFromStructPath(iop, dataPath)
		if !found {
			continue
		}
		errs = append(errs, validator(util.PathFromString(dataPath), value)...)
	}
	return errs
}
// Validate function below is used by third party for integrations and has to be public
// Validate validates the values of the tree using the supplied Func.
// It walks structPtr (which must be a pointer to a struct) recursively via
// reflection: nested structs, non-nil pointers, slices, and maps are descended
// into, while scalar leaves are checked by validateLeaf. path accumulates the
// field path from the root and selects the matching validator in validations.
// checkRequired controls whether empty-but-required leaves produce errors.
func Validate(validations map[string]ValidatorFunc, structPtr any, path util.Path, checkRequired bool) (errs util.Errors) {
	scope.Debugf("validate with path %s, %v (%T)", path, structPtr, structPtr)
	if structPtr == nil {
		return nil
	}
	// A bare struct (not a pointer) at the top level is skipped, not an error.
	if util.IsStruct(structPtr) {
		scope.Debugf("validate path %s, skipping struct type %T", path, structPtr)
		return nil
	}
	// Anything other than a pointer-to-struct is a caller error; count it in metrics.
	if !util.IsPtr(structPtr) {
		metrics.CRValidationErrorTotal.Increment()
		return util.NewErrs(fmt.Errorf("validate path %s, value: %v, expected ptr, got %T", path, structPtr, structPtr))
	}
	structElems := reflect.ValueOf(structPtr).Elem()
	if !util.IsStruct(structElems) {
		metrics.CRValidationErrorTotal.Increment()
		return util.NewErrs(fmt.Errorf("validate path %s, value: %v, expected struct, got %T", path, structElems, structElems))
	}
	if util.IsNilOrInvalidValue(structElems) {
		return
	}
	for i := 0; i < structElems.NumField(); i++ {
		fieldName := structElems.Type().Field(i).Name
		fieldValue := structElems.Field(i)
		// Unexported fields cannot be read via reflection; skip them.
		if !fieldValue.CanInterface() {
			continue
		}
		kind := structElems.Type().Field(i).Type.Kind()
		// Fields excluded from serialization (json:"-") are not validated.
		if a, ok := structElems.Type().Field(i).Tag.Lookup("json"); ok && a == "-" {
			continue
		}
		scope.Debugf("Checking field %s", fieldName)
		switch kind {
		case reflect.Struct:
			// Recurse with the address so the next level sees a pointer-to-struct.
			errs = util.AppendErrs(errs, Validate(validations, fieldValue.Addr().Interface(), append(path, fieldName), checkRequired))
		case reflect.Map:
			newPath := append(path, fieldName)
			// Validate the map itself as a leaf, then each entry keyed by its string key.
			errs = util.AppendErrs(errs, validateLeaf(validations, newPath, fieldValue.Interface(), checkRequired))
			for _, key := range fieldValue.MapKeys() {
				nnp := append(newPath, key.String())
				errs = util.AppendErrs(errs, validateLeaf(validations, nnp, fieldValue.MapIndex(key), checkRequired))
			}
		case reflect.Slice:
			for i := 0; i < fieldValue.Len(); i++ {
				newValue := fieldValue.Index(i).Interface()
				// indexPathForSlice embeds the element index into the path segment.
				newPath := append(path, indexPathForSlice(fieldName, i))
				if util.IsStruct(newValue) || util.IsPtr(newValue) {
					errs = util.AppendErrs(errs, Validate(validations, newValue, newPath, checkRequired))
				} else {
					errs = util.AppendErrs(errs, validateLeaf(validations, newPath, newValue, checkRequired))
				}
			}
		case reflect.Ptr:
			if util.IsNilOrInvalidValue(fieldValue.Elem()) {
				continue
			}
			newPath := append(path, fieldName)
			if fieldValue.Elem().Kind() == reflect.Struct {
				errs = util.AppendErrs(errs, Validate(validations, fieldValue.Interface(), newPath, checkRequired))
			} else {
				// Pointer to a scalar: validate as a leaf.
				errs = util.AppendErrs(errs, validateLeaf(validations, newPath, fieldValue, checkRequired))
			}
		default:
			// Scalar leaf (string, int, bool, ...).
			if structElems.Field(i).CanInterface() {
				errs = util.AppendErrs(errs, validateLeaf(validations, append(path, fieldName), fieldValue.Interface(), checkRequired))
			}
		}
	}
	if len(errs) > 0 {
		metrics.CRValidationErrorTotal.Increment()
	}
	return errs
}
// validateLeaf validates a single scalar value at path. Empty values pass
// unless the path is listed in requiredValues and checkRequired is set; values
// with no registered validator always pass.
func validateLeaf(validations map[string]ValidatorFunc, path util.Path, val any, checkRequired bool) util.Errors {
	pstr := path.String()
	logMsg := fmt.Sprintf("validate %s:%v(%T) ", pstr, val, val)
	isEmpty := util.IsValueNil(val) || util.IsEmptyString(val)
	if isEmpty {
		if checkRequired && requiredValues[pstr] {
			return util.NewErrs(fmt.Errorf("field %s is required but not set", util.ToYAMLPathString(pstr)))
		}
		logMsg += fmt.Sprintf("validate %s: OK (empty value)", pstr)
		scope.Debug(logMsg)
		return nil
	}
	vf, found := getValidationFuncForPath(validations, path)
	if !found {
		// No validation defined.
		logMsg += fmt.Sprintf("validate %s: OK (no validation)", pstr)
		scope.Debug(logMsg)
		return nil
	}
	scope.Debug(logMsg)
	return vf(path, val)
}
// validateMeshConfig checks that root strictly unmarshals into MeshConfig
// (rejecting unknown fields) and then passes mesh-config validation.
func validateMeshConfig(path util.Path, root any) util.Errors {
	yamlBytes, err := util.ToYAMLGeneric(root)
	if err != nil {
		return util.Errors{err}
	}
	yamlStr := string(yamlBytes)
	// ApplyMeshConfigDefaults allows unknown fields, so we first check for unknown fields
	if err := protomarshal.ApplyYAMLStrict(yamlStr, mesh.DefaultMeshConfig()); err != nil {
		return util.Errors{fmt.Errorf("failed to unmarshall mesh config: %v", err)}
	}
	// This method will also perform validation automatically
	if _, err := mesh.ApplyMeshConfigDefaults(yamlStr); err != nil {
		return util.Errors{err}
	}
	return nil
}
// validateHub checks that a non-empty Hub value is a valid image reference;
// an empty hub is allowed (the default is used downstream).
func validateHub(path util.Path, val any) util.Errors {
	if val != "" {
		return validateWithRegex(path, val, ReferenceRegexp)
	}
	return nil
}
// validateTag checks that the Tag value matches TagRegexp.
// The tag arrives from tpath as a *structpb.Value; previously the type
// assertion was unchecked and panicked on any other type, so validate the
// dynamic type and report an error instead of crashing.
func validateTag(path util.Path, val any) util.Errors {
	pv, ok := val.(*structpb.Value)
	if !ok {
		return util.NewErrs(fmt.Errorf("validateTag(%s): expected *structpb.Value, got %T", path, val))
	}
	return validateWithRegex(path, pv.GetStringValue(), TagRegexp)
}
// validateRevision checks that a non-empty Revision is a valid DNS-1123 label.
// Previously val.(string) was an unchecked assertion and panicked on non-string
// input; now a non-string value is reported as an invalid revision.
func validateRevision(_ util.Path, val any) util.Errors {
	if val == "" {
		return nil
	}
	rev, ok := val.(string)
	if !ok {
		return util.Errors{fmt.Errorf("invalid revision specified: %v", val)}
	}
	if !labels.IsDNS1123Label(rev) {
		err := fmt.Errorf("invalid revision specified: %s", rev)
		return util.Errors{err}
	}
	return nil
}
// validateGatewayName checks every gateway in the list has a well-formed name.
// A nil entry is reported as a badly formatted gateway. Previously the
// val.([]*v1alpha1.GatewaySpec) assertion was unchecked and panicked on any
// other type; now that case is reported as an error.
func validateGatewayName(path util.Path, val any) (errs util.Errors) {
	gateways, ok := val.([]*v1alpha1.GatewaySpec)
	if !ok {
		return util.NewErrs(fmt.Errorf("validateGatewayName(%s): expected []*v1alpha1.GatewaySpec, got %T", path, val))
	}
	for _, gw := range gateways {
		if gw == nil {
			errs = append(errs, util.NewErrs(errors.New("badly formatted gateway configuration")))
			continue
		}
		errs = append(errs, validateWithRegex(path, gw.Name, ObjectNameRegexp)...)
	}
	return
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validate
import (
	"fmt"
	"reflect"

	"google.golang.org/protobuf/types/known/structpb"

	"istio.io/istio/operator/pkg/apis/istio/v1alpha1"
	"istio.io/istio/operator/pkg/util"
)
// DefaultValuesValidations maps a data path to a validation function.
var DefaultValuesValidations = map[string]ValidatorFunc{
	// Proxy traffic-capture ranges accept "*" or a comma-separated CIDR list;
	// port lists are validated element-by-element as port numbers.
	"global.proxy.includeIPRanges":     validateIPRangesOrStar,
	"global.proxy.excludeIPRanges":     validateIPRangesOrStar,
	"global.proxy.includeInboundPorts": validateStringList(validatePortNumberString),
	"global.proxy.excludeInboundPorts": validateStringList(validatePortNumberString),
	"meshConfig":                       validateMeshConfig,
}
// CheckValues validates the values in the given tree, which follows the Istio values.yaml schema.
// A nil root (including a typed nil pointer) is valid. The tree is first
// round-tripped through the Values proto to catch schema violations, then the
// per-path validators in DefaultValuesValidations are applied to the raw map.
func CheckValues(root any) util.Errors {
	v := reflect.ValueOf(root)
	if root == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
		return nil
	}
	vs, err := util.ToYAMLGeneric(root)
	if err != nil {
		return util.Errors{err}
	}
	val := &v1alpha1.Values{}
	if err := util.UnmarshalWithJSONPB(string(vs), val, false); err != nil {
		return util.Errors{err}
	}
	// Previously this used an unchecked root.(*structpb.Struct) assertion which
	// panicked when root had any other type; report an error instead.
	rootStruct, ok := root.(*structpb.Struct)
	if !ok {
		return util.NewErrs(fmt.Errorf("CheckValues: expected *structpb.Struct, got %T", root))
	}
	return ValuesValidate(DefaultValuesValidations, rootStruct.AsMap(), nil)
}
// ValuesValidate validates the values of the tree using the supplied Func.
// It applies any validator registered for the current path, then recurses
// into map children; non-map nodes are leaves.
func ValuesValidate(validations map[string]ValidatorFunc, node any, path util.Path) (errs util.Errors) {
	pstr := path.String()
	scope.Debugf("ValuesValidate %s", pstr)
	if vf, ok := validations[pstr]; ok && vf != nil {
		errs = util.AppendErrs(errs, vf(path, node))
	}
	children, isMap := node.(map[string]any)
	if !isMap {
		// Leaf, nothing more to recurse.
		return errs
	}
	for key, child := range children {
		errs = util.AppendErrs(errs, ValuesValidate(validations, child, append(path, key)))
	}
	return errs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"fmt"
"strconv"
"strings"
goversion "github.com/hashicorp/go-version"
"gopkg.in/yaml.v2"
)
const (
	// releasePrefix is the prefix we used in http://gcr.io/istio-release for releases,
	// e.g. "release-1.2.3"; it is stripped before parsing tags as versions.
	releasePrefix = "release-"
)
// NewVersionFromString creates a new Version from the provided SemVer formatted string and returns a pointer to it.
func NewVersionFromString(s string) (*Version, error) {
	parsed, err := goversion.NewVersion(s)
	if err != nil {
		return nil, err
	}
	out := &Version{}
	segments := parsed.Segments()
	if len(segments) > 0 {
		out.Major = uint32(segments[0])
	}
	if len(segments) > 1 {
		out.Minor = uint32(segments[1])
	}
	if len(segments) > 2 {
		out.Patch = uint32(segments[2])
	}
	// Everything after the first '-' in the original string is the suffix
	// (empty when there is no '-').
	if _, after, found := strings.Cut(s, "-"); found {
		out.Suffix = after
	}
	return out, nil
}
// IsVersionString checks whether the given string is a version string:
// it must parse both as SemVer and as a YAML-encoded Version.
func IsVersionString(path string) bool {
	if _, err := goversion.NewSemver(path); err != nil {
		return false
	}
	var v Version
	return yaml.Unmarshal([]byte(path), &v) == nil
}
// TagToVersionString converts an istio container tag into a version string,
// stripping the release prefix and any pre-release suffix.
func TagToVersionString(path string) (string, error) {
	trimmed := strings.TrimPrefix(path, releasePrefix)
	ver, err := goversion.NewSemver(trimmed)
	if err != nil {
		return "", err
	}
	segments := ver.Segments()
	parts := make([]string, 0, len(segments))
	for _, seg := range segments {
		parts = append(parts, strconv.Itoa(seg))
	}
	return strings.Join(parts, "."), nil
}
// TagToVersionStringGrace converts an Istio container tag into a version string,
// if any error, fallback to use the original tag.
func TagToVersionStringGrace(path string) string {
	if v, err := TagToVersionString(path); err == nil {
		return v
	}
	return path
}
// MajorVersion represents a major version.
type MajorVersion struct {
	Major uint32
}

// MinorVersion represents a minor version.
type MinorVersion struct {
	MajorVersion
	Minor uint32
}

// PatchVersion represents a patch version.
type PatchVersion struct {
	MinorVersion
	Patch uint32
}

// Version represents a version with an optional suffix.
type Version struct {
	PatchVersion
	// Suffix holds everything after the first '-' of the original version
	// string (e.g. "beta.1"); empty when there is no suffix.
	Suffix string
}
// NewMajorVersion creates an initialized MajorVersion struct.
func NewMajorVersion(major uint32) MajorVersion {
	var v MajorVersion
	v.Major = major
	return v
}
// NewMinorVersion creates an initialized MinorVersion struct.
func NewMinorVersion(major, minor uint32) MinorVersion {
	v := MinorVersion{Minor: minor}
	v.MajorVersion = NewMajorVersion(major)
	return v
}
// NewPatchVersion creates an initialized PatchVersion struct.
func NewPatchVersion(major, minor, patch uint32) PatchVersion {
	v := PatchVersion{Patch: patch}
	v.MinorVersion = NewMinorVersion(major, minor)
	return v
}
// NewVersion creates an initialized Version struct.
func NewVersion(major, minor, patch uint32, suffix string) Version {
	v := Version{Suffix: suffix}
	v.PatchVersion = NewPatchVersion(major, minor, patch)
	return v
}
// String implements the Stringer interface.
func (v MajorVersion) String() string {
	return strconv.FormatUint(uint64(v.Major), 10)
}
// String implements the Stringer interface.
func (v MinorVersion) String() string {
	return v.MajorVersion.String() + "." + strconv.FormatUint(uint64(v.Minor), 10)
}
// String implements the Stringer interface.
func (v PatchVersion) String() string {
	return v.MinorVersion.String() + "." + strconv.FormatUint(uint64(v.Patch), 10)
}
// String implements the Stringer interface.
func (v *Version) String() string {
	base := v.PatchVersion.String()
	if v.Suffix == "" {
		return base
	}
	return base + "-" + v.Suffix
}
// UnmarshalYAML implements the Unmarshaler interface:
// the YAML node is read as a string and parsed as a SemVer version.
func (v *Version) UnmarshalYAML(unmarshal func(any) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil {
		return err
	}
	parsed, err := NewVersionFromString(raw)
	if err != nil {
		return err
	}
	*v = *parsed
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"time"
pkgversion "istio.io/istio/operator/pkg/version"
buildversion "istio.io/istio/pkg/version"
)
const (
	// OperatorCodeBaseVersion is the version string from the code base.
	OperatorCodeBaseVersion = "1.22.0"
	// OperatorEOLYear and OperatorEOLMonth mark when this operator build
	// reaches end of life; see IsEOL.
	OperatorEOLYear  = 2025
	OperatorEOLMonth = time.January
)

var (
	// OperatorVersionString is the version string of this operator binary.
	OperatorVersionString string
	// OperatorBinaryVersion is the Istio operator version.
	OperatorBinaryVersion pkgversion.Version
)
// init resolves the operator's version: the code-base version by default,
// overridden by the build's docker tag (set via LDFlags) when that tag parses
// as a version string. The parsed form is stored in OperatorBinaryVersion.
// (The previous redundant `var err error` declaration is removed: `:=` below
// declares err together with v.)
func init() {
	OperatorVersionString = OperatorCodeBaseVersion
	// If dockerinfo has a tag (e.g., specified by LDFlags), we will use it as the version of operator
	if tag := buildversion.DockerInfo.Tag; pkgversion.IsVersionString(tag) {
		OperatorVersionString = pkgversion.TagToVersionStringGrace(tag)
	}
	// The resolved version string must parse; failure here is a build
	// configuration bug, so panic rather than run with a bogus version.
	v, err := pkgversion.NewVersionFromString(OperatorVersionString)
	if err != nil {
		panic(err)
	}
	OperatorBinaryVersion = *v
}
// IsEOL reports whether the current date is at or past the operator's
// end-of-life month (OperatorEOLYear / OperatorEOLMonth).
func IsEOL() bool {
	now := time.Now()
	if now.Year() != OperatorEOLYear {
		return now.Year() > OperatorEOLYear
	}
	return now.Month() >= OperatorEOLMonth
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import (
"time"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
)
var (
	// typeTag labels scrape metrics with the scrape target kind.
	typeTag = monitoring.CreateLabel("type")
	// StartupTime measures the time it takes for the agent to get ready Note: This
	// is dependent on readiness probes. This means our granularity is correlated to
	// the probing interval.
	startupTime = monitoring.NewGauge(
		"startup_duration_seconds",
		"The time from the process starting to being marked ready.",
	)
	// scrapeErrors records total number of failed scrapes.
	scrapeErrors = monitoring.NewSum(
		"scrape_failures_total",
		"The total number of failed scrapes.",
	)
	// Per-target views of scrapeErrors, pre-bound to the "type" label.
	EnvoyScrapeErrors = scrapeErrors.With(typeTag.Value(ScrapeTypeEnvoy))
	AppScrapeErrors   = scrapeErrors.With(typeTag.Value(ScrapeTypeApp))
	AgentScrapeErrors = scrapeErrors.With(typeTag.Value(ScrapeTypeAgent))
	// ScrapeTotals records total number of scrapes.
	ScrapeTotals = monitoring.NewSum(
		"scrapes_total",
		"The total number of scrapes.",
	)
)

// Values for the "type" label identifying which endpoint a scrape targeted.
var (
	ScrapeTypeEnvoy = "envoy"
	ScrapeTypeApp   = "application"
	ScrapeTypeAgent = "agent"
)

// processStartTime anchors the startup-duration measurement to process start.
var processStartTime = time.Now()
// RecordStartupTime publishes the elapsed time from process start to readiness
// into the startup_duration_seconds gauge and logs it.
func RecordStartupTime() {
	elapsed := time.Since(processStartTime)
	startupTime.Record(elapsed.Seconds())
	log.Infof("Readiness succeeded in %v", elapsed)
}
//go:build !windows
// +build !windows
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"net"
"syscall"
)
// copied from https://github.com/kubernetes/kubernetes/blob/v1.27.0-alpha.1/pkg/probe/dialer_others.go#L27
// ProbeDialer returns a dialer optimized for probes to avoid lingering sockets on TIME-WAIT state.
// The dialer reduces the TIME-WAIT period to 1 second instead of the OS default of 60 seconds.
// Using 1 second instead of 0 because setting SO_LINGER to 0 causes pending data to be
// discarded and the connection to be aborted with an RST rather than for the pending data to be
// transmitted and the connection closed cleanly with a FIN.
// Ref: https://issues.k8s.io/89898
func ProbeDialer() *net.Dialer {
	shortenLinger := func(network, address string, conn syscall.RawConn) error {
		return conn.Control(func(fd uintptr) {
			// Best effort: failure to tune SO_LINGER should not fail the dial itself.
			_ = syscall.SetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER, &syscall.Linger{Onoff: 1, Linger: 1})
		})
	}
	return &net.Dialer{Control: shortenLinger}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcready
import (
"fmt"
"sync"
"istio.io/istio/pilot/cmd/pilot-agent/status/ready"
"istio.io/istio/pkg/file"
"istio.io/istio/pkg/istio-agent/grpcxds"
)
// Compile-time check that probe implements ready.Prober.
var _ ready.Prober = &probe{}

// probe lazily loads and caches a gRPC xDS bootstrap file; the embedded
// RWMutex guards access to the bootstrap field.
type probe struct {
	sync.RWMutex
	bootstrapPath string
	bootstrap     *grpcxds.Bootstrap
}
// NewProbe returns a probe that checks if a valid bootstrap file can be loaded once.
// If that bootstrap file has a file_watcher cert provider, we also ensure those certs exist.
func NewProbe(bootstrapPath string) ready.Prober {
	p := &probe{}
	p.bootstrapPath = bootstrapPath
	return p
}
// Check loads the bootstrap file on first use (caching the result) and, when a
// file_watcher cert provider is configured, verifies its certificate files exist.
func (p *probe) Check() error {
	// TODO file watch?
	bs := p.getBootstrap()
	if bs == nil {
		loaded, err := grpcxds.LoadBootstrap(p.bootstrapPath)
		if err != nil {
			return fmt.Errorf("failed loading %s: %v", p.bootstrapPath, err)
		}
		p.setBootstrap(loaded)
		bs = loaded
	}
	fwp := bs.FileWatcherProvider()
	if fwp == nil {
		return nil
	}
	for _, certPath := range fwp.FilePaths() {
		if !file.Exists(certPath) {
			return fmt.Errorf("%s does not exist", certPath)
		}
	}
	return nil
}
// getBootstrap returns the cached bootstrap (nil if not yet loaded) under the read lock.
func (p *probe) getBootstrap() *grpcxds.Bootstrap {
	p.RLock()
	defer p.RUnlock()
	return p.bootstrap
}
// setBootstrap caches the loaded bootstrap under the write lock.
func (p *probe) setBootstrap(bootstrap *grpcxds.Bootstrap) {
	p.Lock()
	defer p.Unlock()
	p.bootstrap = bootstrap
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ready
import (
"context"
"fmt"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
"istio.io/istio/pilot/cmd/pilot-agent/metrics"
"istio.io/istio/pilot/cmd/pilot-agent/status/util"
)
// Probe for readiness.
type Probe struct {
	LocalHostAddr string
	AdminPort     uint16
	// receivedFirstUpdate is latched once Envoy reports at least one successful
	// CDS and LDS update, so the stats query is skipped on later checks.
	receivedFirstUpdate bool
	// Indicates that Envoy is ready at least once so that we can cache and reuse that probe.
	atleastOnceReady bool
	// Context, when set and done, makes the probe report DRAINING instead of
	// querying Envoy.
	Context context.Context
	// NoEnvoy so we only check config status
	NoEnvoy bool
}

// Prober abstracts a single readiness check.
type Prober interface {
	// Check executes the probe and returns an error if the probe fails.
	Check() error
}

// Compile-time check that Probe implements Prober.
var _ Prober = &Probe{}
// Check executes the probe and returns an error if the probe fails.
// Config must have been received from the XDS server before Envoy
// liveness is consulted.
func (p *Probe) Check() error {
	err := p.checkConfigStatus()
	if err == nil {
		err = p.isEnvoyReady()
	}
	return err
}
// checkConfigStatus checks to make sure initial configs have been received from Pilot.
// The positive result is latched in receivedFirstUpdate so Envoy is not
// re-queried once both CDS and LDS have succeeded.
func (p *Probe) checkConfigStatus() error {
	if p.NoEnvoy {
		// TODO some way to verify XDS proxy -> control plane works
		return nil
	}
	if p.receivedFirstUpdate {
		return nil
	}
	s, err := util.GetUpdateStatusStats(p.LocalHostAddr, p.AdminPort)
	if err != nil {
		return err
	}
	cdsUpdated := s.CDSUpdatesSuccess > 0
	ldsUpdated := s.LDSUpdatesSuccess > 0
	switch {
	case cdsUpdated && ldsUpdated:
		p.receivedFirstUpdate = true
		return nil
	case !cdsUpdated && !ldsUpdated:
		return fmt.Errorf("config not received from XDS server (is Istiod running?): %s", s.String())
	case s.LDSUpdatesRejection > 0 || s.CDSUpdatesRejection > 0:
		return fmt.Errorf("config received from XDS server, but was rejected: %s", s.String())
	default:
		return fmt.Errorf("config not fully received from XDS server: %s", s.String())
	}
}
// isEnvoyReady checks to ensure that Envoy is in the LIVE state and workers have started.
// When the probe's context has been cancelled, the server is considered draining.
func (p *Probe) isEnvoyReady() error {
	if p.NoEnvoy {
		return nil
	}
	if p.Context != nil {
		select {
		case <-p.Context.Done():
			return fmt.Errorf("server is not live, current state is: %s", admin.ServerInfo_DRAINING.String())
		default:
		}
	}
	return p.checkEnvoyReadiness()
}
// checkEnvoyReadiness queries Envoy's readiness stats, caching a positive
// result. Once Envoy has been LIVE with started workers it stays that way for
// the life of the process (Istio uses neither hot restart nor Envoy health
// check failure), so re-querying is unnecessary; the real traffic path still
// goes through Envoy, keeping the cached answer honest.
func (p *Probe) checkEnvoyReadiness() error {
	if p.atleastOnceReady {
		return nil
	}
	if err := checkEnvoyStats(p.LocalHostAddr, p.AdminPort); err != nil {
		return err
	}
	metrics.RecordStartupTime()
	p.atleastOnceReady = true
	return nil
}
// checkEnvoyStats actually executes the Stats Query on Envoy admin endpoint,
// requiring server state LIVE and started workers.
func checkEnvoyStats(host string, port uint16) error {
	state, workersStarted, err := util.GetReadinessStats(host, port)
	if err != nil {
		return fmt.Errorf("failed to get readiness stats: %v", err)
	}
	if state != nil && admin.ServerInfo_State(*state) != admin.ServerInfo_LIVE {
		return fmt.Errorf("server is not live, current state is: %v", admin.ServerInfo_State(*state).String())
	}
	if !workersStarted {
		return fmt.Errorf("workers have not yet started")
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"net"
"net/http"
"net/http/pprof"
"os"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/common/expfmt"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials/insecure"
grpcHealth "google.golang.org/grpc/health/grpc_health_v1"
grpcStatus "google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/util/intstr"
k8sUtilIo "k8s.io/utils/io"
"istio.io/istio/pilot/cmd/pilot-agent/metrics"
"istio.io/istio/pilot/cmd/pilot-agent/status/grpcready"
"istio.io/istio/pilot/cmd/pilot-agent/status/ready"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
dnsProto "istio.io/istio/pkg/dns/proto"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/kube/apimirror"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
)
const (
	// readyPath is for the pilot agent readiness itself.
	readyPath = "/healthz/ready"
	// quitPath is to notify the pilot agent to quit.
	quitPath = "/quitquitquit"
	// drainPath triggers a drain of the proxy (see Options.TriggerDrain).
	drainPath = "/drain"
	// KubeAppProberEnvName is the name of the command line flag for pilot agent to pass app prober config.
	// The json encoded string to pass app HTTP probe information from injector(istioctl or webhook).
	// For example, ISTIO_KUBE_APP_PROBERS='{"/app-health/httpbin/livez":{"httpGet":{"path": "/hello", "port": 8080}}}'
	// indicates that httpbin container liveness prober port is 8080 and probing path is /hello.
	// This environment variable should never be set manually.
	KubeAppProberEnvName = "ISTIO_KUBE_APP_PROBERS"
	localHostIPv4        = "127.0.0.1"
	localHostIPv6        = "::1"
	// maxRespBodyLength caps how much of a probe response body is read (10 KiB).
	maxRespBodyLength = 10 * 1 << 10
)
var (
	// Bound as the local source address when the agent dials application probe
	// endpoints (see the probe dialer setup in NewServer).
	UpstreamLocalAddressIPv4 = &net.TCPAddr{IP: net.ParseIP("127.0.0.6")}
	UpstreamLocalAddressIPv6 = &net.TCPAddr{IP: net.ParseIP("::6")}
)

// PrometheusScrapingConfig carries the pod's Prometheus scrape annotations, JSON-encoded.
var PrometheusScrapingConfig = env.Register("ISTIO_PROMETHEUS_ANNOTATIONS", "", "")

var (
	// appProberPattern matches valid app prober paths:
	// /app-health/<container>/(livez|readyz|startupz).
	appProberPattern = regexp.MustCompile(`^/app-health/[^/]+/(livez|readyz|startupz)$`)
	EnableHTTP2Probing = env.Register("ISTIO_ENABLE_HTTP2_PROBING", true,
		"If enabled, HTTP2 probes will be enabled for HTTPS probes, following Kubernetes").Get()
	LegacyLocalhostProbeDestination = env.Register("REWRITE_PROBE_LEGACY_LOCALHOST_DESTINATION", false,
		"If enabled, readiness probes will be sent to 'localhost'. Otherwise, they will be sent to the Pod's IP, matching Kubernetes' behavior.")
	ProbeKeepaliveConnections = env.Register("ENABLE_PROBE_KEEPALIVE_CONNECTIONS", false,
		"If enabled, readiness probes will keep the connection from pilot-agent to the application alive. "+
			"This mirrors older Istio versions' behaviors, but not kubelet's.").Get()
)
// KubeAppProbers holds the information about a Kubernetes pod prober.
// It's a map from the prober URL path to the Kubernetes Prober config.
// For example, "/app-health/hello-world/livez" entry contains liveness prober config for
// container "hello-world".
type KubeAppProbers map[string]*Prober

// Prober represents a single container prober. One of HTTPGet, TCPSocket, or
// GRPC is expected to be set (validated by validateAppKubeProber).
type Prober struct {
	HTTPGet   *apimirror.HTTPGetAction   `json:"httpGet,omitempty"`
	TCPSocket *apimirror.TCPSocketAction `json:"tcpSocket,omitempty"`
	GRPC      *apimirror.GRPCAction      `json:"grpc,omitempty"`
	// TimeoutSeconds bounds each probe request (used as the HTTP client timeout).
	TimeoutSeconds int32 `json:"timeoutSeconds,omitempty"`
}
// Options for the status server.
type Options struct {
	// Ip of the pod. Note: this is only applicable for Kubernetes pods and should only be used for
	// the prober.
	PodIP string
	// KubeAppProbers is a json with Kubernetes application prober config encoded.
	KubeAppProbers string
	NodeType       model.NodeType
	// StatusPort is the port the status server itself listens on.
	StatusPort uint16
	// AdminPort is Envoy's admin port, used for readiness stats queries.
	AdminPort uint16
	IPv6      bool
	// Probes are additional readiness probers run alongside the built-in ones.
	Probes []ready.Prober
	// EnvoyPrometheusPort is the port serving Envoy's Prometheus stats.
	EnvoyPrometheusPort int
	Context             context.Context
	// FetchDNS returns the agent's current DNS name table
	// (NOTE(review): presumably only set when DNS proxying is enabled — confirm with callers).
	FetchDNS func() *dnsProto.NameTable
	NoEnvoy  bool
	// GRPCBootstrap is the path to a gRPC xDS bootstrap file; when non-empty a
	// bootstrap probe is added (see NewServer).
	GRPCBootstrap   string
	EnableProfiling bool
	// PrometheusRegistry to use. Just for testing.
	PrometheusRegistry prometheus.Gatherer
	// Shutdown cancels the agent; invoked by the server
	// (NOTE(review): likely wired to the quit endpoint — handler not visible here).
	Shutdown context.CancelFunc
	// TriggerDrain starts a drain; stored as Server.drain.
	TriggerDrain func()
}
// Server provides an endpoint for handling status probes.
type Server struct {
	ready      []ready.Prober
	prometheus *PrometheusScrapeConfiguration
	// mutex guards mutable server state
	// (NOTE(review): exact fields guarded are not visible in this chunk).
	mutex sync.RWMutex
	// appProbersDestination is the host probes are sent to: the pod IP, or
	// "localhost" under REWRITE_PROBE_LEGACY_LOCALHOST_DESTINATION.
	appProbersDestination string
	appKubeProbers        KubeAppProbers
	// appProbeClient caches one HTTP client per prober path so connections are reused.
	appProbeClient       map[string]*http.Client
	statusPort           uint16
	lastProbeSuccessful  bool
	envoyStatsPort       int
	fetchDNS             func() *dnsProto.NameTable
	upstreamLocalAddress *net.TCPAddr
	config               Options
	http                 *http.Client
	enableProfiling      bool
	registry             prometheus.Gatherer
	shutdown             context.CancelFunc
	drain                func()
}
// initializeMonitoring builds a fresh Prometheus registry with process and Go
// runtime collectors (metrics prefixed "istio_agent_") and hooks it up to the
// monitoring exporter.
func initializeMonitoring() (prometheus.Gatherer, error) {
	registry := prometheus.NewRegistry()
	wrapped := prometheus.WrapRegistererWithPrefix("istio_agent_", registry)
	wrapped.MustRegister(
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
		collectors.NewGoCollector(),
	)
	if _, err := monitoring.RegisterPrometheusExporter(wrapped, registry); err != nil {
		return nil, fmt.Errorf("could not setup exporter: %v", err)
	}
	return registry, nil
}
// NewServer creates a new status server.
// It chooses the localhost flavor (IPv4/IPv6) from the pod IP, assembles the
// readiness probers (Envoy, gRPC bootstrap, caller-supplied), wires monitoring,
// parses the Prometheus scrape annotations, and builds one cached HTTP client
// per Kubernetes app prober. Returns an error on malformed prober/scrape JSON
// or a scrape port that collides with the status port.
func NewServer(config Options) (*Server, error) {
	localhost := localHostIPv4
	upstreamLocalAddress := UpstreamLocalAddressIPv4
	if config.IPv6 {
		localhost = localHostIPv6
		upstreamLocalAddress = UpstreamLocalAddressIPv6
	} else {
		// if not ipv6-only, it can be ipv4-only or dual-stack
		// let InstanceIP decide the localhost
		netIP := net.ParseIP(config.PodIP)
		// Pod IP parses as IPv6 (and not link-local) => use the IPv6 localhost.
		if netIP.To4() == nil && netIP.To16() != nil && !netIP.IsLinkLocalUnicast() {
			localhost = localHostIPv6
			upstreamLocalAddress = UpstreamLocalAddressIPv6
		}
	}
	probes := make([]ready.Prober, 0)
	if !config.NoEnvoy {
		probes = append(probes, &ready.Probe{
			LocalHostAddr: localhost,
			AdminPort:     config.AdminPort,
			Context:       config.Context,
			NoEnvoy:       config.NoEnvoy,
		})
	}
	if config.GRPCBootstrap != "" {
		probes = append(probes, grpcready.NewProbe(config.GRPCBootstrap))
	}
	// Caller-supplied probes run in addition to the built-in ones.
	probes = append(probes, config.Probes...)
	registry := config.PrometheusRegistry
	if registry == nil {
		var err error
		registry, err = initializeMonitoring()
		if err != nil {
			return nil, err
		}
	}
	s := &Server{
		statusPort:            config.StatusPort,
		ready:                 probes,
		http:                  &http.Client{},
		appProbersDestination: config.PodIP,
		envoyStatsPort:        config.EnvoyPrometheusPort,
		fetchDNS:              config.FetchDNS,
		upstreamLocalAddress:  upstreamLocalAddress,
		config:                config,
		enableProfiling:       config.EnableProfiling,
		registry:              registry,
		shutdown: func() {
			config.Shutdown()
		},
		drain: config.TriggerDrain,
	}
	if LegacyLocalhostProbeDestination.Get() {
		s.appProbersDestination = "localhost"
	}
	// Enable prometheus server if its configured and a sidecar
	// Because port 15020 is exposed in the gateway Services, we cannot safely serve this endpoint
	// If we need to do this in the future, we should use envoy to do routing or have another port to make this internal
	// only. For now, its not needed for gateway, as we can just get Envoy stats directly, but if we
	// want to expose istio-agent metrics we may want to revisit this.
	if cfg, f := PrometheusScrapingConfig.Lookup(); config.NodeType == model.SidecarProxy && f {
		var prom PrometheusScrapeConfiguration
		if err := json.Unmarshal([]byte(cfg), &prom); err != nil {
			return nil, fmt.Errorf("failed to unmarshal %s: %v", PrometheusScrapingConfig.Name, err)
		}
		log.Infof("Prometheus scraping configuration: %v", prom)
		if prom.Scrape != "false" {
			s.prometheus = &prom
			// Apply defaults matching the prometheus.io annotation conventions.
			if s.prometheus.Path == "" {
				s.prometheus.Path = "/metrics"
			}
			if s.prometheus.Port == "" {
				s.prometheus.Port = "80"
			}
			if s.prometheus.Port == strconv.Itoa(int(config.StatusPort)) {
				return nil, fmt.Errorf("invalid prometheus scrape configuration: "+
					"application port is the same as agent port, which may lead to a recursive loop. "+
					"Ensure pod does not have prometheus.io/port=%d label, or that injection is not happening multiple times", config.StatusPort)
			}
		}
	}
	if config.KubeAppProbers == "" {
		return s, nil
	}
	if err := json.Unmarshal([]byte(config.KubeAppProbers), &s.appKubeProbers); err != nil {
		return nil, fmt.Errorf("failed to decode app prober err = %v, json string = %v", err, config.KubeAppProbers)
	}
	s.appProbeClient = make(map[string]*http.Client, len(s.appKubeProbers))
	// Validate the map key matching the regex pattern.
	for path, prober := range s.appKubeProbers {
		err := validateAppKubeProber(path, prober)
		if err != nil {
			return nil, err
		}
		if prober.HTTPGet != nil {
			d := ProbeDialer()
			d.LocalAddr = s.upstreamLocalAddress
			// nolint: gosec
			// This is matching Kubernetes. It is a reasonable usage of this, as it is just a health check over localhost.
			transport, err := setTransportDefaults(&http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
				DialContext:     d.DialContext,
				// https://github.com/kubernetes/kubernetes/blob/0153febd9f0098d4b8d0d484927710eaf899ef40/pkg/probe/http/http.go#L55
				// Match Kubernetes logic. This also ensures idle timeouts do not trigger probe failures
				DisableKeepAlives: !ProbeKeepaliveConnections,
			})
			if err != nil {
				return nil, err
			}
			// Construct a http client and cache it in order to reuse the connection.
			s.appProbeClient[path] = &http.Client{
				Timeout: time.Duration(prober.TimeoutSeconds) * time.Second,
				// We skip the verification since kubelet skips the verification for HTTPS prober as well
				// https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
				Transport:     transport,
				CheckRedirect: redirectChecker(),
			}
		}
	}
	return s, nil
}
// Copies logic from https://github.com/kubernetes/kubernetes/blob/b152001f459/pkg/probe/http/http.go#L129-L130
func isRedirect(code int) bool {
return code >= http.StatusMultipleChoices && code < http.StatusBadRequest
}
// Using the same redirect logic that kubelet does: https://github.com/kubernetes/kubernetes/blob/b152001f459/pkg/probe/http/http.go#L141
// This means that:
// * If we exceed 10 redirects, the probe fails
// * If we redirect somewhere external, the probe succeeds (https://github.com/kubernetes/kubernetes/blob/b152001f459/pkg/probe/http/http.go#L130)
// * If we redirect to the same address, the probe will follow the redirect
func redirectChecker() func(*http.Request, []*http.Request) error {
return func(req *http.Request, via []*http.Request) error {
if req.URL.Hostname() != via[0].URL.Hostname() {
return http.ErrUseLastResponse
}
// Default behavior: stop after 10 redirects.
if len(via) >= 10 {
return errors.New("stopped after 10 redirects")
}
return nil
}
}
// validateAppKubeProber checks that a probe path matches the expected URL pattern
// and that the prober declares exactly one action (httpGet, tcpSocket, or gRPC)
// with a numeric (not named) port where applicable.
func validateAppKubeProber(path string, prober *Prober) error {
	if !appProberPattern.MatchString(path) {
		return fmt.Errorf(`invalid path, must be in form of regex pattern %v`, appProberPattern)
	}
	// Exactly one probe type may be configured.
	configured := 0
	for _, present := range []bool{prober.HTTPGet != nil, prober.TCPSocket != nil, prober.GRPC != nil} {
		if present {
			configured++
		}
	}
	if configured != 1 {
		return fmt.Errorf(`invalid prober type, must be one of type httpGet, tcpSocket or gRPC`)
	}
	// Named ports are not supported; the port must be an integer.
	if prober.HTTPGet != nil && prober.HTTPGet.Port.Type != intstr.Int {
		return fmt.Errorf("invalid prober config for %v, the port must be int type", path)
	}
	if prober.TCPSocket != nil && prober.TCPSocket.Port.Type != intstr.Int {
		return fmt.Errorf("invalid prober config for %v, the port must be int type", path)
	}
	return nil
}
// FormatProberURL returns the three HTTP paths (readiness, liveness, startup) that
// pilot agent serves to take over the Kubernetes app probers of the given container.
func FormatProberURL(container string) (string, string, string) {
	prefix := "/app-health/" + container
	return prefix + "/readyz",
		prefix + "/livez",
		prefix + "/startupz"
}
// Run opens the status port and begins accepting probes. It blocks until ctx is
// cancelled; the HTTP server itself is served from a separate goroutine.
func (s *Server) Run(ctx context.Context) {
	log.Infof("Opening status port %d", s.statusPort)

	mux := http.NewServeMux()

	// Add the handler for ready probes.
	mux.HandleFunc(readyPath, s.handleReadyProbe)
	// Default path for prom
	mux.HandleFunc(`/metrics`, s.handleStats)
	// Envoy uses something else - and original agent used the same.
	// Keep for backward compat with configs.
	mux.HandleFunc(`/stats/prometheus`, s.handleStats)
	mux.HandleFunc(quitPath, s.handleQuit)
	mux.HandleFunc(drainPath, s.handleDrain)
	mux.HandleFunc("/app-health/", s.handleAppProbe)

	if s.enableProfiling {
		// Add the handler for pprof; each handler restricts access to localhost.
		mux.HandleFunc("/debug/pprof/", s.handlePprofIndex)
		mux.HandleFunc("/debug/pprof/cmdline", s.handlePprofCmdline)
		mux.HandleFunc("/debug/pprof/profile", s.handlePprofProfile)
		mux.HandleFunc("/debug/pprof/symbol", s.handlePprofSymbol)
		mux.HandleFunc("/debug/pprof/trace", s.handlePprofTrace)
	}
	mux.HandleFunc("/debug/ndsz", s.handleNdsz)

	l, err := net.Listen("tcp", fmt.Sprintf(":%d", s.statusPort))
	if err != nil {
		log.Errorf("Error listening on status port: %v", err.Error())
		return
	}
	// for testing: port 0 lets the kernel pick a free port; record the actual
	// allocated port under the mutex so concurrent readers see the real value.
	if s.statusPort == 0 {
		_, hostPort, _ := net.SplitHostPort(l.Addr().String())
		allocatedPort, _ := strconv.Atoi(hostPort)
		s.mutex.Lock()
		s.statusPort = uint16(allocatedPort)
		s.mutex.Unlock()
	}
	defer l.Close()

	go func() {
		if err := http.Serve(l, mux); err != nil {
			if network.IsUnexpectedListenerError(err) {
				log.Error(err)
			}
			select {
			case <-ctx.Done():
				// We are shutting down already, don't trigger SIGTERM
				return
			default:
				// If the server errors then pilot-agent can never pass readiness or liveness probes
				// Therefore, trigger graceful termination by sending SIGTERM to the binary pid
				notifyExit()
			}
		}
	}()

	// Wait for the agent to be shut down.
	<-ctx.Done()
	log.Info("Status server has successfully terminated")
}
// handlePprofIndex serves the pprof index page; access is restricted to localhost.
func (s *Server) handlePprofIndex(w http.ResponseWriter, r *http.Request) {
	if isRequestFromLocalhost(r) {
		pprof.Index(w, r)
		return
	}
	http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
}
// handlePprofCmdline serves the running process's command line; localhost only.
func (s *Server) handlePprofCmdline(w http.ResponseWriter, r *http.Request) {
	if isRequestFromLocalhost(r) {
		pprof.Cmdline(w, r)
		return
	}
	http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
}
// handlePprofSymbol resolves program counters to symbol names; localhost only.
func (s *Server) handlePprofSymbol(w http.ResponseWriter, r *http.Request) {
	if isRequestFromLocalhost(r) {
		pprof.Symbol(w, r)
		return
	}
	http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
}
// handlePprofProfile serves a CPU profile; localhost only.
func (s *Server) handlePprofProfile(w http.ResponseWriter, r *http.Request) {
	if isRequestFromLocalhost(r) {
		pprof.Profile(w, r)
		return
	}
	http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
}
// handlePprofTrace serves an execution trace; localhost only.
func (s *Server) handlePprofTrace(w http.ResponseWriter, r *http.Request) {
	if isRequestFromLocalhost(r) {
		pprof.Trace(w, r)
		return
	}
	http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
}
// handleReadyProbe replies 200 when every registered readiness probe passes and
// 503 otherwise. Failures are logged on every probe; the "ready" message is only
// logged on the transition from not-ready to ready.
func (s *Server) handleReadyProbe(w http.ResponseWriter, _ *http.Request) {
	err := s.isReady()
	// lastProbeSuccessful is shared state; guard reads and writes with the mutex.
	s.mutex.Lock()
	if err != nil {
		w.WriteHeader(http.StatusServiceUnavailable)
		log.Warnf("Envoy proxy is NOT ready: %s", err.Error())
		s.lastProbeSuccessful = false
	} else {
		w.WriteHeader(http.StatusOK)
		// Only log when becoming ready, to avoid log spam on every probe.
		if !s.lastProbeSuccessful {
			log.Info("Envoy proxy is ready")
		}
		s.lastProbeSuccessful = true
	}
	s.mutex.Unlock()
}
// isReady runs every registered readiness probe and returns the first failure,
// or nil when all probes pass.
func (s *Server) isReady() error {
	for _, probe := range s.ready {
		err := probe.Check()
		if err != nil {
			return err
		}
	}
	return nil
}
func isRequestFromLocalhost(r *http.Request) bool {
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return false
}
userIP := net.ParseIP(ip)
return userIP.IsLoopback()
}
// PrometheusScrapeConfiguration carries the application's metrics scraping
// settings, decoded from the injected JSON configuration.
type PrometheusScrapeConfiguration struct {
	// Scrape set to "false" disables application metric scraping entirely.
	Scrape string `json:"scrape"`
	// Path is the HTTP path to scrape on the application; defaults to "/metrics" when empty.
	Path string `json:"path"`
	// Port is the application port to scrape; defaults to "80" when empty.
	Port string `json:"port"`
}
// handleStats handles prometheus stats scraping. This will scrape envoy metrics, and, if configured,
// the application metrics and merge them together.
// The merge here is a simple string concatenation. This works for almost all cases, assuming the application
// is not exposing the same metrics as Envoy.
// This merging works for both FmtText and FmtOpenMetrics and will use the format of the application metrics.
// Note that we do not return any errors here. If we do, we will drop metrics. For example, the app may be having issues,
// but we still want Envoy metrics. Instead, errors are tracked in the failed scrape metrics/logs.
func (s *Server) handleStats(w http.ResponseWriter, r *http.Request) {
	metrics.ScrapeTotals.Increment()
	var err error
	var envoy, application io.ReadCloser
	var envoyCancel, appCancel context.CancelFunc
	// Close upstream readers and release their request contexts when the merge is done.
	defer func() {
		if envoy != nil {
			err = envoy.Close()
			if err != nil {
				log.Infof("envoy connection is not closed: %v", err)
			}
		}
		if application != nil {
			err = application.Close()
			if err != nil {
				log.Infof("app connection is not closed: %v", err)
			}
		}
		if envoyCancel != nil {
			envoyCancel()
		}
		if appCancel != nil {
			appCancel()
		}
	}()

	// Gather all the metrics we will merge
	if !s.config.NoEnvoy {
		if envoy, envoyCancel, _, err = s.scrape(fmt.Sprintf("http://localhost:%d/stats/prometheus", s.envoyStatsPort), r.Header); err != nil {
			log.Errorf("failed scraping envoy metrics: %v", err)
			metrics.EnvoyScrapeErrors.Increment()
		}
	}
	// Scrape app metrics if defined and capture their format
	var format expfmt.Format
	if s.prometheus != nil {
		var contentType string
		url := fmt.Sprintf("http://localhost:%s%s", s.prometheus.Port, s.prometheus.Path)
		if application, appCancel, contentType, err = s.scrape(url, r.Header); err != nil {
			log.Errorf("failed scraping application metrics: %v", err)
			metrics.AppScrapeErrors.Increment()
		}
		format = negotiateMetricsFormat(contentType)
	} else {
		// Without app metrics format use a default
		format = expfmt.FmtText
	}

	w.Header().Set("Content-Type", string(format))

	// Write out the metrics: agent first, then Envoy, then the application.
	if err = scrapeAndWriteAgentMetrics(s.registry, io.Writer(w)); err != nil {
		log.Errorf("failed scraping and writing agent metrics: %v", err)
		metrics.AgentScrapeErrors.Increment()
	}

	if envoy != nil {
		_, err = io.Copy(w, envoy)
		if err != nil {
			// Fixed wording: was "failed to scraping and writing envoy metrics".
			log.Errorf("failed scraping and writing envoy metrics: %v", err)
			metrics.EnvoyScrapeErrors.Increment()
		}
	}

	// App metrics must go last because if they are FmtOpenMetrics,
	// they will have a trailing "# EOF" which terminates the full exposition
	if application != nil {
		_, err = io.Copy(w, application)
		if err != nil {
			log.Errorf("failed scraping and writing application metrics: %v", err)
			metrics.AppScrapeErrors.Increment()
		}
	}
}
// negotiateMetricsFormat maps a scraped Content-Type header to the expfmt format
// used for the merged exposition. Anything unparsable or unrecognized falls back
// to the classic Prometheus text format.
func negotiateMetricsFormat(contentType string) expfmt.Format {
	mediaType, params, err := mime.ParseMediaType(contentType)
	if err != nil || mediaType != expfmt.OpenMetricsType {
		return expfmt.FmtText
	}
	// OpenMetrics: pick the format by the (optional) version parameter.
	switch params["version"] {
	case expfmt.OpenMetricsVersion_1_0_0:
		return expfmt.FmtOpenMetrics_1_0_0
	case expfmt.OpenMetricsVersion_0_0_1, "":
		return expfmt.FmtOpenMetrics_0_0_1
	default:
		return expfmt.FmtText
	}
}
// scrapeAndWriteAgentMetrics gathers the agent's own metrics from the registry and
// writes them to w in the Prometheus text format, returning the first error hit.
func scrapeAndWriteAgentMetrics(registry prometheus.Gatherer, w io.Writer) error {
	mfs, err := registry.Gather()
	// Check the Gather error before constructing the encoder (the original built
	// the encoder first, which was misleading though harmless).
	if err != nil {
		return err
	}
	enc := expfmt.NewEncoder(w, expfmt.FmtText)
	for _, mf := range mfs {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	return nil
}
func applyHeaders(into http.Header, from http.Header, keys ...string) {
for _, key := range keys {
val := from.Get(key)
if val != "" {
into.Set(key, val)
}
}
}
// getHeaderTimeout parse a string like (1.234) representing number of seconds
func getHeaderTimeout(timeout string) (time.Duration, error) {
timeoutSeconds, err := strconv.ParseFloat(timeout, 64)
if err != nil {
return 0 * time.Second, err
}
return time.Duration(timeoutSeconds * 1e9), nil
}
// scrape will send a request to the provided url to scrape metrics from.
// This will attempt to mimic some of Prometheus functionality by passing some of the headers through
// such as accept, timeout, and user agent.
// Returns the scraped metrics reader as well as the response's "Content-Type" header to determine the metrics format.
// The caller owns the returned ReadCloser and CancelFunc; both may be non-nil
// even when an error is returned, and the CancelFunc may be nil when no timeout
// header was supplied.
func (s *Server) scrape(url string, header http.Header) (io.ReadCloser, context.CancelFunc, string, error) {
	var cancel context.CancelFunc
	ctx := context.Background()
	// Honor Prometheus' scrape-timeout header when present; a malformed value is
	// logged and ignored rather than failing the scrape.
	if timeoutString := header.Get("X-Prometheus-Scrape-Timeout-Seconds"); timeoutString != "" {
		timeout, err := getHeaderTimeout(timeoutString)
		if err != nil {
			log.Warnf("Failed to parse timeout header %v: %v", timeoutString, err)
		} else {
			ctx, cancel = context.WithTimeout(ctx, timeout)
		}
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, cancel, "", err
	}
	// Pass through the headers Prometheus itself would send.
	applyHeaders(req.Header, header, "Accept",
		"User-Agent",
		"X-Prometheus-Scrape-Timeout-Seconds",
	)
	resp, err := s.http.Do(req)
	if err != nil {
		return nil, cancel, "", fmt.Errorf("error scraping %s: %v", url, err)
	}
	if resp.StatusCode != http.StatusOK {
		// Non-200: close the body here since the caller gets no reader back.
		resp.Body.Close()
		return nil, cancel, "", fmt.Errorf("error scraping %s, status code: %v", url, resp.StatusCode)
	}
	format := resp.Header.Get("Content-Type")
	return resp.Body, cancel, format, nil
}
// handleQuit accepts a POST from localhost, acknowledges with 200 "OK", and then
// notifies pilot-agent to exit.
func (s *Server) handleQuit(w http.ResponseWriter, r *http.Request) {
	switch {
	case !isRequestFromLocalhost(r):
		http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
		return
	case r.Method != http.MethodPost:
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte("OK"))
	log.Infof("handling %s, notifying pilot-agent to exit", quitPath)
	s.shutdown()
}
// handleDrain accepts a POST from localhost, acknowledges with 200 "OK", and then
// starts draining connections.
func (s *Server) handleDrain(w http.ResponseWriter, r *http.Request) {
	switch {
	case !isRequestFromLocalhost(r):
		http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
		return
	case r.Method != http.MethodPost:
		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
		return
	}
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write([]byte("OK"))
	log.Infof("handling %s, starting drain", drainPath)
	s.drain()
}
// handleAppProbe dispatches /app-health/<container>/<probe> requests to the
// configured Kubernetes prober (httpGet, tcpSocket, or grpc) registered for
// that URL path, replying 400 when no prober is configured for it.
func (s *Server) handleAppProbe(w http.ResponseWriter, req *http.Request) {
	// Validate the request first.
	path := req.URL.Path
	if !strings.HasPrefix(path, "/") {
		path = "/" + req.URL.Path
	}
	prober, exists := s.appKubeProbers[path]
	if !exists {
		log.Errorf("Prober does not exists url %v", path)
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte(fmt.Sprintf("app prober config does not exists for %v", path)))
		return
	}
	// Exactly one of these is non-nil (enforced by validateAppKubeProber at startup).
	switch {
	case prober.HTTPGet != nil:
		s.handleAppProbeHTTPGet(w, req, prober, path)
	case prober.TCPSocket != nil:
		s.handleAppProbeTCPSocket(w, prober)
	case prober.GRPC != nil:
		s.handleAppProbeGRPC(w, req, prober)
	}
}
// handleAppProbeHTTPGet proxies an HTTP probe to the application on behalf of
// kubelet: it rebuilds the probe URL against the local app destination, forwards
// the incoming headers, and writes back the app's status code (redirects are
// collapsed to 200) plus a bounded copy of the response body.
func (s *Server) handleAppProbeHTTPGet(w http.ResponseWriter, req *http.Request, prober *Prober, path string) {
	proberPath := prober.HTTPGet.Path
	if !strings.HasPrefix(proberPath, "/") {
		proberPath = "/" + proberPath
	}
	var url string

	hostPort := net.JoinHostPort(s.appProbersDestination, strconv.Itoa(prober.HTTPGet.Port.IntValue()))
	if prober.HTTPGet.Scheme == apimirror.URISchemeHTTPS {
		url = fmt.Sprintf("https://%s%s", hostPort, proberPath)
	} else {
		url = fmt.Sprintf("http://%s%s", hostPort, proberPath)
	}
	appReq, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Errorf("Failed to create request to probe app %v, original url %v", err, path)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	appReq.Host = req.Host
	if host, port, err := net.SplitHostPort(req.Host); err == nil {
		port, _ := strconv.Atoi(port)
		// the port is same as the status port, then we need to replace the port in the host with the real one
		if port == int(s.statusPort) {
			realPort := strconv.Itoa(prober.HTTPGet.Port.IntValue())
			appReq.Host = net.JoinHostPort(host, realPort)
		}
	}
	// Forward incoming headers to the application.
	for name, values := range req.Header {
		appReq.Header[name] = slices.Clone(values)
		if len(values) > 0 && (name == "Host") {
			// Probe has specific host header override; honor it
			appReq.Host = values[0]
		}
	}

	// The client was pre-built for every HTTPGet prober at startup, so this lookup cannot miss.
	httpClient := s.appProbeClient[path]

	// Send the request.
	response, err := httpClient.Do(appReq)
	if err != nil {
		log.Errorf("Request to probe app failed: %v, original URL path = %v\napp URL path = %v", err, path, proberPath)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer func() {
		// Drain and close the body to let the Transport reuse the connection
		_, _ = io.Copy(io.Discard, response.Body)
		_ = response.Body.Close()
	}()

	if isRedirect(response.StatusCode) { // Redirect
		// In other cases, we return the original status code. For redirects, it is illegal to
		// not have Location header, so we need to switch to just 200.
		w.WriteHeader(http.StatusOK)
		return
	}

	// We only write the status code to the response.
	w.WriteHeader(response.StatusCode)
	// Return the body from probe as well
	b, _ := k8sUtilIo.ReadAtMost(response.Body, maxRespBodyLength)
	_, _ = w.Write(b)
}
// handleAppProbeTCPSocket probes the application by opening (and immediately
// closing) a TCP connection to the configured port, replying 200 on success
// and 500 on failure.
func (s *Server) handleAppProbeTCPSocket(w http.ResponseWriter, prober *Prober) {
	d := ProbeDialer()
	d.LocalAddr = s.upstreamLocalAddress
	d.Timeout = time.Duration(prober.TimeoutSeconds) * time.Second

	addr := net.JoinHostPort(s.appProbersDestination, strconv.Itoa(prober.TCPSocket.Port.IntValue()))
	conn, err := d.Dial("tcp", addr)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	if err := conn.Close(); err != nil {
		log.Infof("tcp connection is not closed: %v", err)
	}
}
// handleAppProbeGRPC performs a gRPC health check (grpc.health.v1.Health/Check)
// against the application on behalf of kubelet, replying 200 only when the
// service reports SERVING and 500 otherwise.
func (s *Server) handleAppProbeGRPC(w http.ResponseWriter, req *http.Request, prober *Prober) {
	timeout := time.Duration(prober.TimeoutSeconds) * time.Second
	// the DialOptions are referenced from https://github.com/kubernetes/kubernetes/blob/v1.23.1/pkg/probe/grpc/grpc.go#L55-L59
	opts := []grpc.DialOption{
		grpc.WithBlock(),
		grpc.WithTransportCredentials(insecure.NewCredentials()), // credentials are currently not supported
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			d := ProbeDialer()
			d.LocalAddr = s.upstreamLocalAddress
			d.Timeout = timeout
			return d.DialContext(ctx, "tcp", addr)
		}),
	}
	if userAgent := req.Header["User-Agent"]; len(userAgent) > 0 {
		// simulate kubelet
		// please refer to:
		// https://github.com/kubernetes/kubernetes/blob/v1.23.1/pkg/probe/grpc/grpc.go#L56
		// https://github.com/kubernetes/kubernetes/blob/v1.23.1/pkg/probe/http/http.go#L103
		opts = append(opts, grpc.WithUserAgent(userAgent[0]))
	}
	// One timeout bounds both dialing and the health-check RPC.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	addr := net.JoinHostPort(s.appProbersDestination, strconv.Itoa(int(prober.GRPC.Port)))
	conn, err := grpc.DialContext(ctx, addr, opts...)
	if err != nil {
		log.Errorf("Failed to create grpc connection to probe app: %v", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	defer conn.Close()

	// An empty service name asks the server about its overall health.
	var svc string
	if prober.GRPC.Service != nil {
		svc = *prober.GRPC.Service
	}
	grpcClient := grpcHealth.NewHealthClient(conn)
	resp, err := grpcClient.Check(ctx, &grpcHealth.HealthCheckRequest{
		Service: svc,
	})
	// the error handling is referenced from https://github.com/kubernetes/kubernetes/blob/v1.23.1/pkg/probe/grpc/grpc.go#L88-L106
	if err != nil {
		status, ok := grpcStatus.FromError(err)
		if ok {
			switch status.Code() {
			case codes.Unimplemented:
				log.Errorf("server does not implement the grpc health protocol (grpc.health.v1.Health): %v", err)
			case codes.DeadlineExceeded:
				log.Errorf("grpc request not finished within timeout: %v", err)
			default:
				log.Errorf("grpc probe failed: %v", err)
			}
		} else {
			log.Errorf("grpc probe failed: %v", err)
		}
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	if resp.GetStatus() == grpcHealth.HealthCheckResponse_SERVING {
		w.WriteHeader(http.StatusOK)
		return
	}
	w.WriteHeader(http.StatusInternalServerError)
}
// handleNdsz dumps the agent's DNS name table as JSON; localhost callers only.
func (s *Server) handleNdsz(w http.ResponseWriter, r *http.Request) {
	if !isRequestFromLocalhost(r) {
		http.Error(w, "Only requests from localhost are allowed", http.StatusForbidden)
		return
	}
	nametable := s.fetchDNS()
	if nametable != nil {
		writeJSONProto(w, nametable)
		return
	}
	// See https://golang.org/doc/faq#nil_error for why writeJSONProto cannot handle this
	w.WriteHeader(http.StatusNotFound)
	_, _ = w.Write([]byte(`{}`))
}
// writeJSONProto writes a protobuf to a json payload, handling content type, marshaling, and errors.
func writeJSONProto(w http.ResponseWriter, obj any) {
	w.Header().Set("Content-Type", "application/json")
	b, err := config.ToJSON(obj)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		_, _ = w.Write([]byte(err.Error()))
		return
	}
	_, err = w.Write(b)
	if err != nil {
		// NOTE(review): w.Write above has already sent an implicit 200, so this
		// status likely cannot reach the client — confirm intended behavior.
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// notifyExit sends SIGTERM to the current process to trigger graceful shutdown.
func notifyExit() {
	p, err := os.FindProcess(os.Getpid())
	if err != nil {
		// Without a process handle we cannot signal; returning avoids the nil
		// dereference the original would hit on p.Signal below.
		log.Error(err)
		return
	}
	if err := p.Signal(syscall.SIGTERM); err != nil {
		log.Errorf("failed to send SIGTERM to self: %v", err)
	}
}
// defaultTransport supplies Kubernetes-like default timeouts for probe transports.
var defaultTransport = http.DefaultTransport.(*http.Transport)

// setTransportDefaults mirrors Kubernetes probe transport settings
// https://github.com/kubernetes/kubernetes/blob/0153febd9f0098d4b8d0d484927710eaf899ef40/pkg/probe/http/http.go#L52
// When HTTP/2 probing is enabled, it fills in missing timeouts from the default
// transport and configures h2 read-idle/ping timeouts so dead connections are
// detected; otherwise the transport is returned untouched.
func setTransportDefaults(t *http.Transport) (*http.Transport, error) {
	if !EnableHTTP2Probing {
		return t, nil
	}
	if t.TLSHandshakeTimeout == 0 {
		t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout
	}
	if t.IdleConnTimeout == 0 {
		t.IdleConnTimeout = defaultTransport.IdleConnTimeout
	}
	t2, err := http2.ConfigureTransports(t)
	if err != nil {
		return nil, err
	}
	// Idiom fix: drop the redundant time.Duration(N) conversions.
	t2.ReadIdleTimeout = 30 * time.Second
	t2.PingTimeout = 15 * time.Second
	return t, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"fmt"
"net"
"strconv"
"strings"
"time"
multierror "github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/http"
)
// Envoy stat names polled to determine readiness and config-update status.
const (
	statCdsRejected    = "cluster_manager.cds.update_rejected"
	statsCdsSuccess    = "cluster_manager.cds.update_success"
	statLdsRejected    = "listener_manager.lds.update_rejected"
	statLdsSuccess     = "listener_manager.lds.update_success"
	statServerState    = "server.state"
	statWorkersStarted = "listener_manager.workers_started"
	// Filters passed to Envoy's /stats endpoint to limit responses to the stats above.
	readyStatsRegex  = "^(server\\.state|listener_manager\\.workers_started)"
	updateStatsRegex = "^(cluster_manager\\.cds|listener_manager\\.lds)\\.(update_success|update_rejected)$"
)

// readinessTimeout is the default Envoy readiness poll timeout. It is set the same in helm charts.
var readinessTimeout = time.Second * 3
// stat tracks a single Envoy stat to extract: the stat name, a destination for
// the parsed value, and whether the stat has been seen in the output yet.
type stat struct {
	name  string
	value *uint64
	found bool
}
// Stats contains values of interest from a poll of Envoy stats.
type Stats struct {
	// Update Stats.
	CDSUpdatesSuccess   uint64
	CDSUpdatesRejection uint64
	LDSUpdatesSuccess   uint64
	LDSUpdatesRejection uint64
	// Server State of Envoy.
	ServerState    uint64
	WorkersStarted uint64
}

// String returns a human-readable summary of the CDS and LDS update counters.
func (s *Stats) String() string {
	return fmt.Sprintf("cds updates: %d successful, %d rejected; lds updates: %d successful, %d rejected",
		s.CDSUpdatesSuccess, s.CDSUpdatesRejection, s.LDSUpdatesSuccess, s.LDSUpdatesRejection)
}
// GetReadinessStats returns a pointer to Envoy's server.state stat and whether
// its worker threads have started, by querying the admin /stats endpoint.
func GetReadinessStats(localHostAddr string, adminPort uint16) (*uint64, bool, error) {
	// If the localHostAddr was not set, we use 'localhost' to void empty host in URL.
	if localHostAddr == "" {
		localHostAddr = "localhost"
	}
	hostPort := net.JoinHostPort(localHostAddr, strconv.Itoa(int(adminPort)))
	// usedonly + filter keeps the response down to the two readiness stats.
	readinessURL := fmt.Sprintf("http://%s/stats?usedonly&filter=%s", hostPort, readyStatsRegex)
	stats, err := http.DoHTTPGetWithTimeout(readinessURL, readinessTimeout)
	if err != nil {
		return nil, false, err
	}
	// Envoy publishes these stats lazily; their absence means it is not ready yet.
	if !strings.Contains(stats.String(), "server.state") {
		return nil, false, fmt.Errorf("server.state is not yet updated: %s", stats.String())
	}
	if !strings.Contains(stats.String(), "listener_manager.workers_started") {
		return nil, false, fmt.Errorf("listener_manager.workers_started is not yet updated: %s", stats.String())
	}
	s := &Stats{}
	allStats := []*stat{
		{name: statServerState, value: &s.ServerState},
		{name: statWorkersStarted, value: &s.WorkersStarted},
	}
	if err := parseStats(stats, allStats); err != nil {
		return nil, false, err
	}
	return &s.ServerState, s.WorkersStarted == 1, nil
}
// GetUpdateStatusStats returns the CDS/LDS update success and rejection counters
// read from Envoy's admin /stats endpoint.
func GetUpdateStatusStats(localHostAddr string, adminPort uint16) (*Stats, error) {
	// If the localHostAddr was not set, we use 'localhost' to void empty host in URL.
	if localHostAddr == "" {
		localHostAddr = "localhost"
	}
	hostPort := net.JoinHostPort(localHostAddr, strconv.Itoa(int(adminPort)))
	// usedonly + filter limits the response to the four update_{success,rejected} stats.
	stats, err := http.DoHTTPGet(fmt.Sprintf("http://%s/stats?usedonly&filter=%s", hostPort, updateStatsRegex))
	if err != nil {
		return nil, err
	}
	s := &Stats{}
	allStats := []*stat{
		{name: statsCdsSuccess, value: &s.CDSUpdatesSuccess},
		{name: statCdsRejected, value: &s.CDSUpdatesRejection},
		{name: statLdsSuccess, value: &s.LDSUpdatesSuccess},
		{name: statLdsRejected, value: &s.LDSUpdatesRejection},
	}
	if err := parseStats(stats, allStats); err != nil {
		return nil, err
	}
	return s, nil
}
// parseStats scans Envoy stats output line by line, filling in each requested
// stat. Parse failures are accumulated via multierror rather than aborting, and
// stats never seen in the output are zeroed so callers get deterministic values.
func parseStats(input *bytes.Buffer, stats []*stat) (err error) {
	for input.Len() > 0 {
		line, _ := input.ReadString('\n')
		for _, stat := range stats {
			if e := stat.processLine(line); e != nil {
				err = multierror.Append(err, e)
			}
		}
	}
	for _, stat := range stats {
		if !stat.found {
			*stat.value = 0
		}
	}
	return
}
// processLine extracts this stat's value from an Envoy stats line of the form
// "<name>: <value>". Lines that don't match, or stats already found, are ignored.
func (s *stat) processLine(line string) error {
	if s.found || !strings.HasPrefix(line, s.name) {
		return nil
	}
	s.found = true
	parts := strings.Split(line, ":")
	if len(parts) != 2 {
		return fmt.Errorf("envoy stat %s missing separator. line:%s", s.name, line)
	}
	val, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64)
	if err != nil {
		return fmt.Errorf("failed parsing Envoy stat %s (error: %s) line: %s", s.name, err.Error(), line)
	}
	*s.value = val
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package autoregistration
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/maps"
)
// connection from a proxy to a control plane.
// This interface exists to avoid circular reference to xds.Connection as well
// as keeps the API surface scoped to what we need for auto-registration logic.
type connection interface {
	// ID uniquely identifies this connection.
	ID() string
	// Proxy returns the proxy on the other end of the connection.
	Proxy() *model.Proxy
	// ConnectedAt returns when the connection was established.
	ConnectedAt() time.Time
	// Stop terminates the connection (implementation provided by the xds layer).
	Stop()
}
// a single proxy may have multiple connections, so we track them here
// when a WorkloadGroup is deleted, we can force disconnects
// when OnDisconnect occurs, we only trigger cleanup when there are no more connections for that proxy
type adsConnections struct {
sync.Mutex
// keyed by proxy id, then connection id
byProxy map[proxyKey]map[string]connection
}
// newAdsConnections returns an empty, ready-to-use connection tracker.
func newAdsConnections() *adsConnections {
	return &adsConnections{
		byProxy: make(map[proxyKey]map[string]connection),
	}
}
// ConnectionsForGroup collects the connections of every proxy belonging to the
// given WorkloadGroup. Entries are not removed here (don't remove them,
// OnDisconnect will).
func (m *adsConnections) ConnectionsForGroup(wg types.NamespacedName) []connection {
	m.Lock()
	defer m.Unlock()
	var out []connection
	for key, conns := range m.byProxy {
		if key.GroupName != wg.Name || key.Namespace != wg.Namespace {
			continue
		}
		for _, c := range conns {
			out = append(out, c)
		}
	}
	return out
}
// Connect records a new ads connection under its proxy's identity key.
func (m *adsConnections) Connect(conn connection) {
	m.Lock()
	defer m.Unlock()
	key := makeProxyKey(conn.Proxy())
	// Lazily create the per-proxy map on first connection.
	if m.byProxy[key] == nil {
		m.byProxy[key] = make(map[string]connection)
	}
	m.byProxy[key][conn.ID()] = conn
}
// Disconnect tracks disconnect events of ads clients.
// Returns false once there are no more connections for the given proxy.
func (m *adsConnections) Disconnect(conn connection) bool {
	m.Lock()
	defer m.Unlock()
	key := makeProxyKey(conn.Proxy())
	conns := m.byProxy[key]
	if conns == nil {
		return false
	}
	delete(conns, conn.ID())
	if len(conns) > 0 {
		// Other connections remain for this proxy.
		return true
	}
	// Last connection gone; drop the proxy entry entirely.
	delete(m.byProxy, key)
	return false
}
// keys required to uniquely ID a single proxy
type proxyKey struct {
	Network   string // network the proxy reports itself on
	IP        string // the proxy's first IP address
	GroupName string // WorkloadGroup used for auto-registration
	Namespace string // proxy's namespace
}
// makeProxyKey builds the identity key for a proxy from its network, first IP,
// auto-registration group, and namespace.
func makeProxyKey(proxy *model.Proxy) proxyKey {
	return proxyKey{
		Network: string(proxy.Metadata.Network),
		// NOTE(review): assumes the proxy always reports at least one IP address;
		// an empty IPAddresses slice would panic here — confirm callers guarantee it.
		IP:        proxy.IPAddresses[0],
		GroupName: proxy.Metadata.AutoRegisterGroup,
		Namespace: proxy.Metadata.Namespace,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package autoregistration
import (
"context"
"fmt"
"math"
"strings"
"time"
"golang.org/x/time/rate"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
"istio.io/api/annotation"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/autoregistration/internal/health"
"istio.io/istio/pilot/pkg/autoregistration/internal/state"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/status"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube/controllers"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/queue"
)
// Metrics published by the workload auto-registration controller.
var (
	autoRegistrationSuccess = monitoring.NewSum(
		"auto_registration_success_total",
		"Total number of successful auto registrations.",
	)

	autoRegistrationUpdates = monitoring.NewSum(
		"auto_registration_updates_total",
		"Total number of auto registration updates.",
	)

	autoRegistrationUnregistrations = monitoring.NewSum(
		"auto_registration_unregister_total",
		"Total number of unregistrations.",
	)

	autoRegistrationDeletes = monitoring.NewSum(
		"auto_registration_deletes_total",
		"Total number of auto registration cleaned up by periodic timer.",
	)

	autoRegistrationErrors = monitoring.NewSum(
		"auto_registration_errors_total",
		"Total number of auto registration errors.",
	)
)
const (
	// timeFormat is used when recording connect/disconnect timestamps.
	timeFormat = time.RFC3339Nano
	// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
	// sequence of delays between successive queuings of a service.
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms
	maxRetries = 5
)

// log is the scoped logger for the workload-entry (wle) controller.
var log = istiolog.RegisterScope("wle", "wle controller debugging")
// Controller manages the lifecycle (auto-registration/unregistration) and health
// status of WorkloadEntries for proxies connecting through a WorkloadGroup.
type Controller struct {
	instanceID string
	// TODO move WorkloadEntry related tasks into their own object and give InternalGen a reference.
	// store should either be k8s (for running pilot) or in-memory (for tests). MCP and other config store implementations
	// do not support writing. We only use it here for reading WorkloadEntry/WorkloadGroup.
	store model.ConfigStoreController

	// Note: unregister is to update the workload entry status: like setting `istio.io/disconnectedAt`
	// and make the workload entry enqueue `cleanupQueue`
	// cleanup is to delete the workload entry

	// queue contains workloadEntry that need to be unregistered
	queue controllers.Queue
	// cleanupLimit rate limit's auto registered WorkloadEntry cleanup calls to k8s
	cleanupLimit *rate.Limiter
	// cleanupQueue delays the cleanup of auto registered WorkloadEntries to allow for grace period
	cleanupQueue queue.Delayed

	// adsConnections tracks the live ads connections per proxy.
	adsConnections *adsConnections
	// lateRegistrationQueue handles WorkloadGroups created after their proxies have already connected.
	lateRegistrationQueue controllers.Queue
	// maxConnectionAge is a duration that workload entry should be cleaned up if it does not reconnects.
	maxConnectionAge time.Duration

	stateStore       *state.Store
	healthController *health.Controller
}
// HealthEvent aliases the internal health package's event type for callers.
type HealthEvent = health.HealthEvent

// NewController create a controller which manages workload lifecycle and health status.
// It returns nil when neither auto-registration nor health checks are enabled.
func NewController(store model.ConfigStoreController, instanceID string, maxConnAge time.Duration) *Controller {
	if !features.WorkloadEntryAutoRegistration && !features.WorkloadEntryHealthChecks {
		return nil
	}

	// Allow entries to live 50% past the max connection age before cleanup.
	if maxConnAge != math.MaxInt64 {
		maxConnAge += maxConnAge / 2
		// if overflow, set it to max int64
		if maxConnAge < 0 {
			maxConnAge = time.Duration(math.MaxInt64)
		}
	}
	c := &Controller{
		instanceID:       instanceID,
		store:            store,
		cleanupLimit:     rate.NewLimiter(rate.Limit(20), 1),
		cleanupQueue:     queue.NewDelayed(),
		adsConnections:   newAdsConnections(),
		maxConnectionAge: maxConnAge,
	}
	c.queue = controllers.NewQueue("unregister_workloadentry",
		controllers.WithMaxAttempts(maxRetries),
		controllers.WithGenericReconciler(c.unregisterWorkload))
	c.stateStore = state.NewStore(store, c)
	c.healthController = health.NewController(c.stateStore, maxRetries)
	c.setupAutoRecreate()
	return c
}
// Run starts the controller's background workers and blocks until stop is closed.
func (c *Controller) Run(stop <-chan struct{}) {
	if c == nil {
		// NewController returns nil when the relevant features are disabled;
		// tolerate that so callers need not guard.
		return
	}
	if c.store != nil && c.cleanupQueue != nil {
		go c.periodicWorkloadEntryCleanup(stop)
		go c.cleanupQueue.Run(stop)
		// NOTE(review): lateRegistrationQueue is only initialized by
		// setupAutoRecreate when WorkloadEntryAutoRegistration is enabled;
		// presumably controllers.Queue's zero value tolerates Run when only
		// health checks are enabled — confirm this cannot panic.
		go c.lateRegistrationQueue.Run(stop)
	}
	go c.queue.Run(stop)
	go c.healthController.Run(stop)
	<-stop
}
// workItem contains the state of a "disconnect" event used to unregister a workload.
type workItem struct {
	// entryName is the WorkloadEntry being unregistered.
	entryName string
	// autoCreated is true when the entry was created via auto-registration
	// (it affects which metrics are incremented on unregister).
	autoCreated bool
	// proxy is the disconnecting proxy; used for namespace and logging.
	proxy *model.Proxy
	// disConTime is when the disconnect was observed.
	disConTime time.Time
	// origConTime is when the connection being torn down was established;
	// used to discard stale disconnect events after a fast reconnect.
	origConTime time.Time
}
// setupAutoRecreate adds a handler to create entries for existing connections when a WG is added.
// This covers proxies that connected before their WorkloadGroup was created:
// when the group appears, every already-connected proxy referencing it gets a
// WorkloadEntry registered retroactively.
func (c *Controller) setupAutoRecreate() {
	if !features.WorkloadEntryAutoRegistration {
		return
	}
	c.lateRegistrationQueue = controllers.NewQueue("auto-register existing connections",
		controllers.WithReconciler(func(key kubetypes.NamespacedName) error {
			log.Debugf("(%s) processing WorkloadGroup add for %s/%s", c.instanceID, key.Namespace, key.Name)
			// WorkloadGroup doesn't exist anymore, skip this.
			if c.store.Get(gvk.WorkloadGroup, key.Name, key.Namespace) == nil {
				return nil
			}
			// Register an entry for every live connection that references this group.
			conns := c.adsConnections.ConnectionsForGroup(key)
			for _, conn := range conns {
				proxy := conn.Proxy()
				entryName := autoregisteredWorkloadEntryName(proxy)
				if entryName == "" {
					// proxy is missing IP/namespace metadata; skip it
					continue
				}
				// Best-effort: log and continue so one failure doesn't block the rest.
				if err := c.registerWorkload(entryName, proxy, conn.ConnectedAt()); err != nil {
					log.Error(err)
				}
				proxy.SetWorkloadEntry(entryName, true)
			}
			return nil
		}))
	// Enqueue group adds only; updates/deletes are irrelevant for late registration.
	c.store.RegisterEventHandler(gvk.WorkloadGroup, func(_ config.Config, cfg config.Config, event model.Event) {
		if event == model.EventAdd {
			c.lateRegistrationQueue.Add(cfg.NamespacedName())
		}
	})
}
// setConnectMeta stamps cfg with the controlling istiod instance and the
// connection timestamp, and clears any stale disconnect marker.
func setConnectMeta(c *config.Config, controller string, conTime time.Time) {
	annos := c.Annotations
	if annos == nil {
		annos = map[string]string{}
		c.Annotations = annos
	}
	annos[annotation.IoIstioWorkloadController.Name] = controller
	annos[annotation.IoIstioConnectedAt.Name] = conTime.Format(timeFormat)
	// a connected workload cannot simultaneously be disconnected
	delete(annos, annotation.IoIstioDisconnectedAt.Name)
}
// OnConnect determines whether a connecting proxy represents a non-Kubernetes
// workload and, if that's the case, initiates special processing required for that type
// of workloads, such as auto-registration, health status updates, etc.
//
// If connecting proxy represents a workload that is using auto-registration, it will
// create a WorkloadEntry resource automatically and be ready to receive health status
// updates.
//
// If connecting proxy represents a workload that is not using auto-registration,
// the WorkloadEntry resource is expected to exist beforehand. Otherwise, no special
// processing will be initiated, e.g. health status updates will be ignored.
func (c *Controller) OnConnect(conn connection) error {
	if c == nil {
		// controller disabled (NewController returned nil)
		return nil
	}
	proxy := conn.Proxy()
	var entryName string
	var autoCreate bool
	if features.WorkloadEntryAutoRegistration && proxy.Metadata.AutoRegisterGroup != "" {
		// auto-registration path: derive the entry name from proxy metadata
		entryName = autoregisteredWorkloadEntryName(proxy)
		autoCreate = true
	} else if features.WorkloadEntryHealthChecks && proxy.Metadata.WorkloadEntry != "" {
		// a non-empty value of the `WorkloadEntry` field indicates that proxy must correspond to the WorkloadEntry
		wle := c.store.Get(gvk.WorkloadEntry, proxy.Metadata.WorkloadEntry, proxy.Metadata.Namespace)
		if wle == nil {
			// either invalid proxy configuration or config propagation delay
			return fmt.Errorf("proxy metadata indicates that it must correspond to an existing WorkloadEntry, "+
				"however WorkloadEntry %s/%s is not found", proxy.Metadata.Namespace, proxy.Metadata.WorkloadEntry)
		}
		if health.IsEligibleForHealthStatusUpdates(wle) {
			if err := ensureProxyCanControlEntry(proxy, wle); err != nil {
				return err
			}
			entryName = wle.Name
		}
	}
	if entryName == "" {
		// not a workload we manage; no special processing
		return nil
	}
	proxy.SetWorkloadEntry(entryName, autoCreate)
	c.adsConnections.Connect(conn)
	err := c.onWorkloadConnect(entryName, proxy, conn.ConnectedAt(), autoCreate)
	if err != nil {
		log.Error(err)
	}
	return err
}
// ensureProxyCanControlEntry ensures the connected proxy's identity matches that of the WorkloadEntry it is associating with.
// Validation is skipped entirely when ValidateWorkloadEntryIdentity is disabled.
func ensureProxyCanControlEntry(proxy *model.Proxy, wle *config.Config) error {
	if !features.ValidateWorkloadEntryIdentity {
		// Validation disabled, skip
		return nil
	}
	identity := proxy.VerifiedIdentity
	if identity == nil {
		return fmt.Errorf("registration of WorkloadEntry requires a verified identity")
	}
	// the proxy must live in the same namespace as the entry it controls
	if identity.Namespace != wle.Namespace {
		return fmt.Errorf("registration of WorkloadEntry namespace mismatch: %q vs %q", identity.Namespace, wle.Namespace)
	}
	// an empty ServiceAccount on the entry means "any service account"
	spec := wle.Spec.(*v1alpha3.WorkloadEntry)
	if sa := spec.ServiceAccount; sa != "" && identity.ServiceAccount != sa {
		return fmt.Errorf("registration of WorkloadEntry service account mismatch: %q vs %q", identity.ServiceAccount, sa)
	}
	return nil
}
// onWorkloadConnect creates/updates WorkloadEntry of the connecting workload.
//
// If workload is using auto-registration, WorkloadEntry will be created automatically.
//
// If workload is not using auto-registration, WorkloadEntry must already exist.
func (c *Controller) onWorkloadConnect(entryName string, proxy *model.Proxy, conTime time.Time, autoCreate bool) error {
	if !autoCreate {
		// entry must pre-exist; just take ownership of it
		return c.becomeControllerOf(entryName, proxy, conTime)
	}
	// auto-registration: create the entry, or refresh it if it already exists
	return c.registerWorkload(entryName, proxy, conTime)
}
// becomeControllerOf updates an existing WorkloadEntry of a workload that is not using
// auto-registration, marking this istiod instance as its controller.
func (c *Controller) becomeControllerOf(entryName string, proxy *model.Proxy, conTime time.Time) error {
	changed, err := c.changeWorkloadEntryStateToConnected(entryName, proxy, conTime)
	switch {
	case err != nil:
		return err
	case changed:
		log.Infof("updated health-checked WorkloadEntry %s/%s", proxy.Metadata.Namespace, entryName)
	}
	// unchanged entries (e.g. already owned with a newer timestamp) are not logged
	return nil
}
// registerWorkload creates or updates a WorkloadEntry of a workload that is using
// auto-registration. If the entry already exists, it is only marked as connected;
// otherwise a new entry is built from the associated WorkloadGroup's template.
func (c *Controller) registerWorkload(entryName string, proxy *model.Proxy, conTime time.Time) error {
	wle := c.store.Get(gvk.WorkloadEntry, entryName, proxy.Metadata.Namespace)
	if wle != nil {
		// entry exists (e.g. fast reconnect): verify identity, then take ownership
		if err := ensureProxyCanControlEntry(proxy, wle); err != nil {
			return err
		}
		changed, err := c.changeWorkloadEntryStateToConnected(entryName, proxy, conTime)
		if err != nil {
			autoRegistrationErrors.Increment()
			return err
		}
		if !changed {
			// another istiod owns it with a newer connection; nothing to do
			return nil
		}
		autoRegistrationUpdates.Increment()
		log.Infof("updated auto-registered WorkloadEntry %s/%s as connected", proxy.Metadata.Namespace, entryName)
		return nil
	}
	// No WorkloadEntry, create one using fields from the associated WorkloadGroup
	groupCfg := c.store.Get(gvk.WorkloadGroup, proxy.Metadata.AutoRegisterGroup, proxy.Metadata.Namespace)
	if groupCfg == nil {
		autoRegistrationErrors.Increment()
		// FailedPrecondition tells the proxy the group must exist before registration can succeed
		return grpcstatus.Errorf(codes.FailedPrecondition, "auto-registration WorkloadEntry of %v failed: cannot find WorkloadGroup %s/%s",
			proxy.ID, proxy.Metadata.Namespace, proxy.Metadata.AutoRegisterGroup)
	}
	entry := workloadEntryFromGroup(entryName, proxy, groupCfg)
	if err := ensureProxyCanControlEntry(proxy, entry); err != nil {
		return err
	}
	setConnectMeta(entry, c.instanceID, conTime)
	_, err := c.store.Create(*entry)
	if err != nil {
		autoRegistrationErrors.Increment()
		return fmt.Errorf("auto-registration WorkloadEntry of %v failed: error creating WorkloadEntry: %v", proxy.ID, err)
	}
	hcMessage := ""
	if health.IsEligibleForHealthStatusUpdates(entry) {
		hcMessage = " with health checking enabled"
	}
	autoRegistrationSuccess.Increment()
	log.Infof("auto-registered WorkloadEntry %s/%s%s", proxy.Metadata.Namespace, entryName, hcMessage)
	return nil
}
// changeWorkloadEntryStateToConnected updates given WorkloadEntry to reflect that
// it is now connected to this particular `istiod` instance.
// Returns (false, nil) when the update should be skipped because the entry has
// newer connect/disconnect state than conTime (i.e. the event is stale).
func (c *Controller) changeWorkloadEntryStateToConnected(entryName string, proxy *model.Proxy, conTime time.Time) (bool, error) {
	wle := c.store.Get(gvk.WorkloadEntry, entryName, proxy.Metadata.Namespace)
	if wle == nil {
		return false, fmt.Errorf("failed updating WorkloadEntry %s/%s: WorkloadEntry not found", proxy.Metadata.Namespace, entryName)
	}
	// check if this was actually disconnected AFTER this connTime
	// this check can miss, but when it does the `Update` will fail due to versioning
	// and retry. The retry includes this check and passes the next time.
	if timestamp, ok := wle.Annotations[annotation.IoIstioDisconnectedAt.Name]; ok {
		// parse error is deliberately ignored: a zero disconnTime never sorts after conTime
		disconnTime, _ := time.Parse(timeFormat, timestamp)
		if conTime.Before(disconnTime) {
			// we slowly processed a connect and disconnected before getting to this point
			return false, nil
		}
	}
	lastConTime, _ := time.Parse(timeFormat, wle.Annotations[annotation.IoIstioConnectedAt.Name])
	// the proxy has reconnected to another pilot, not belong to this one.
	if conTime.Before(lastConTime) {
		return false, nil
	}
	// Try to update, if it fails we retry all the above logic since the WLE changed
	updated := wle.DeepCopy()
	setConnectMeta(&updated, c.instanceID, conTime)
	_, err := c.store.Update(updated)
	if err != nil {
		return false, fmt.Errorf("failed updating WorkloadEntry %s/%s err: %v", proxy.Metadata.Namespace, entryName, err)
	}
	return true, nil
}
// changeWorkloadEntryStateToDisconnected updates given WorkloadEntry to reflect that
// it is no longer connected to this particular `istiod` instance.
// Returns (false, nil) when no update is needed (already reconnected elsewhere
// or the disconnect event is stale), and an error when the entry is missing or
// the store update fails (both are retried by the caller's queue).
func (c *Controller) changeWorkloadEntryStateToDisconnected(entryName string, proxy *model.Proxy, disconTime, origConnTime time.Time) (bool, error) {
	// unset controller, set disconnect time
	cfg := c.store.Get(gvk.WorkloadEntry, entryName, proxy.Metadata.Namespace)
	if cfg == nil {
		log.Infof("workloadentry %s/%s is not found, maybe deleted or because of propagate latency",
			proxy.Metadata.Namespace, entryName)
		// return error and backoff retry to prevent workloadentry leak
		return false, fmt.Errorf("workloadentry %s/%s is not found", proxy.Metadata.Namespace, entryName)
	}
	// only queue a delete if this disconnect event is associated with the last connect event written to the workload entry
	if mostRecentConn, err := time.Parse(timeFormat, cfg.Annotations[annotation.IoIstioConnectedAt.Name]); err == nil {
		if mostRecentConn.After(origConnTime) {
			// this disconnect event wasn't processed until after we successfully reconnected
			return false, nil
		}
	}
	// The wle has reconnected to another istiod and controlled by it.
	if cfg.Annotations[annotation.IoIstioWorkloadController.Name] != c.instanceID {
		return false, nil
	}
	conTime, _ := time.Parse(timeFormat, cfg.Annotations[annotation.IoIstioConnectedAt.Name])
	// The wle has reconnected to this istiod,
	// this may happen when the unregister fails retry
	if disconTime.Before(conTime) {
		return false, nil
	}
	wle := cfg.DeepCopy()
	delete(wle.Annotations, annotation.IoIstioConnectedAt.Name)
	wle.Annotations[annotation.IoIstioDisconnectedAt.Name] = disconTime.Format(timeFormat)
	// use update instead of patch to prevent race condition
	_, err := c.store.Update(wle)
	if err != nil {
		return false, fmt.Errorf("disconnect: failed updating WorkloadEntry %s/%s: %v", proxy.Metadata.Namespace, entryName, err)
	}
	return true, nil
}
// OnDisconnect determines whether a connected proxy represents a non-Kubernetes
// workload and, if that's the case, terminates special processing required for that type
// of workloads, such as auto-registration, health status updates, etc.
//
// If proxy represents a workload (be it auto-registered or not), WorkloadEntry resource
// will be updated to reflect that the proxy is no longer connected to this particular `istiod`
// instance.
//
// Besides that, if proxy represents a workload that is using auto-registration, WorkloadEntry
// resource will be scheduled for removal if the proxy does not reconnect within a grace period.
//
// If proxy represents a workload that is not using auto-registration, WorkloadEntry resource
// will be scheduled to be marked unhealthy if the proxy does not reconnect within a grace period.
func (c *Controller) OnDisconnect(conn connection) {
	if c == nil {
		return
	}
	if !features.WorkloadEntryAutoRegistration && !features.WorkloadEntryHealthChecks {
		return
	}
	proxy := conn.Proxy()
	// check if the WE already exists, update the status
	entryName, autoCreate := proxy.WorkloadEntry()
	if entryName == "" {
		// this proxy was never associated with a WorkloadEntry
		return
	}
	// if there is still an ads connection, do not unregister.
	if remainingConnections := c.adsConnections.Disconnect(conn); remainingConnections {
		return
	}
	// hold the proxy's read lock while snapshotting its state into the work item
	proxy.RLock()
	defer proxy.RUnlock()
	workload := &workItem{
		entryName:   entryName,
		autoCreated: autoCreate,
		proxy:       conn.Proxy(),
		disConTime:  time.Now(),
		origConTime: conn.ConnectedAt(),
	}
	// queue has max retry itself
	c.queue.Add(workload)
}
// unregisterWorkload is the reconciler for the unregister queue: it marks the
// WorkloadEntry as disconnected and schedules its cleanup after the grace period.
func (c *Controller) unregisterWorkload(item any) error {
	workItem, ok := item.(*workItem)
	if !ok {
		// unexpected item type; drop it rather than retry forever
		return nil
	}
	changed, err := c.changeWorkloadEntryStateToDisconnected(workItem.entryName, workItem.proxy, workItem.disConTime, workItem.origConTime)
	if err != nil {
		autoRegistrationErrors.Increment()
		// returning the error makes the queue retry (up to maxRetries)
		return err
	}
	if !changed {
		// stale disconnect (workload already reconnected); nothing to clean up
		return nil
	}
	log.Infof("updated auto-registered WorkloadEntry %s/%s as disconnected", workItem.proxy.Metadata.Namespace, workItem.entryName)
	if workItem.autoCreated {
		autoRegistrationUnregistrations.Increment()
	}
	// after grace period, check if the workload ever reconnected
	ns := workItem.proxy.Metadata.Namespace
	c.cleanupQueue.PushDelayed(func() error {
		wle := c.store.Get(gvk.WorkloadEntry, workItem.entryName, ns)
		if wle == nil {
			// already deleted (e.g. by another istiod or an operator)
			return nil
		}
		if c.shouldCleanupEntry(*wle) {
			c.cleanupEntry(*wle, false)
		}
		return nil
	}, features.WorkloadEntryCleanupGracePeriod)
	return nil
}
// QueueWorkloadEntryHealth enqueues the associated WorkloadEntries health status.
// It is a no-op when health checks are disabled.
func (c *Controller) QueueWorkloadEntryHealth(proxy *model.Proxy, event HealthEvent) {
	if !features.WorkloadEntryHealthChecks {
		// feature disabled; drop the event silently
		return
	}
	// delegate to the dedicated health controller
	c.healthController.QueueWorkloadEntryHealth(proxy, event)
}
// periodicWorkloadEntryCleanup periodically lists all WorkloadEntries and queues
// cleanup for any that are eligible (safety net for entries whose delayed
// cleanup was lost, e.g. across istiod restarts).
func (c *Controller) periodicWorkloadEntryCleanup(stopCh <-chan struct{}) {
	if !features.WorkloadEntryAutoRegistration && !features.WorkloadEntryHealthChecks {
		return
	}
	// sweep far less often than the grace period so the delayed queue does most of the work
	ticker := time.NewTicker(10 * features.WorkloadEntryCleanupGracePeriod)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			wles := c.store.List(gvk.WorkloadEntry, metav1.NamespaceAll)
			for _, wle := range wles {
				// copy the loop variable: the closure below outlives this iteration
				wle := wle
				if c.shouldCleanupEntry(wle) {
					c.cleanupQueue.Push(func() error {
						c.cleanupEntry(wle, true)
						return nil
					})
				}
			}
		case <-stopCh:
			return
		}
	}
}
// shouldCleanupEntry reports whether a WorkloadEntry managed by this subsystem
// is stale enough to be cleaned up (deleted or health-condition-stripped).
func (c *Controller) shouldCleanupEntry(wle config.Config) bool {
	// don't clean up if WorkloadEntry is neither auto-registered
	// nor health-checked
	if !isAutoRegisteredWorkloadEntry(&wle) &&
		!(isHealthCheckedWorkloadEntry(&wle) && health.HasHealthCondition(&wle)) {
		return false
	}
	// If there is `istio.io/connectedAt` set, don't cleanup this workload entry.
	// This may happen when the workload fast reconnects to the same istiod.
	// 1. disconnect: the workload entry has been updated
	// 2. connect: but the patch is based on the old workloadentry because of the propagation latency.
	// So in this case the `istio.io/disconnectedAt` is still there and the cleanup procedure will go on.
	connTime := wle.Annotations[annotation.IoIstioConnectedAt.Name]
	if connTime != "" {
		// handle workload leak when both workload/pilot down at the same time before pilot has a chance to set disconnTime
		connAt, err := time.Parse(timeFormat, connTime)
		// NOTE(review): the uint64 conversion makes a negative elapsed time
		// (connAt in the future, e.g. clock skew) compare as very large and
		// therefore trigger cleanup — presumably intentional; confirm.
		if err == nil && uint64(time.Since(connAt)) > uint64(c.maxConnectionAge) {
			return true
		}
		return false
	}
	disconnTime := wle.Annotations[annotation.IoIstioDisconnectedAt.Name]
	if disconnTime == "" {
		// never connected through this subsystem; leave it alone
		return false
	}
	disconnAt, err := time.Parse(timeFormat, disconnTime)
	// if we haven't passed the grace period, don't cleanup
	if err == nil && time.Since(disconnAt) < features.WorkloadEntryCleanupGracePeriod {
		return false
	}
	return true
}
// cleanupEntry performs clean-up actions on a WorkloadEntry of a proxy that hasn't
// reconnected within a grace period: auto-registered entries are deleted,
// health-checked entries merely lose their health condition.
func (c *Controller) cleanupEntry(wle config.Config, periodic bool) {
	// throttle cleanup calls against the API server
	if err := c.cleanupLimit.Wait(context.TODO()); err != nil {
		log.Errorf("error in WorkloadEntry cleanup rate limiter: %v", err)
		return
	}
	switch {
	case isAutoRegisteredWorkloadEntry(&wle):
		c.deleteEntry(wle, periodic)
	case isHealthCheckedWorkloadEntry(&wle) && health.HasHealthCondition(&wle):
		c.deleteHealthCondition(wle, periodic)
	}
}
// deleteEntry removes WorkloadEntry that was created automatically for a workload
// that is using auto-registration. NotFound is treated as success.
func (c *Controller) deleteEntry(wle config.Config, periodic bool) {
	// delete with the observed ResourceVersion to avoid clobbering concurrent updates
	err := c.store.Delete(gvk.WorkloadEntry, wle.Name, wle.Namespace, &wle.ResourceVersion)
	if err != nil && !errors.IsNotFound(err) {
		log.Warnf("failed cleaning up auto-registered WorkloadEntry %s/%s: %v", wle.Namespace, wle.Name, err)
		autoRegistrationErrors.Increment()
		return
	}
	autoRegistrationDeletes.Increment()
	log.Infof("cleaned up auto-registered WorkloadEntry %s/%s periodic:%v", wle.Namespace, wle.Name, periodic)
}
// deleteHealthCondition updates WorkloadEntry of a workload that is not using auto-registration
// to remove information about the health status (since we can no longer be certain about it).
func (c *Controller) deleteHealthCondition(wle config.Config, periodic bool) {
	if err := c.stateStore.DeleteHealthCondition(wle); err != nil {
		log.Warnf("failed cleaning up health-checked WorkloadEntry %s/%s: %v", wle.Namespace, wle.Name, err)
		return
	}
	log.Infof("cleaned up health-checked WorkloadEntry %s/%s periodic:%v", wle.Namespace, wle.Name, periodic)
}
// IsControllerOf implements state.StoreCallbacks. It reports whether the given
// WorkloadEntry is currently controlled by this istiod instance.
func (c *Controller) IsControllerOf(wle *config.Config) bool {
	if wle == nil {
		return false
	}
	owner := wle.Annotations[annotation.IoIstioWorkloadController.Name]
	return owner == c.instanceID
}
// autoregisteredWorkloadEntryName derives the WorkloadEntry name for an
// auto-registering proxy from its group, primary IP and (optionally) network.
// Returns "" (with an error logged) when required metadata is missing.
func autoregisteredWorkloadEntryName(proxy *model.Proxy) string {
	group := proxy.Metadata.AutoRegisterGroup
	if group == "" {
		return ""
	}
	if len(proxy.IPAddresses) == 0 {
		log.Errorf("auto-registration of %v failed: missing IP addresses", proxy.ID)
		return ""
	}
	if len(proxy.Metadata.Namespace) == 0 {
		log.Errorf("auto-registration of %v failed: missing namespace", proxy.ID)
		return ""
	}
	parts := []string{group, sanitizeIP(proxy.IPAddresses[0])}
	if nw := proxy.Metadata.Network; nw != "" {
		parts = append(parts, string(nw))
	}
	name := strings.Join(parts, "-")
	// Kubernetes resource names are capped at 253 characters; keep the tail
	// (the IP/network suffix) which distinguishes entries within a group.
	if len(name) > 253 {
		name = name[len(name)-253:]
		log.Warnf("generated WorkloadEntry name is too long, consider making the WorkloadGroup name shorter. Shortening from beginning to: %s", name)
	}
	return name
}
// sanitizeIP ensures an IP address (IPv6) can be used in Kubernetes resource name
// by rewriting every ":" (illegal in resource names) as "-".
func sanitizeIP(s string) string {
	return strings.Map(func(r rune) rune {
		if r == ':' {
			return '-'
		}
		return r
	}, s)
}
// mergeLabels flattens the given label maps into a single new map.
// Later maps win on key collisions; the result is never nil.
func mergeLabels(labels ...map[string]string) map[string]string {
	if len(labels) == 0 {
		return map[string]string{}
	}
	// size hint only; collisions may make the result smaller
	merged := make(map[string]string, len(labels)*len(labels[0]))
	for _, labelMap := range labels {
		for key, value := range labelMap {
			merged[key] = value
		}
	}
	return merged
}
var workloadGroupIsController = true
// workloadEntryFromGroup builds a WorkloadEntry config for an auto-registering
// proxy from its WorkloadGroup's template, applying label/annotation precedence:
// node metadata > WorkloadGroup.Metadata > WorkloadGroup.Template.
// The resulting entry is owned (via OwnerReference) by the WorkloadGroup, so it
// is garbage-collected when the group is deleted.
func workloadEntryFromGroup(name string, proxy *model.Proxy, groupCfg *config.Config) *config.Config {
	group := groupCfg.Spec.(*v1alpha3.WorkloadGroup)
	entry := group.Template.DeepCopy()
	entry.Address = proxy.IPAddresses[0]
	// TODO move labels out of entry
	// node metadata > WorkloadGroup.Metadata > WorkloadGroup.Template
	if group.Metadata != nil && group.Metadata.Labels != nil {
		entry.Labels = mergeLabels(entry.Labels, group.Metadata.Labels)
	}
	// Explicitly do not use proxy.Labels, as it is only initialized *after* we register the workload,
	// and it would be circular, as it will set the labels based on the WorkloadEntry -- but we are creating
	// the workload entry.
	if proxy.Metadata.Labels != nil {
		entry.Labels = mergeLabels(entry.Labels, proxy.Metadata.Labels)
		// the label has been converted to "istio-locality: region/zone/subzone"
		// in pilot/pkg/xds/ads.go, and `/` is not allowed in k8s label value.
		// Instead of converting again, we delete it since has set WorkloadEntry.Locality
		delete(entry.Labels, model.LocalityLabel)
	}
	// the auto-registration-group annotation marks the entry for cleanup logic
	annotations := map[string]string{annotation.IoIstioAutoRegistrationGroup.Name: groupCfg.Name}
	if group.Metadata != nil && group.Metadata.Annotations != nil {
		annotations = mergeLabels(annotations, group.Metadata.Annotations)
	}
	if proxy.Metadata.Network != "" {
		entry.Network = string(proxy.Metadata.Network)
	}
	if proxy.Locality != nil {
		entry.Locality = util.LocalityToString(proxy.Locality)
	}
	// a readiness probe on the proxy enables health-status updates for the entry
	if proxy.Metadata.ProxyConfig != nil && proxy.Metadata.ProxyConfig.ReadinessProbe != nil {
		annotations[status.WorkloadEntryHealthCheckAnnotation] = "true"
	}
	return &config.Config{
		Meta: config.Meta{
			GroupVersionKind: gvk.WorkloadEntry,
			Name:             name,
			Namespace:        proxy.Metadata.Namespace,
			Labels:           entry.Labels,
			Annotations:      annotations,
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: groupCfg.GroupVersionKind.GroupVersion(),
				Kind:       groupCfg.GroupVersionKind.Kind,
				Name:       groupCfg.Name,
				UID:        kubetypes.UID(groupCfg.UID),
				Controller: &workloadGroupIsController,
			}},
		},
		Spec: entry,
		// TODO status fields used for garbage collection
		Status: nil,
	}
}
// isAutoRegisteredWorkloadEntry reports whether wle was created by
// auto-registration (identified by the auto-registration-group annotation).
func isAutoRegisteredWorkloadEntry(wle *config.Config) bool {
	if wle == nil {
		return false
	}
	return wle.Annotations[annotation.IoIstioAutoRegistrationGroup.Name] != ""
}
// isHealthCheckedWorkloadEntry reports whether wle is a pre-existing (not
// auto-registered) entry that some istiod has taken control of for health checks.
func isHealthCheckedWorkloadEntry(wle *config.Config) bool {
	if wle == nil || isAutoRegisteredWorkloadEntry(wle) {
		return false
	}
	return wle.Annotations[annotation.IoIstioWorkloadController.Name] != ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package health
import (
"google.golang.org/protobuf/types/known/timestamppb"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/autoregistration/internal/state"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/status"
"istio.io/istio/pkg/kube/controllers"
istiolog "istio.io/istio/pkg/log"
)
var log = istiolog.RegisterScope("wle", "wle controller debugging")
// HealthEvent is a health-status report sent by istio-agent for a workload.
type HealthEvent struct {
	// whether or not the agent thought the target is healthy
	Healthy bool `json:"healthy,omitempty"`
	// error message propagated; only meaningful when Healthy is false
	Message string `json:"errMessage,omitempty"`
}
// HealthCondition pairs a proxy and its WorkloadEntry with the Istio status
// condition to be written; it is the unit of work on the health queue.
type HealthCondition struct {
	// proxy is the reporting proxy (provides ID and namespace).
	proxy *model.Proxy
	// entryName is the WorkloadEntry whose status will be updated.
	entryName string
	// condition is the Healthy condition derived from the HealthEvent.
	condition *v1alpha1.IstioCondition
}
// Controller knows how to update health status of a workload.
type Controller struct {
	// stateStore persists health conditions onto WorkloadEntry resources.
	stateStore *state.Store
	// healthCondition is a fifo queue used for updating health check status
	healthCondition controllers.Queue
}
// NewController returns a new Controller instance. maxRetries bounds how many
// times a failed health-status update is retried before being dropped.
func NewController(stateStore *state.Store, maxRetries int) *Controller {
	ctl := &Controller{stateStore: stateStore}
	ctl.healthCondition = controllers.NewQueue("healthcheck",
		controllers.WithMaxAttempts(maxRetries),
		controllers.WithGenericReconciler(ctl.updateWorkloadEntryHealth))
	return ctl
}
// Run processes queued health-status updates until stop is closed.
func (c *Controller) Run(stop <-chan struct{}) {
	c.healthCondition.Run(stop)
}
// QueueWorkloadEntryHealth enqueues the associated WorkloadEntries health status.
func (c *Controller) QueueWorkloadEntryHealth(proxy *model.Proxy, event HealthEvent) {
	// we assume that the workload entry exists
	// if auto registration does not exist, try looking
	// up in NodeMetadata
	entryName, _ := proxy.WorkloadEntry()
	if entryName == "" {
		// no associated entry: nothing to write the status onto
		log.Errorf("unable to derive WorkloadEntry for health update for %v", proxy.ID)
		return
	}
	c.healthCondition.Add(transformHealthEvent(proxy, entryName, event))
}
// transformHealthEvent converts an agent-reported HealthEvent into the
// HealthCondition work item consumed by the health queue.
func transformHealthEvent(proxy *model.Proxy, entryName string, event HealthEvent) HealthCondition {
	cond := &v1alpha1.IstioCondition{
		Type: status.ConditionHealthy,
		// last probe and transition are the same because
		// we only send on transition in the agent
		LastProbeTime:      timestamppb.Now(),
		LastTransitionTime: timestamppb.Now(),
	}
	if event.Healthy {
		cond.Status = status.StatusTrue
	} else {
		// carry the agent's error message only on failure
		cond.Status = status.StatusFalse
		cond.Message = event.Message
	}
	return HealthCondition{
		proxy:     proxy,
		entryName: entryName,
		condition: cond,
	}
}
// updateWorkloadEntryHealth updates the associated WorkloadEntries health status
// based on the corresponding health check performed by istio-agent.
func (c *Controller) updateWorkloadEntryHealth(obj any) error {
	cond := obj.(HealthCondition)
	ns := cond.proxy.Metadata.Namespace
	return c.stateStore.UpdateHealth(cond.proxy.ID, cond.entryName, ns, cond.condition)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package health
import (
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/model/status"
"istio.io/istio/pkg/config"
)
// IsEligibleForHealthStatusUpdates returns true if a given WorkloadEntry
// is allowed to receive health status updates sent by an Istio Proxy.
//
// Consider a workload eligible for health status updates as long as the
// WorkloadEntryHealthCheckAnnotation is present (no matter what the value is).
// In case the annotation is present but the value is not "true", the proxy should be allowed
// to send health status updates, config health condition should be updated accordingly,
// however reported health status should not come into effect.
func IsEligibleForHealthStatusUpdates(wle *config.Config) bool {
	if wle == nil {
		return false
	}
	// only presence matters, not the annotation's value
	if _, ok := wle.Annotations[status.WorkloadEntryHealthCheckAnnotation]; ok {
		return true
	}
	return false
}
// HasHealthCondition returns true if a given WorkloadEntry has ConditionHealthy
// condition.
func HasHealthCondition(wle *config.Config) bool {
	if wle == nil {
		return false
	}
	// status may be absent or of another type; both mean "no condition"
	istioStatus, ok := wle.Status.(*v1alpha1.IstioStatus)
	if !ok {
		return false
	}
	return status.GetCondition(istioStatus.Conditions, status.ConditionHealthy) != nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package state
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/status"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
istiolog "istio.io/istio/pkg/log"
)
var log = istiolog.RegisterScope("wle", "wle controller debugging")
// Store knows how to keep internal state as part of a WorkloadEntry resource.
type Store struct {
	// store is the backing config store used to read and update entries.
	store model.ConfigStoreController
	// cb lets the store consult its owning controller (e.g. for ownership checks).
	cb StoreCallbacks
}
// StoreCallbacks represents a contract between a Store and
// a autoregistration.Controller.
type StoreCallbacks interface {
	// IsControllerOf returns true if a given WorkloadEntry is connected
	// to this istiod instance.
	IsControllerOf(wle *config.Config) bool
}
// NewStore returns a new Store instance backed by the given config store and
// consulting cb for ownership decisions.
func NewStore(store model.ConfigStoreController, cb StoreCallbacks) *Store {
	s := &Store{store: store, cb: cb}
	return s
}
// UpdateHealth updates the associated WorkloadEntries health status
// based on the corresponding health check performed by istio-agent.
//
// proxyID is used for logging/error context only; the entry is addressed by
// entryName/entryNs. The update is silently skipped when this istiod is no
// longer the entry's controller or when a newer probe is already recorded.
func (s *Store) UpdateHealth(proxyID, entryName, entryNs string, condition *v1alpha1.IstioCondition) error {
	// get previous status
	cfg := s.store.Get(gvk.WorkloadEntry, entryName, entryNs)
	if cfg == nil {
		// Fix: the original message printed only the namespace where the entry
		// identifier was expected; report both namespace and name.
		return fmt.Errorf("failed to update health status for %v: WorkloadEntry %s/%s not found", proxyID, entryNs, entryName)
	}
	// The workloadentry has reconnected to the other istiod
	if !s.cb.IsControllerOf(cfg) {
		return nil
	}
	// check if the existing health status is newer than this one
	if wleStatus, ok := cfg.Status.(*v1alpha1.IstioStatus); ok {
		healthCondition := status.GetCondition(wleStatus.Conditions, status.ConditionHealthy)
		if healthCondition != nil && healthCondition.LastProbeTime.AsTime().After(condition.LastProbeTime.AsTime()) {
			// stale probe: keep the newer recorded status
			return nil
		}
	}
	// replace the updated status
	wle := status.UpdateConfigCondition(*cfg, condition)
	// update the status
	if _, err := s.store.UpdateStatus(wle); err != nil {
		return fmt.Errorf("error while updating WorkloadEntry health status for %s: %w", proxyID, err)
	}
	log.Debugf("updated health status of %v to %v", proxyID, condition)
	return nil
}
// DeleteHealthCondition updates WorkloadEntry of a workload that is not using auto-registration
// to remove information about the health status (since we can no longer be certain about it).
// A NotFound error from the store is treated as success.
func (s *Store) DeleteHealthCondition(wle config.Config) error {
	updated := status.DeleteConfigCondition(wle, status.ConditionHealthy)
	// update the status
	if _, err := s.store.UpdateStatus(updated); err != nil && !errors.IsNotFound(err) {
		return fmt.Errorf("error while removing WorkloadEntry health status for %s/%s: %v", wle.Namespace, wle.Name, err)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"bytes"
"crypto/tls"
"crypto/x509"
"fmt"
"os"
"path"
"strings"
"time"
"istio.io/istio/pilot/pkg/features"
tb "istio.io/istio/pilot/pkg/trustbundle"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/sleep"
"istio.io/istio/security/pkg/k8s/chiron"
"istio.io/istio/security/pkg/pki/ca"
certutil "istio.io/istio/security/pkg/util"
)
const (
// defaultCertGracePeriodRatio is the default length of certificate rotation grace period,
// configured as the ratio of the certificate TTL.
defaultCertGracePeriodRatio = 0.5
// rootCertPollingInterval is the interval for polling the root cert and re-signing
// the istiod cert when it changes.
rootCertPollingInterval = 60 * time.Second
// defaultCACertPath is the default CA certificate path.
// Currently, custom CA path is not supported; no API to get custom CA cert yet.
defaultCACertPath = "./var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
)
// initDNSCerts will create the certificates to be used by Istiod GRPC server and webhooks.
// If the certificate creation fails - for example no support in K8S - returns an error.
// Will use the mesh.yaml DiscoveryAddress to find the default expected address of the control plane,
// with an environment variable allowing override.
//
// The signing path is selected by features.PilotCertProvider:
//   - "k8s.io/<signer>" (with an RA configured): sign via the Kubernetes CSR API using a custom signer.
//   - "kubernetes": sign via the Kubernetes CSR API using the cluster CA.
//   - "istiod": self-sign using the in-process Istio CA.
//   - anything else: treat as a user-mounted cert provider; only the root bundle is read here.
//
// In every path the resulting key/cert/root are published through s.istiodCertBundleWatcher.
func (s *Server) initDNSCerts() error {
	var certChain, keyPEM, caBundle []byte
	var err error
	pilotCertProviderName := features.PilotCertProvider
	if strings.HasPrefix(pilotCertProviderName, constants.CertProviderKubernetesSignerPrefix) && s.RA != nil {
		// Kubernetes CSR API with a custom signer name extracted from the provider string.
		signerName := strings.TrimPrefix(pilotCertProviderName, constants.CertProviderKubernetesSignerPrefix)
		log.Infof("Generating K8S-signed cert for %v using signer %v", s.dnsNames, signerName)
		certChain, keyPEM, _, err = chiron.GenKeyCertK8sCA(s.kubeClient.Kube(),
			strings.Join(s.dnsNames, ","), "", signerName, true, SelfSignedCACertTTL.Get())
		if err != nil {
			return fmt.Errorf("failed generating key and cert by kubernetes: %v", err)
		}
		caBundle, err = s.RA.GetRootCertFromMeshConfig(signerName)
		if err != nil {
			return err
		}
		// MeshConfig:Add callback for mesh config update
		s.environment.AddMeshHandler(func() {
			// Re-sign the istiod cert when the signer's root in mesh config changes.
			newCaBundle, _ := s.RA.GetRootCertFromMeshConfig(signerName)
			if newCaBundle != nil && !bytes.Equal(newCaBundle, s.istiodCertBundleWatcher.GetKeyCertBundle().CABundle) {
				newCertChain, newKeyPEM, _, err := chiron.GenKeyCertK8sCA(s.kubeClient.Kube(),
					strings.Join(s.dnsNames, ","), "", signerName, true, SelfSignedCACertTTL.Get())
				if err != nil {
					// NOTE(review): Fatalf terminates istiod from a mesh-config callback —
					// presumably intentional (cert regeneration failure is unrecoverable); confirm.
					log.Fatalf("failed regenerating key and cert for istiod by kubernetes: %v", err)
				}
				s.istiodCertBundleWatcher.SetAndNotify(newKeyPEM, newCertChain, newCaBundle)
			}
		})
		s.addStartFunc("istiod server certificate rotation", func(stop <-chan struct{}) error {
			go func() {
				// Track TTL of DNS cert and renew cert in accordance to grace period.
				s.RotateDNSCertForK8sCA(stop, "", signerName, true, SelfSignedCACertTTL.Get())
			}()
			return nil
		})
	} else if pilotCertProviderName == constants.CertProviderKubernetes {
		// Legacy Kubernetes CSR API path; the root is the mounted service account CA.
		log.Infof("Generating K8S-signed cert for %v", s.dnsNames)
		certChain, keyPEM, _, err = chiron.GenKeyCertK8sCA(s.kubeClient.Kube(),
			strings.Join(s.dnsNames, ","), defaultCACertPath, "", true, SelfSignedCACertTTL.Get())
		if err != nil {
			return fmt.Errorf("failed generating key and cert by kubernetes: %v", err)
		}
		caBundle, err = os.ReadFile(defaultCACertPath)
		if err != nil {
			return fmt.Errorf("failed reading %s: %v", defaultCACertPath, err)
		}
		s.addStartFunc("istiod server certificate rotation", func(stop <-chan struct{}) error {
			go func() {
				// Track TTL of DNS cert and renew cert in accordance to grace period.
				s.RotateDNSCertForK8sCA(stop, defaultCACertPath, "", true, SelfSignedCACertTTL.Get())
			}()
			return nil
		})
	} else if pilotCertProviderName == constants.CertProviderIstiod {
		// Self-sign with the in-process Istio CA.
		certChain, keyPEM, err = s.CA.GenKeyCert(s.dnsNames, SelfSignedCACertTTL.Get(), false)
		if err != nil {
			return fmt.Errorf("failed generating istiod key cert %v", err)
		}
		log.Infof("Generating istiod-signed cert for %v:\n %s", s.dnsNames, certChain)
		fileBundle, err := detectSigningCABundle()
		if err != nil {
			return fmt.Errorf("unable to determine signing file format %v", err)
		}
		istioGenerated, detectedSigningCABundle := false, false
		if _, err := os.Stat(fileBundle.SigningKeyFile); err == nil {
			detectedSigningCABundle = true
			if _, err := os.Stat(path.Join(LocalCertDir.Get(), ca.IstioGenerated)); err == nil {
				istioGenerated = true
			}
		}
		// check if signing key file exists the cert dir and if the istio-generated file
		// exists (only if USE_CACERTS_FOR_SELF_SIGNED_CA is enabled)
		if !detectedSigningCABundle || (features.UseCacertsForSelfSignedCA && istioGenerated) {
			// istio-generated root: poll for root rotation and re-sign the DNS cert.
			log.Infof("Use istio-generated cacerts at %v or istio-ca-secret", fileBundle.SigningKeyFile)
			caBundle = s.CA.GetCAKeyCertBundle().GetRootCertPem()
			s.addStartFunc("istiod server certificate rotation", func(stop <-chan struct{}) error {
				go func() {
					// regenerate istiod key cert when root cert changes.
					s.watchRootCertAndGenKeyCert(stop)
				}()
				return nil
			})
		} else {
			// Plugged-in (user-provided) CA: trust the mounted root cert file; no rotation loop.
			log.Infof("DNS certs use plugged-in cert at %v", fileBundle.SigningKeyFile)
			caBundle, err = os.ReadFile(fileBundle.RootCertFile)
			if err != nil {
				return fmt.Errorf("failed reading %s: %v", fileBundle.RootCertFile, err)
			}
		}
	} else {
		// Custom cert provider: istiod only reads the root bundle from the well-known mount.
		customCACertPath := security.DefaultRootCertFilePath
		log.Infof("User specified cert provider: %v, mounted in a well known location %v",
			features.PilotCertProvider, customCACertPath)
		caBundle, err = os.ReadFile(customCACertPath)
		if err != nil {
			return fmt.Errorf("failed reading %s: %v", customCACertPath, err)
		}
	}
	// Publish the key, cert chain and root bundle to all watchers (GRPC server, webhooks).
	s.istiodCertBundleWatcher.SetAndNotify(keyPEM, certChain, caBundle)
	return nil
}
// TODO(hzxuzonghu): support async notification instead of polling the CA root cert.
//
// watchRootCertAndGenKeyCert periodically compares the CA root cert against the
// last observed one; on change it re-signs the istiod DNS cert and notifies the
// bundle watcher. Returns when stop is closed.
func (s *Server) watchRootCertAndGenKeyCert(stop <-chan struct{}) {
	// lastRoot holds the most recently observed CA root certificate.
	lastRoot := s.CA.GetCAKeyCertBundle().GetRootCertPem()
	for sleep.Until(stop, rootCertPollingInterval) {
		currentRoot := s.CA.GetCAKeyCertBundle().GetRootCertPem()
		if bytes.Equal(lastRoot, currentRoot) {
			continue
		}
		lastRoot = currentRoot
		chain, key, genErr := s.CA.GenKeyCert(s.dnsNames, SelfSignedCACertTTL.Get(), false)
		if genErr != nil {
			log.Errorf("failed generating istiod key cert %v", genErr)
			continue
		}
		s.istiodCertBundleWatcher.SetAndNotify(key, chain, lastRoot)
		log.Infof("regenerated istiod dns cert: %s", chain)
	}
}
// RotateDNSCertForK8sCA renews the istiod DNS certificate through the Kubernetes
// CSR API before it expires. It sleeps until the grace-period threshold of the
// current cert's TTL, re-signs, and publishes the new key/cert with the existing
// CA bundle. Returns when stop is closed.
func (s *Server) RotateDNSCertForK8sCA(stop <-chan struct{},
	defaultCACertPath string,
	signerName string,
	approveCsr bool,
	requestedLifetime time.Duration,
) {
	// Renewal fires once the remaining TTL falls inside the grace period.
	util := certutil.NewCertUtil(int(defaultCertGracePeriodRatio * 100))
	for {
		delay, _ := util.GetWaitTime(s.istiodCertBundleWatcher.GetKeyCertBundle().CertPem, time.Now())
		if !sleep.Until(stop, delay) {
			return
		}
		chain, key, _, genErr := chiron.GenKeyCertK8sCA(s.kubeClient.Kube(),
			strings.Join(s.dnsNames, ","), defaultCACertPath, signerName, approveCsr, requestedLifetime)
		if genErr != nil {
			log.Errorf("failed regenerating key and cert for istiod by kubernetes: %v", genErr)
			continue
		}
		s.istiodCertBundleWatcher.SetAndNotify(key, chain, s.istiodCertBundleWatcher.GetCABundle())
	}
}
// updateRootCertAndGenKeyCert when CA certs is updated, it generates new dns certs and notifies keycertbundle about the changes
func (s *Server) updateRootCertAndGenKeyCert() error {
	log.Infof("update root cert and generate new dns certs")
	rootCert := s.CA.GetCAKeyCertBundle().GetRootCertPem()
	chain, key, genErr := s.CA.GenKeyCert(s.dnsNames, SelfSignedCACertTTL.Get(), false)
	if genErr != nil {
		return genErr
	}
	if features.MultiRootMesh {
		// Trigger trust anchor update, this will send PCDS to all sidecars.
		log.Infof("Update trust anchor with new root cert")
		update := &tb.TrustAnchorUpdate{
			TrustAnchorConfig: tb.TrustAnchorConfig{Certs: []string{string(rootCert)}},
			Source:            tb.SourceIstioCA,
		}
		if err := s.workloadTrustBundle.UpdateTrustAnchor(update); err != nil {
			log.Errorf("failed to update trust anchor from source Istio CA, err: %v", err)
			return err
		}
	}
	s.istiodCertBundleWatcher.SetAndNotify(key, chain, rootCert)
	return nil
}
// initCertificateWatches sets up watches for the plugin dns certs.
// It loads the user-supplied key/cert/root once, then watches the key and cert
// files and reloads the bundle (debounced) whenever either changes.
func (s *Server) initCertificateWatches(tlsOptions TLSOptions) error {
	// Initial load and notification of the provided key/cert/root files.
	if err := s.istiodCertBundleWatcher.SetFromFilesAndNotify(tlsOptions.KeyFile, tlsOptions.CertFile, tlsOptions.CaCertFile); err != nil {
		return fmt.Errorf("set keyCertBundle failed: %v", err)
	}
	// TODO: Setup watcher for root and restart server if it changes.
	for _, file := range []string{tlsOptions.CertFile, tlsOptions.KeyFile} {
		log.Infof("adding watcher for certificate %s", file)
		if err := s.fileWatcher.Add(file); err != nil {
			return fmt.Errorf("could not watch %v: %v", file, err)
		}
	}
	s.addStartFunc("certificate rotation", func(stop <-chan struct{}) error {
		go func() {
			// keyCertTimerC debounces bursts of file events: the bundle is reloaded
			// once, watchDebounceDelay after the first event of a burst; it is nil
			// while no reload is pending.
			var keyCertTimerC <-chan time.Time
			for {
				select {
				case <-keyCertTimerC:
					keyCertTimerC = nil
					if err := s.istiodCertBundleWatcher.SetFromFilesAndNotify(tlsOptions.KeyFile, tlsOptions.CertFile, tlsOptions.CaCertFile); err != nil {
						log.Errorf("Setting keyCertBundle failed: %v", err)
					}
				case <-s.fileWatcher.Events(tlsOptions.CertFile):
					if keyCertTimerC == nil {
						keyCertTimerC = time.After(watchDebounceDelay)
					}
				case <-s.fileWatcher.Events(tlsOptions.KeyFile):
					if keyCertTimerC == nil {
						keyCertTimerC = time.After(watchDebounceDelay)
					}
				case err := <-s.fileWatcher.Errors(tlsOptions.CertFile):
					log.Errorf("error watching %v: %v", tlsOptions.CertFile, err)
				case err := <-s.fileWatcher.Errors(tlsOptions.KeyFile):
					log.Errorf("error watching %v: %v", tlsOptions.KeyFile, err)
				case <-stop:
					return
				}
			}
		}()
		return nil
	})
	return nil
}
// reloadIstiodCert reloads the istiod serving certificate each time watchCh
// fires, logging (but not propagating) reload failures. Returns when stopCh closes.
func (s *Server) reloadIstiodCert(watchCh <-chan struct{}, stopCh <-chan struct{}) {
	for {
		select {
		case <-watchCh:
			if err := s.loadIstiodCert(); err != nil {
				log.Errorf("reload istiod cert failed: %v", err)
			}
		case <-stopCh:
			return
		}
	}
}
// loadIstiodCert parses the current key/cert bundle into a TLS key pair, logs
// each certificate's issuer/subject/validity, and installs the pair as the
// active istiod serving certificate under certMu.
func (s *Server) loadIstiodCert() error {
	bundle := s.istiodCertBundleWatcher.GetKeyCertBundle()
	pair, err := tls.X509KeyPair(bundle.CertPem, bundle.KeyPem)
	if err != nil {
		return fmt.Errorf("istiod loading x509 key pairs failed: %v", err)
	}
	for _, der := range pair.Certificate {
		parsed, parseErr := x509.ParseCertificates(der)
		if parseErr != nil {
			// This can rarely happen, just in case.
			return fmt.Errorf("x509 cert - ParseCertificates() error: %v", parseErr)
		}
		for _, cert := range parsed {
			log.Infof("x509 cert - Issuer: %q, Subject: %q, SN: %x, NotBefore: %q, NotAfter: %q",
				cert.Issuer, cert.Subject, cert.SerialNumber,
				cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
		}
	}
	log.Info("Istiod certificates are reloaded")
	s.certMu.Lock()
	defer s.certMu.Unlock()
	s.istiodCert = &pair
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"strings"
gogoproto "github.com/gogo/protobuf/proto" // nolint: depguard
"google.golang.org/protobuf/proto"
"istio.io/istio/pkg/config"
)
// hasIstioIOKey reports whether any key in m contains the substring "istio.io".
func hasIstioIOKey(m map[string]string) bool {
	for k := range m {
		if strings.Contains(k, "istio.io") {
			return true
		}
	}
	return false
}

// needsPush checks whether the passed in config has same spec and hence push needs
// to be triggered. This is to avoid unnecessary pushes only when labels have changed
// for example.
func needsPush(prev config.Config, curr config.Config) bool {
	if prev.GroupVersionKind != curr.GroupVersionKind {
		// This should never happen.
		return true
	}
	// If the config is not Istio, let us just push.
	if !strings.HasSuffix(prev.GroupVersionKind.Group, "istio.io") {
		return true
	}
	// If current/previous metadata has "*istio.io" label/annotation, just push
	if hasIstioIOKey(curr.Meta.Labels) || hasIstioIOKey(curr.Meta.Annotations) ||
		hasIstioIOKey(prev.Meta.Labels) || hasIstioIOKey(prev.Meta.Annotations) {
		return true
	}
	// Compare specs: prefer protobuf equality when both sides are proto messages,
	// fall back to gogo-proto equality, and be conservative (push) otherwise.
	prevspecProto, okProtoP := prev.Spec.(proto.Message)
	currspecProto, okProtoC := curr.Spec.(proto.Message)
	if okProtoP && okProtoC {
		return !proto.Equal(prevspecProto, currspecProto)
	}
	prevspecGogo, okGogoP := prev.Spec.(gogoproto.Message)
	currspecGogo, okGogoC := curr.Spec.(gogoproto.Message)
	if okGogoP && okGogoC {
		return !gogoproto.Equal(prevspecGogo, currspecGogo)
	}
	// Unknown spec type: push to be safe.
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"fmt"
"net/url"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/autoregistration"
configaggregate "istio.io/istio/pilot/pkg/config/aggregate"
"istio.io/istio/pilot/pkg/config/kube/crdclient"
"istio.io/istio/pilot/pkg/config/kube/gateway"
ingress "istio.io/istio/pilot/pkg/config/kube/ingress"
"istio.io/istio/pilot/pkg/config/memory"
configmonitor "istio.io/istio/pilot/pkg/config/monitor"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/leaderelection"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/status/distribution"
"istio.io/istio/pkg/adsc"
"istio.io/istio/pkg/config/analysis/incluster"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/revisions"
)
// ConfigSourceAddressScheme enumerates the URL schemes supported by the config store.
type ConfigSourceAddressScheme string
const (
	// File (fs:///PATH) will load local files. This replaces --configDir.
	// example fs:///tmp/configroot
	// PATH can be mounted from a config map or volume
	File ConfigSourceAddressScheme = "fs"
	// XDS (xds://ADDRESS) - load XDS-over-MCP sources
	// example xds://127.0.0.1:49133
	XDS ConfigSourceAddressScheme = "xds"
	// Kubernetes (k8s://) - load in-cluster k8s controller
	// example k8s://
	Kubernetes ConfigSourceAddressScheme = "k8s"
)
// initConfigController creates the config controller in the pilotConfig.
// Config may come from MCP/XDS sources, a local file directory, or the
// in-cluster Kubernetes CRD store; all selected stores are aggregated into a
// single cached controller that is started as a start function.
func (s *Server) initConfigController(args *PilotArgs) error {
	s.initStatusController(args, features.EnableStatus && features.EnableDistributionTracking)
	meshConfig := s.environment.Mesh()
	if len(meshConfig.ConfigSources) > 0 {
		// Using MCP for config.
		if err := s.initConfigSources(args); err != nil {
			return err
		}
	} else if args.RegistryOptions.FileDir != "" {
		// Local files - should be added even if other options are specified
		store := memory.Make(collections.Pilot)
		configController := memory.NewController(store)
		err := s.makeFileMonitor(args.RegistryOptions.FileDir, args.RegistryOptions.KubeOptions.DomainSuffix, configController)
		if err != nil {
			return err
		}
		s.ConfigStores = append(s.ConfigStores, configController)
	} else {
		// Default: in-cluster Kubernetes CRD-backed config store.
		err := s.initK8SConfigStore(args)
		if err != nil {
			return err
		}
	}
	// If running in ingress mode (requires k8s), wrap the config controller.
	if hasKubeRegistry(args.RegistryOptions.Registries) && meshConfig.IngressControllerMode != meshconfig.MeshConfig_OFF {
		// Wrap the config controller with a cache.
		// Supporting only Ingress/v1 means we lose support of Kubernetes 1.18
		// Supporting only Ingress/v1beta1 means we lose support of Kubernetes 1.22
		// Since supporting both in a monolith controller is painful due to lack of usable conversion logic between
		// the two versions.
		// As a compromise, we instead just fork the controller. Once 1.18 support is no longer needed, we can drop the old controller
		s.ConfigStores = append(s.ConfigStores,
			ingress.NewController(s.kubeClient, s.environment.Watcher, args.RegistryOptions.KubeOptions))
		// Ingress status is written only while this istiod holds the leader lock.
		s.addTerminatingStartFunc("ingress status", func(stop <-chan struct{}) error {
			leaderelection.
				NewLeaderElection(args.Namespace, args.PodName, leaderelection.IngressController, args.Revision, s.kubeClient).
				AddRunFunction(func(leaderStop <-chan struct{}) {
					ingressSyncer := ingress.NewStatusSyncer(s.environment.Watcher, s.kubeClient, args.RegistryOptions.KubeOptions)
					// Start informers again. This fixes the case where informers for namespace do not start,
					// as we create them only after acquiring the leader lock
					// Note: stop here should be the overall pilot stop, NOT the leader election stop. We are
					// basically lazy loading the informer, if we stop it when we lose the lock we will never
					// recreate it again.
					s.kubeClient.RunAndWait(stop)
					log.Infof("Starting ingress controller")
					ingressSyncer.Run(leaderStop)
				}).
				Run(stop)
			return nil
		})
	}
	// Wrap the config controller with a cache.
	aggregateConfigController, err := configaggregate.MakeCache(s.ConfigStores)
	if err != nil {
		return err
	}
	s.configController = aggregateConfigController
	// Create the config store.
	s.environment.ConfigStore = aggregateConfigController
	// Defer starting the controller until after the service is created.
	s.addStartFunc("config controller", func(stop <-chan struct{}) error {
		go s.configController.Run(stop)
		return nil
	})
	return nil
}
// initK8SConfigStore sets up the Kubernetes CRD-backed config store and, when the
// corresponding features are enabled, the Gateway API controllers (status writer
// and deployment controller) plus the in-process analysis controller.
// It is a no-op when no kube client is available.
func (s *Server) initK8SConfigStore(args *PilotArgs) error {
	if s.kubeClient == nil {
		return nil
	}
	configController := s.makeKubeConfigController(args)
	s.ConfigStores = append(s.ConfigStores, configController)
	if features.EnableGatewayAPI {
		if s.statusManager == nil && features.EnableGatewayAPIStatus {
			s.initStatusManager(args)
		}
		gwc := gateway.NewController(s.kubeClient, configController, s.kubeClient.CrdWatcher().WaitForCRD,
			s.environment.CredentialsController, args.RegistryOptions.KubeOptions)
		s.environment.GatewayAPIController = gwc
		s.ConfigStores = append(s.ConfigStores, s.environment.GatewayAPIController)
		// Gateway API status is written only while this istiod holds the leader lock;
		// writes are disabled again when leadership is lost.
		s.addTerminatingStartFunc("gateway status", func(stop <-chan struct{}) error {
			leaderelection.
				NewLeaderElection(args.Namespace, args.PodName, leaderelection.GatewayStatusController, args.Revision, s.kubeClient).
				AddRunFunction(func(leaderStop <-chan struct{}) {
					log.Infof("Starting gateway status writer")
					gwc.SetStatusWrite(true, s.statusManager)
					// Trigger a push so we can recompute status
					s.XDSServer.ConfigUpdate(&model.PushRequest{
						Full:   true,
						Reason: model.NewReasonStats(model.GlobalUpdate),
					})
					<-leaderStop
					log.Infof("Stopping gateway status writer")
					gwc.SetStatusWrite(false, nil)
				}).
				Run(stop)
			return nil
		})
		if features.EnableGatewayAPIDeploymentController {
			s.addTerminatingStartFunc("gateway deployment controller", func(stop <-chan struct{}) error {
				leaderelection.
					NewPerRevisionLeaderElection(args.Namespace, args.PodName, leaderelection.GatewayDeploymentController, args.Revision, s.kubeClient).
					AddRunFunction(func(leaderStop <-chan struct{}) {
						// We can only run this if the Gateway CRD is created
						if s.kubeClient.CrdWatcher().WaitForCRD(gvr.KubernetesGateway, leaderStop) {
							nsFilter := args.RegistryOptions.KubeOptions.DiscoveryNamespacesFilter
							if !features.EnableEnhancedResourceScoping {
								nsFilter = nil
							}
							tagWatcher := revisions.NewTagWatcher(s.kubeClient, args.Revision)
							controller := gateway.NewDeploymentController(s.kubeClient, s.clusterID, s.environment,
								s.webhookInfo.getWebhookConfig, s.webhookInfo.addHandler, tagWatcher, args.Revision, nsFilter)
							// Start informers again. This fixes the case where informers for namespace do not start,
							// as we create them only after acquiring the leader lock
							// Note: stop here should be the overall pilot stop, NOT the leader election stop. We are
							// basically lazy loading the informer, if we stop it when we lose the lock we will never
							// recreate it again.
							s.kubeClient.RunAndWait(stop)
							go tagWatcher.Run(leaderStop)
							controller.Run(leaderStop)
						}
					}).
					Run(stop)
				return nil
			})
		}
	}
	if features.EnableAnalysis {
		if err := s.initInprocessAnalysisController(args); err != nil {
			return err
		}
	}
	var err error
	// RWConfigStore is the writable aggregate over all config stores, used by status writers.
	s.RWConfigStore, err = configaggregate.MakeWriteableCache(s.ConfigStores, configController)
	if err != nil {
		return err
	}
	s.XDSServer.WorkloadEntryController = autoregistration.NewController(configController, args.PodName, args.KeepaliveOptions.MaxServerConnectionAge)
	return nil
}
// initConfigSources will process mesh config 'configSources' and initialize
// associated configs.
// Supported schemes are fs:// (local file dir), xds:// (XDS-over-MCP) and
// k8s:// (in-cluster CRD store); unknown schemes are logged and skipped.
func (s *Server) initConfigSources(args *PilotArgs) (err error) {
	for _, configSource := range s.environment.Mesh().ConfigSources {
		srcAddress, err := url.Parse(configSource.Address)
		if err != nil {
			return fmt.Errorf("invalid config URL %s %v", configSource.Address, err)
		}
		scheme := ConfigSourceAddressScheme(srcAddress.Scheme)
		switch scheme {
		case File:
			if srcAddress.Path == "" {
				return fmt.Errorf("invalid fs config URL %s, contains no file path", configSource.Address)
			}
			// Back the file source with an in-memory store fed by a file monitor.
			store := memory.Make(collections.Pilot)
			configController := memory.NewController(store)
			err := s.makeFileMonitor(srcAddress.Path, args.RegistryOptions.KubeOptions.DomainSuffix, configController)
			if err != nil {
				return err
			}
			s.ConfigStores = append(s.ConfigStores, configController)
			log.Infof("Started File configSource %s", configSource.Address)
		case XDS:
			// Dial the remote XDS config server and mirror its config into memory.
			xdsMCP, err := adsc.New(srcAddress.Host, &adsc.ADSConfig{
				InitialDiscoveryRequests: adsc.ConfigInitialRequests(),
				Config: adsc.Config{
					Namespace: args.Namespace,
					Workload:  args.PodName,
					Revision:  args.Revision,
					Meta: model.NodeMetadata{
						Generator: "api",
						// To reduce transported data if upstream server supports. Especially for custom servers.
						IstioRevision: args.Revision,
					}.ToStruct(),
					GrpcOpts: []grpc.DialOption{
						args.KeepaliveOptions.ConvertToClientOption(),
						// Because we use the custom grpc options for adsc, here we should
						// explicitly set transport credentials.
						// TODO: maybe we should use the tls settings within ConfigSource
						// to secure the connection between istiod and remote xds server.
						grpc.WithTransportCredentials(insecure.NewCredentials()),
					},
				},
			})
			if err != nil {
				return fmt.Errorf("failed to dial XDS %s %v", configSource.Address, err)
			}
			store := memory.Make(collections.Pilot)
			// TODO: enable namespace filter for memory controller
			configController := memory.NewController(store)
			configController.RegisterHasSyncedHandler(xdsMCP.HasSynced)
			xdsMCP.Store = configController
			err = xdsMCP.Run()
			if err != nil {
				return fmt.Errorf("MCP: failed running %v", err)
			}
			s.ConfigStores = append(s.ConfigStores, configController)
			log.Infof("Started XDS configSource %s", configSource.Address)
		case Kubernetes:
			// Only the bare k8s:// form (empty or "/" path) is supported; a path
			// would indicate a remote cluster, which is not implemented.
			if srcAddress.Path == "" || srcAddress.Path == "/" {
				err2 := s.initK8SConfigStore(args)
				if err2 != nil {
					log.Warnf("Error loading k8s: %v", err2)
					return err2
				}
				log.Infof("Started Kubernetes configSource %s", configSource.Address)
			} else {
				log.Warnf("Not implemented, ignore: %v", configSource.Address)
				// TODO: handle k8s:// scheme for remote cluster. Use same mechanism as service registry,
				// using the cluster name as key to match a secret.
			}
		default:
			log.Warnf("Ignoring unsupported config source: %v", configSource.Address)
		}
	}
	return nil
}
// initInprocessAnalysisController spins up an instance of Galley which serves no purpose other than
// running Analyzers for status updates. The Status Updater will eventually need to allow input from istiod
// to support config distribution status as well.
// The analyzers run only on the elected leader to avoid duplicate status writes.
func (s *Server) initInprocessAnalysisController(args *PilotArgs) error {
	if s.statusManager == nil {
		s.initStatusManager(args)
	}
	s.addStartFunc("analysis controller", func(stop <-chan struct{}) error {
		go leaderelection.
			NewLeaderElection(args.Namespace, args.PodName, leaderelection.AnalyzeController, args.Revision, s.kubeClient).
			AddRunFunction(func(leaderStop <-chan struct{}) {
				cont, err := incluster.NewController(leaderStop, s.RWConfigStore,
					s.kubeClient, args.Revision, args.Namespace, s.statusManager, args.RegistryOptions.KubeOptions.DomainSuffix)
				if err != nil {
					// NOTE(review): controller creation errors are silently dropped here —
					// presumably acceptable for best-effort analysis; confirm before changing.
					return
				}
				cont.Run(leaderStop)
			}).Run(stop)
		return nil
	})
	return nil
}
// initStatusController wires up distribution tracking (when enabled) and, when
// writeStatus is true, the leader-elected status distribution controller that
// writes distribution state back to the cluster.
func (s *Server) initStatusController(args *PilotArgs, writeStatus bool) {
	if s.statusManager == nil && writeStatus {
		s.initStatusManager(args)
	}
	if features.EnableDistributionTracking {
		s.statusReporter = &distribution.Reporter{
			UpdateInterval: features.StatusUpdateInterval,
			PodName:        args.PodName,
		}
		s.addStartFunc("status reporter init", func(stop <-chan struct{}) error {
			s.statusReporter.Init(s.environment.GetLedger(), stop)
			return nil
		})
		s.addTerminatingStartFunc("status reporter", func(stop <-chan struct{}) error {
			if writeStatus {
				s.statusReporter.Start(s.kubeClient.Kube(), args.Namespace, args.PodName, stop)
			}
			return nil
		})
		s.XDSServer.StatusReporter = s.statusReporter
	}
	if writeStatus {
		// Distribution status is written only by the elected leader.
		s.addTerminatingStartFunc("status distribution", func(stop <-chan struct{}) error {
			leaderelection.
				NewLeaderElection(args.Namespace, args.PodName, leaderelection.StatusController, args.Revision, s.kubeClient).
				AddRunFunction(func(leaderStop <-chan struct{}) {
					// Controller should be created for calling the run function every time, so it can
					// avoid concurrently calling of informer Run() for controller in controller.Start
					controller := distribution.NewController(s.kubeClient.RESTConfig(), args.Namespace, s.RWConfigStore, s.statusManager)
					s.statusReporter.SetController(controller)
					controller.Start(leaderStop)
				}).Run(stop)
			return nil
		})
	}
}
// makeKubeConfigController builds the CRD-backed config client from the pilot
// arguments, applying the discovery namespaces filter when one is configured.
func (s *Server) makeKubeConfigController(args *PilotArgs) *crdclient.Client {
	options := crdclient.Option{
		Revision:     args.Revision,
		DomainSuffix: args.RegistryOptions.KubeOptions.DomainSuffix,
		Identifier:   "crd-controller",
	}
	if filter := args.RegistryOptions.KubeOptions.DiscoveryNamespacesFilter; filter != nil {
		options.NamespacesFilter = filter.Filter
	}
	return crdclient.New(s.kubeClient, options)
}
// makeFileMonitor wires a file-backed config snapshot into configController and
// registers a start function that runs the monitor once the server starts.
func (s *Server) makeFileMonitor(fileDir string, domainSuffix string, configController model.ConfigStore) error {
	snapshot := configmonitor.NewFileSnapshot(fileDir, collections.Pilot, domainSuffix)
	monitor := configmonitor.NewMonitor("file-monitor", configController, snapshot.ReadConfigFiles, fileDir)
	// Defer starting the file monitor until after the service is created.
	s.addStartFunc("file monitor", func(stop <-chan struct{}) error {
		monitor.Start(stop)
		return nil
	})
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"net/http"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/apigen"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pilot/pkg/networking/grpcgen"
"istio.io/istio/pilot/pkg/xds"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/cluster"
)
// InitGenerators registers the per-TypeUrl XDS resource generators on the
// discovery server, including the gRPC, API, event and debug generator aliases.
// Secret generation is enabled only when a credentials controller is available.
func InitGenerators(
	s *xds.DiscoveryServer,
	cg core.ConfigGenerator,
	systemNameSpace string,
	clusterID cluster.ID,
	internalDebugMux *http.ServeMux,
) {
	environment := s.Env
	// Shared generator instances reused under multiple TypeUrls.
	eds := &xds.EdsGenerator{Cache: s.Cache, EndpointIndex: environment.EndpointIndex}
	ecds := &xds.EcdsGenerator{ConfigGenerator: cg}
	workload := &xds.WorkloadGenerator{Server: s}
	grpcGen := &grpcgen.GrpcConfigGenerator{}
	m := map[string]model.XdsResourceGenerator{
		v3.ClusterType:                &xds.CdsGenerator{ConfigGenerator: cg},
		v3.ListenerType:               &xds.LdsGenerator{ConfigGenerator: cg},
		v3.RouteType:                  &xds.RdsGenerator{ConfigGenerator: cg},
		v3.EndpointType:               eds,
		v3.ExtensionConfigurationType: ecds,
		v3.NameTableType:              &xds.NdsGenerator{ConfigGenerator: cg},
		v3.ProxyConfigType:            &xds.PcdsGenerator{TrustBundle: environment.TrustBundle},
		v3.AddressType:                workload,
		v3.WorkloadType:               workload,
		v3.WorkloadAuthorizationType:  &xds.WorkloadRBACGenerator{Server: s},
		"grpc":                        grpcGen,
		"grpc/" + v3.EndpointType:     eds,
		"grpc/" + v3.ListenerType:     grpcGen,
		"grpc/" + v3.RouteType:        grpcGen,
		"grpc/" + v3.ClusterType:      grpcGen,
		"api":                         apigen.NewGenerator(environment.ConfigStore),
		"api/" + v3.EndpointType:      eds,
		"event":                       xds.NewStatusGen(s),
		v3.DebugType:                  xds.NewDebugGen(s, systemNameSpace, internalDebugMux),
	}
	if environment.CredentialsController != nil {
		m[v3.SecretType] = xds.NewSecretGen(environment.CredentialsController, s.Cache, clusterID, environment.Mesh())
		ecds.SetCredController(environment.CredentialsController)
	}
	s.Generators = m
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"time"
"github.com/fsnotify/fsnotify"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/api/security/v1beta1"
"istio.io/istio/pilot/pkg/features"
securityModel "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/security/pkg/cmd"
"istio.io/istio/security/pkg/pki/ca"
"istio.io/istio/security/pkg/pki/ra"
caserver "istio.io/istio/security/pkg/server/ca"
"istio.io/istio/security/pkg/server/ca/authenticate"
"istio.io/istio/security/pkg/util"
)
// caOptions carries the configuration used to bootstrap the Istio CA / RA in istiod.
type caOptions struct {
	// ExternalCAType selects an external CA integration; empty disables it.
	ExternalCAType   ra.CaExternalType
	// ExternalCASigner is the signer used with the external CA
	// (presumably a Kubernetes CSR signer name — TODO confirm against the ra package).
	ExternalCASigner string
	// domain to use in SPIFFE identity URLs
	TrustDomain      string
	Namespace        string
	// Authenticators used to authenticate clients of the CA gRPC service.
	Authenticators   []security.Authenticator
	CertSignerDomain string
	// DiscoveryFilter restricts which namespaces are considered — TODO confirm exact scope.
	DiscoveryFilter  namespace.DiscoveryFilter
}
// Based on istio_ca main - removing creation of Secrets with private keys in all namespaces and install complexity.
//
// For backward compat, will preserve support for the "cacerts" Secret used for self-signed certificates.
// It is mounted in the same location, and if found will be used - creating the secret is sufficient, no need for
// extra options.
//
// In old installer, the LocalCertDir is hardcoded to /etc/cacerts and mounted from "cacerts" secret.
//
// Support for signing other root CA has been removed - too dangerous, no clear use case.
//
// Default config, for backward compat with Citadel:
// - if "cacerts" secret exists in istio-system, will be mounted. It may contain an optional "root-cert.pem",
// with additional roots and optional {ca-key, ca-cert, cert-chain}.pem user-provided root CA.
// - if user-provided root CA is not found, the Secret "istio-ca-secret" is used, with ca-cert.pem and ca-key.pem files.
// - if neither is found, istio-ca-secret will be created.
//
// - a config map "istio-security" with a "caTLSRootCert" file will be used for root cert, and created if needed.
// The config map was used by node agent - no longer possible to use in sds-agent, but we still save it for
// backward compat. Will be removed with the node-agent. sds-agent is calling NewCitadelClient directly, using
// K8S root.
var (
	// LocalCertDir replaces the "cert-chain", "signing-cert" and "signing-key" flags in citadel - Istio installer is
	// requires a secret named "cacerts" with specific files inside.
	LocalCertDir = env.Register("ROOT_CA_DIR", "./etc/cacerts",
		"Location of a local or mounted CA root")
	useRemoteCerts = env.Register("USE_REMOTE_CERTS", false,
		"Whether to try to load CA certs from config Kubernetes cluster. Used for external Istiod.")
	workloadCertTTL = env.Register("DEFAULT_WORKLOAD_CERT_TTL",
		cmd.DefaultWorkloadCertTTL,
		"The default TTL of issued workload certificates. Applied when the client sets a "+
			"non-positive TTL in the CSR.")
	maxWorkloadCertTTL = env.Register("MAX_WORKLOAD_CERT_TTL",
		cmd.DefaultMaxWorkloadCertTTL,
		"The max TTL of issued workload certificates.")
	SelfSignedCACertTTL = env.Register("CITADEL_SELF_SIGNED_CA_CERT_TTL",
		cmd.DefaultSelfSignedCACertTTL,
		"The TTL of self-signed CA root certificate.")
	selfSignedRootCertCheckInterval = env.Register("CITADEL_SELF_SIGNED_ROOT_CERT_CHECK_INTERVAL",
		cmd.DefaultSelfSignedRootCertCheckInterval,
		"The interval that self-signed CA checks its root certificate "+
			"expiration time and rotates root certificate. Setting this interval "+
			"to zero or a negative value disables automated root cert check and "+
			"rotation. This interval is suggested to be larger than 10 minutes.")
	selfSignedRootCertGracePeriodPercentile = env.Register("CITADEL_SELF_SIGNED_ROOT_CERT_GRACE_PERIOD_PERCENTILE",
		cmd.DefaultRootCertGracePeriodPercentile,
		"Grace period percentile for self-signed root cert.")
	enableJitterForRootCertRotator = env.Register("CITADEL_ENABLE_JITTER_FOR_ROOT_CERT_ROTATOR",
		true,
		"If true, set up a jitter to start root cert rotator. "+
			"Jitter selects a backoff time in seconds to start root cert rotator, "+
			"and the back off time is below root cert check interval.")
	// k8sInCluster reads the env var Kubernetes injects into every pod; its
	// presence indicates in-cluster execution.
	k8sInCluster = env.Register("KUBERNETES_SERVICE_HOST", "",
		"Kubernetes service host, set automatically when running in-cluster")
	// This value can also be extracted from the mounted token
	trustedIssuer = env.Register("TOKEN_ISSUER", "",
		"OIDC token issuer. If set, will be used to check the tokens.")
	audience = env.Register("AUDIENCE", "",
		"Expected audience in the tokens. ")
	caRSAKeySize = env.Register("CITADEL_SELF_SIGNED_CA_RSA_KEY_SIZE", 2048,
		"Specify the RSA key size to use for self-signed Istio CA certificates.")
	// TODO: Likely to be removed and added to mesh config
	externalCaType = env.Register("EXTERNAL_CA", "",
		"External CA Integration Type. Permitted value is ISTIOD_RA_KUBERNETES_API.").Get()
	// TODO: Likely to be removed and added to mesh config
	k8sSigner = env.Register("K8S_SIGNER", "",
		"Kubernetes CA Signer type. Valid from Kubernetes 1.18").Get()
)
// RunCA will start the cert signing GRPC service on an existing server.
// Protected by installer options: the CA will be started only if the JWT token in /var/run/secrets
// is mounted. If it is missing - for example old versions of K8S that don't support such tokens -
// we will not start the cert-signing server, since pods will have no way to authenticate.
func (s *Server) RunCA(grpc *grpc.Server, ca caserver.CertificateAuthority, opts *caOptions) {
	issuer := trustedIssuer.Get()
	aud := audience.Get()

	// Best effort: fill in issuer/audience from istiod's own mounted JWT when they
	// were not configured explicitly.
	if raw, readErr := os.ReadFile(getJwtPath()); readErr == nil {
		payload, parseErr := detectAuthEnv(string(raw))
		if parseErr != nil {
			log.Warnf("Starting with invalid K8S JWT token: %v", parseErr)
		} else {
			if issuer == "" {
				issuer = payload.Iss
			}
			if len(aud) == 0 && len(payload.Aud) > 0 {
				aud = payload.Aud[0]
			}
		}
	}

	// The CA API uses cert with the max workload cert TTL.
	// 'hostlist' must be non-empty - but is not used since a grpc server is passed.
	// Adds client cert auth and kube (sds enabled)
	caServer, startErr := caserver.New(ca, maxWorkloadCertTTL.Get(), opts.Authenticators, s.kubeClient, opts.DiscoveryFilter)
	if startErr != nil {
		log.Fatalf("failed to create istio ca server: %v", startErr)
	}

	// TODO: if not set, parse Istiod's own token (if present) and get the issuer. The same issuer is used
	// for all tokens - no need to configure twice. The token may also include cluster info to auto-configure
	// networking properties.
	if issuer != "" && // issuer set explicitly or extracted from our own JWT
		k8sInCluster.Get() == "" { // not running in cluster - in cluster use direct call to apiserver
		// Add a custom authenticator using standard JWT validation, if not running in K8S
		// When running inside K8S - we can use the built-in validator, which also check pod removal (invalidation).
		jwtRule := v1beta1.JWTRule{Issuer: issuer, Audiences: []string{aud}}
		if oidcAuth, jwtErr := authenticate.NewJwtAuthenticator(&jwtRule); jwtErr == nil {
			caServer.Authenticators = append(caServer.Authenticators, oidcAuth)
			log.Info("Using out-of-cluster JWT authentication")
		} else {
			log.Info("K8S token doesn't support OIDC, using only in-cluster auth")
		}
	}

	caServer.Register(grpc)

	log.Info("Istiod CA has started")
}
// detectAuthEnv will use the JWT token that is mounted in istiod to set the default audience
// and trust domain for Istiod, if not explicitly defined.
// K8S will use the same kind of tokens for the pods, and the value in istiod's own token is
// simplest and safest way to have things match.
//
// Note that K8S is not required to use JWT tokens - we will fallback to the defaults
// or require explicit user option for K8S clusters using opaque tokens.
func detectAuthEnv(jwt string) (*authenticate.JwtPayload, error) {
	// A JWT is header.payload.signature; we only need the payload.
	segments := strings.Split(jwt, ".")
	if len(segments) != 3 {
		return nil, fmt.Errorf("invalid JWT parts: %s", jwt)
	}

	decoded, err := util.DecodeJwtPart(segments[1])
	if err != nil {
		return nil, fmt.Errorf("failed to decode jwt: %v", err.Error())
	}

	parsed := &authenticate.JwtPayload{}
	if err := json.Unmarshal(decoded, parsed); err != nil {
		return nil, fmt.Errorf("failed to unmarshal jwt: %v", err.Error())
	}
	return parsed, nil
}
// detectSigningCABundle determines in which format the signing ca files are created.
// kubernetes tls secrets mount files as tls.crt,tls.key,ca.crt
// istiod secret is ca-cert.pem ca-key.pem cert-chain.pem root-cert.pem
func detectSigningCABundle() (ca.SigningCAFileBundle, error) {
	certDir := LocalCertDir.Get()
	tlsSigningFile := path.Join(certDir, ca.TLSSecretCACertFile)

	// Probe for the kubernetes.io/tls layout (tls.crt present).
	_, err := os.Stat(tlsSigningFile)
	switch {
	case err == nil:
		log.Info("Using kubernetes.io/tls secret type for signing ca files")
		rootCert := path.Join(certDir, ca.TLSSecretRootCertFile)
		return ca.SigningCAFileBundle{
			RootCertFile: rootCert,
			CertChainFiles: []string{
				tlsSigningFile,
				rootCert,
			},
			SigningCertFile: tlsSigningFile,
			SigningKeyFile:  path.Join(certDir, ca.TLSSecretCAPrivateKeyFile),
		}, nil
	case !os.IsNotExist(err):
		// Stat failed for a reason other than absence - surface it.
		return ca.SigningCAFileBundle{}, err
	}

	// Fall back to the istiod file layout.
	log.Info("Using istiod file format for signing ca files")
	return ca.SigningCAFileBundle{
		RootCertFile:    path.Join(certDir, ca.RootCertFile),
		CertChainFiles:  []string{path.Join(certDir, ca.CertChainFile)},
		SigningCertFile: path.Join(certDir, ca.CACertFile),
		SigningKeyFile:  path.Join(certDir, ca.CAPrivateKeyFile),
	}, nil
}
// loadCACerts loads an existing `cacerts` Secret if the files aren't mounted locally.
// By default, a cacerts Secret would be mounted during pod startup due to the
// Istiod Deployment configuration. But with external Istiod, we want to be
// able to load cacerts from a remote cluster instead.
//
// The secret contents are written to dir so the rest of the CA bootstrap can
// use the normal file-based loading path.
func (s *Server) loadCACerts(caOpts *caOptions, dir string) error {
	// Without a config cluster client there is nothing to fetch.
	if s.kubeClient == nil {
		return nil
	}

	// If the signing key is already on disk (secret mounted locally), nothing to do.
	signingKeyFile := path.Join(dir, ca.CAPrivateKeyFile)
	if _, err := os.Stat(signingKeyFile); err == nil {
		return nil
	} else if !os.IsNotExist(err) {
		// Fix: the previous message claimed the file "already exists", but this branch
		// is reached only when Stat failed for some other reason (e.g. permissions).
		return fmt.Errorf("failed to stat signing key file %s: %v", signingKeyFile, err)
	}

	secret, err := s.kubeClient.Kube().CoreV1().Secrets(caOpts.Namespace).Get(
		context.TODO(), ca.CACertsSecret, metav1.GetOptions{})
	if err != nil {
		if errors.IsNotFound(err) {
			// No cacerts secret in the config cluster; the caller falls back to
			// self-signed CA handling.
			return nil
		}
		return err
	}

	log.Infof("cacerts Secret found in config cluster, saving contents to %s", dir)
	if err := os.MkdirAll(dir, 0o700); err != nil {
		return err
	}
	// Persist every key of the secret as a file; 0600 since these include private keys.
	for key, data := range secret.Data {
		filename := path.Join(dir, key)
		if err := os.WriteFile(filename, data, 0o600); err != nil {
			return err
		}
	}
	return nil
}
// handleEvent handles the events on cacerts related files.
// If a create/write (modified) event occurs, it verifies that the newly
// introduced cacerts bundle is an intermediate CA generated from the current
// root-cert.pem, then updates the keycertbundle and generates new DNS certs.
func handleEvent(s *Server) {
	log.Info("Update Istiod cacerts")

	var newCABundle []byte
	var err error

	// Root cert currently held in memory; used to decide whether the on-disk
	// bundle is a compatible update.
	currentCABundle := s.CA.GetCAKeyCertBundle().GetRootCertPem()

	fileBundle, err := detectSigningCABundle()
	if err != nil {
		log.Errorf("unable to determine signing file format %v", err)
		return
	}
	newCABundle, err = os.ReadFile(fileBundle.RootCertFile)
	if err != nil {
		log.Errorf("failed reading root-cert.pem: %v", err)
		return
	}

	// Only updating intermediate CA is supported now
	if !bytes.Equal(currentCABundle, newCABundle) {
		if !features.MultiRootMesh {
			log.Warn("Multi root is disabled, updating new ROOT-CA not supported")
			return
		}

		// in order to support root ca rotation, or we are removing the old ca,
		// we need to make the new CA bundle contain both old and new CA certs
		// (one bundle must be a superset of the other; anything else is rejected).
		if bytes.Contains(currentCABundle, newCABundle) ||
			bytes.Contains(newCABundle, currentCABundle) {
			log.Info("Updating new ROOT-CA")
		} else {
			log.Warn("Updating new ROOT-CA not supported")
			return
		}
	}

	// Reload the full signing bundle from disk, verifying key/cert consistency.
	err = s.CA.GetCAKeyCertBundle().UpdateVerifiedKeyCertBundleFromFile(
		fileBundle.SigningCertFile,
		fileBundle.SigningKeyFile,
		fileBundle.CertChainFiles,
		fileBundle.RootCertFile)
	if err != nil {
		log.Errorf("Failed to update new Plug-in CA certs: %v", err)
		return
	}

	// Regenerate istiod's own DNS certs so they chain to the new intermediate.
	err = s.updateRootCertAndGenKeyCert()
	if err != nil {
		log.Errorf("Failed generating plugged-in istiod key cert: %v", err)
		return
	}

	log.Info("Istiod has detected the newly added intermediate CA and updated its key and certs accordingly")
}
// handleCACertsFileWatch processes events from the cacerts file watcher.
// Write/Create events are debounced with a 100ms timer, so a burst of file
// updates (e.g. a secret re-mount rewriting several files) triggers a single
// reload via handleEvent. The loop exits when the watcher channels close or
// the server stops.
func (s *Server) handleCACertsFileWatch() {
	var timerC <-chan time.Time
	for {
		select {
		case <-timerC:
			// Debounce window elapsed - process the accumulated changes.
			timerC = nil
			handleEvent(s)

		case event, ok := <-s.cacertsWatcher.Events:
			if !ok {
				log.Debug("plugin cacerts watch stopped")
				return
			}
			if event.Has(fsnotify.Write) || event.Has(fsnotify.Create) {
				if timerC == nil {
					timerC = time.After(100 * time.Millisecond)
				}
			}

		case err, ok := <-s.cacertsWatcher.Errors:
			if !ok {
				// Fix: a closed Errors channel yields nil forever; the previous
				// `if err != nil` check kept looping, busy-spinning on the closed
				// channel until another case happened to be selected.
				return
			}
			log.Errorf("failed to catch events on cacerts file: %v", err)
			return

		case <-s.internalStop:
			return
		}
	}
}
// addCACertsFileWatcher registers dir with the cacerts fsnotify watcher and
// returns any registration error.
func (s *Server) addCACertsFileWatcher(dir string) error {
	if err := s.cacertsWatcher.Add(dir); err != nil {
		log.Infof("failed to add cacerts file watcher for %s: %v", dir, err)
		return err
	}
	log.Infof("Added cacerts files watcher at %v", dir)
	return nil
}
// initCACertsWatcher initializes the cacerts (/etc/cacerts) directory.
// In particular it monitors 'ca-key.pem', 'ca-cert.pem', 'root-cert.pem'
// and 'cert-chain.pem'. Failures are logged and tolerated: the server runs
// without live cacerts reloads in that case.
func (s *Server) initCACertsWatcher() {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Warnf("failed to add CAcerts watcher: %v", err)
		return
	}
	s.cacertsWatcher = watcher

	if err := s.addCACertsFileWatcher(LocalCertDir.Get()); err != nil {
		log.Warnf("failed to add CAcerts file watcher: %v", err)
		return
	}

	// Consume watcher events for the lifetime of the server.
	go s.handleCACertsFileWatch()
}
// createIstioCA initializes the Istio CA signing functionality.
// - for 'plugged in', uses ./etc/cacert directory, mounted from 'cacerts' secret in k8s.
//
// Inside, the key/cert are 'ca-key.pem' and 'ca-cert.pem'. The root cert signing the intermediate is root-cert.pem,
// which may contain multiple roots. A 'cert-chain.pem' file has the full cert chain.
func (s *Server) createIstioCA(opts *caOptions) (*ca.IstioCA, error) {
	var caOpts *ca.IstioCAOptions
	var detectedSigningCABundle bool // signing key file present on disk
	var istioGenerated bool          // mounted bundle carries the "istio-generated" marker file
	var err error

	fileBundle, err := detectSigningCABundle()
	if err != nil {
		return nil, fmt.Errorf("unable to determine signing file format %v", err)
	}

	// Probe for a mounted signing bundle and the istio-generated marker.
	if _, err := os.Stat(fileBundle.SigningKeyFile); err == nil {
		detectedSigningCABundle = true
		if _, err := os.Stat(path.Join(LocalCertDir.Get(), ca.IstioGenerated)); err == nil {
			istioGenerated = true
		}
	}

	// Self-signed path: no mounted bundle, or the bundle is istio-generated and the
	// UseCacertsForSelfSignedCA feature treats it like the self-signed secret.
	if !detectedSigningCABundle || (features.UseCacertsForSelfSignedCA && istioGenerated) {
		if features.UseCacertsForSelfSignedCA && istioGenerated {
			log.Infof("IstioGenerated %s secret found, use it as the CA certificate", ca.CACertsSecret)

			// TODO(jaellio): Currently, when the USE_CACERTS_FOR_SELF_SIGNED_CA flag is true istiod
			// handles loading and updating the "cacerts" secret with the "istio-generated" key the
			// same way it handles the "istio-ca-secret" secret. Istiod utilizes a secret watch instead
			// of file watch to check for secret updates. This may change in the future, and istiod
			// will watch the file mount instead.
		}

		// Either the secret is not mounted because it is named `istio-ca-secret`,
		// or it is `cacerts` secret mounted with "istio-generated" key set.
		caOpts, err = s.createSelfSignedCACertificateOptions(&fileBundle, opts)
		if err != nil {
			return nil, err
		}
		caOpts.OnRootCertUpdate = s.updateRootCertAndGenKeyCert
	} else {
		// The secret is mounted and the "istio-generated" key is not used.
		log.Info("Use local CA certificate")

		caOpts, err = ca.NewPluggedCertIstioCAOptions(fileBundle, workloadCertTTL.Get(), maxWorkloadCertTTL.Get(), caRSAKeySize.Get())
		if err != nil {
			return nil, fmt.Errorf("failed to create an istiod CA: %v", err)
		}

		// Watch the plugged-in cert files so rotations are picked up at runtime.
		s.initCACertsWatcher()
	}
	istioCA, err := ca.NewIstioCA(caOpts)
	if err != nil {
		return nil, fmt.Errorf("failed to create an istiod CA: %v", err)
	}

	// Start root cert rotator in a separate goroutine.
	istioCA.Run(s.internalStop)

	return istioCA, nil
}
// createSelfSignedCACertificateOptions builds CA options for the self-signed
// path. With a Kubernetes client the root is managed via a Secret in the
// cluster; without one a debug, in-memory root is used (testing only).
func (s *Server) createSelfSignedCACertificateOptions(fileBundle *ca.SigningCAFileBundle, opts *caOptions) (*ca.IstioCAOptions, error) {
	if s.kubeClient == nil {
		log.Warnf(
			"Use local self-signed CA certificate for testing. Will use in-memory root CA, no K8S access and no ca key file %s",
			fileBundle.SigningKeyFile)

		caOpts, err := ca.NewSelfSignedDebugIstioCAOptions(fileBundle.RootCertFile, SelfSignedCACertTTL.Get(),
			workloadCertTTL.Get(), maxWorkloadCertTTL.Get(), opts.TrustDomain, caRSAKeySize.Get())
		if err != nil {
			return nil, fmt.Errorf("failed to create a self-signed istiod CA: %v", err)
		}
		return caOpts, nil
	}

	log.Info("Use self-signed certificate as the CA certificate")

	// Abort after 20 minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Minute)
	defer cancel()

	// rootCertFile will be added to "ca-cert.pem".
	// readSigningCertOnly set to false - it doesn't seem to be used in Citadel, nor do we have a way
	// to set it only for one job.
	caOpts, err := ca.NewSelfSignedIstioCAOptions(ctx,
		selfSignedRootCertGracePeriodPercentile.Get(), SelfSignedCACertTTL.Get(),
		selfSignedRootCertCheckInterval.Get(), workloadCertTTL.Get(),
		maxWorkloadCertTTL.Get(), opts.TrustDomain, features.UseCacertsForSelfSignedCA, true,
		opts.Namespace, s.kubeClient.Kube().CoreV1(), fileBundle.RootCertFile,
		enableJitterForRootCertRotator.Get(), caRSAKeySize.Get())
	if err != nil {
		return nil, fmt.Errorf("failed to create a self-signed istiod CA: %v", err)
	}
	return caOpts, nil
}
// createIstioRA initializes the Istio RA signing functionality.
// the caOptions defines the external provider
// ca cert can come from three sources, order matters:
// 1. Define ca cert via kubernetes secret and mount the secret through `external-ca-cert` volume
// 2. Use kubernetes ca cert `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` if signer is
//
// kubernetes built-in `kubernetes.io/legacy-unknown" signer
//
// 3. Extract from the cert-chain signed by other CSR signer.
func (s *Server) createIstioRA(opts *caOptions) (ra.RegistrationAuthority, error) {
	caCertFile := path.Join(ra.DefaultExtCACertDir, constants.CACertNamespaceConfigMapDataName)
	certSignerDomain := opts.CertSignerDomain

	if _, statErr := os.Stat(caCertFile); statErr != nil {
		if !os.IsNotExist(statErr) {
			return nil, fmt.Errorf("failed to get file info: %v", statErr)
		}
		// The mounted external CA cert is absent - choose a fallback.
		if certSignerDomain == "" {
			log.Infof("CA cert file %q not found, using %q.", caCertFile, defaultCACertPath)
			caCertFile = defaultCACertPath
		} else {
			log.Infof("CA cert file %q not found - ignoring.", caCertFile)
			caCertFile = ""
		}
	}

	if s.kubeClient == nil {
		return nil, fmt.Errorf("kubeClient is nil")
	}

	raOpts := &ra.IstioRAOptions{
		ExternalCAType:   opts.ExternalCAType,
		DefaultCertTTL:   workloadCertTTL.Get(),
		MaxCertTTL:       maxWorkloadCertTTL.Get(),
		CaSigner:         opts.ExternalCASigner,
		CaCertFile:       caCertFile,
		VerifyAppendCA:   true,
		K8sClient:        s.kubeClient.Kube(),
		TrustDomain:      opts.TrustDomain,
		CertSignerDomain: certSignerDomain,
	}
	raServer, err := ra.NewIstioRA(raOpts)
	if err != nil {
		return nil, err
	}

	// Seed trust anchors from the current mesh config and keep them updated.
	raServer.SetCACertificatesFromMeshConfig(s.environment.Mesh().CaCertificates)
	s.environment.AddMeshHandler(func() {
		meshConfig := s.environment.Mesh()
		s.RA.SetCACertificatesFromMeshConfig(meshConfig.CaCertificates)
	})
	return raServer, nil
}
// getJwtPath returns jwt path.
func getJwtPath() string {
log.Infof("JWT policy is %v", features.JwtPolicy)
switch features.JwtPolicy {
case jwt.PolicyThirdParty:
return securityModel.K8sSATrustworthyJwtFileName
case jwt.PolicyFirstParty:
return securityModel.K8sSAJwtFileName
default:
log.Infof("unknown JWT policy %v, default to certificates ", features.JwtPolicy)
return ""
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"encoding/json"
"os"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/mesh/kubemesh"
"istio.io/istio/pkg/filewatcher"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/version"
)
const (
	// defaultMeshConfigMapName is the default name of the ConfigMap with the mesh config
	// The actual name can be different - use getMeshConfigMapName
	defaultMeshConfigMapName = "istio"
	// configMapKey should match the expected MeshConfig file name (the key inside
	// the ConfigMap data).
	configMapKey = "mesh"
)
// initMeshConfiguration creates the mesh in the pilotConfig from the input arguments.
// Original/default behavior:
// - use the mounted file, if it exists.
// - use istio-REVISION if k8s is enabled
// - fallback to default
//
// If the 'SHARED_MESH_CONFIG' env is set (experimental feature in 1.10):
// - if a file exist, load it - will be merged
// - if istio-REVISION exists, will be used, even if the file is present.
// - the SHARED_MESH_CONFIG config map will also be loaded and merged.
func (s *Server) initMeshConfiguration(args *PilotArgs, fileWatcher filewatcher.FileWatcher) {
	log.Infof("initializing mesh configuration %v", args.MeshConfigFile)
	// Log the effective mesh config, build version and flags once a watcher has been
	// installed, regardless of which branch below produced it.
	defer func() {
		if s.environment.Watcher != nil {
			log.Infof("mesh configuration: %s", mesh.PrettyFormatOfMeshConfig(s.environment.Mesh()))
			log.Infof("version: %s", version.Info.String())
			argsdump, _ := json.MarshalIndent(args, "", " ")
			log.Infof("flags: %s", argsdump)
		}
	}()

	// Watcher will be merging more than one mesh config source?
	multiWatch := features.SharedMeshConfig != ""

	var err error
	// Note: !IsNotExist is also true when Stat failed for another reason; in that
	// case NewFileWatcher is still attempted and its error drops us to the k8s path.
	if _, err = os.Stat(args.MeshConfigFile); !os.IsNotExist(err) {
		s.environment.Watcher, err = mesh.NewFileWatcher(fileWatcher, args.MeshConfigFile, multiWatch)
		if err == nil {
			if multiWatch && s.kubeClient != nil {
				kubemesh.AddUserMeshConfig(
					s.kubeClient, s.environment.Watcher, args.Namespace, configMapKey, features.SharedMeshConfig, s.internalStop)
			} else {
				// Normal install no longer uses this mode - testing and special installs still use this.
				log.Warnf("Using local mesh config file %s, in cluster configs ignored", args.MeshConfigFile)
			}
			return
		}
	}

	// Config file either didn't exist or failed to load.
	if s.kubeClient == nil {
		// Use a default mesh.
		meshConfig := mesh.DefaultMeshConfig()
		s.environment.Watcher = mesh.NewFixedWatcher(meshConfig)
		log.Warnf("Using default mesh - missing file %s and no k8s client", args.MeshConfigFile)
		return
	}

	// Watch the istio ConfigMap for mesh config changes.
	// This may be necessary for external Istiod.
	configMapName := getMeshConfigMapName(args.Revision)
	multiWatcher := kubemesh.NewConfigMapWatcher(
		s.kubeClient, args.Namespace, configMapName, configMapKey, multiWatch, s.internalStop)
	s.environment.Watcher = multiWatcher
	// The same ConfigMap watcher also supplies mesh networks.
	s.environment.NetworksWatcher = multiWatcher
	log.Infof("initializing mesh networks from mesh config watcher")

	if multiWatch {
		kubemesh.AddUserMeshConfig(s.kubeClient, s.environment.Watcher, args.Namespace, configMapKey, features.SharedMeshConfig, s.internalStop)
	}
}
// initMeshNetworks loads the mesh networks configuration from the file provided
// in the args and add a watcher for changes in this file.
// No-op when a NetworksWatcher was already installed (e.g. by the mesh config
// ConfigMap watcher); falls back to a fixed empty watcher when no file is given
// or the file watcher could not be created.
func (s *Server) initMeshNetworks(args *PilotArgs, fileWatcher filewatcher.FileWatcher) {
	if s.environment.NetworksWatcher != nil {
		return
	}
	log.Info("initializing mesh networks")
	if args.NetworksConfigFile != "" {
		var err error
		// On error the watcher may be left nil; the fallback below covers that.
		s.environment.NetworksWatcher, err = mesh.NewNetworksWatcher(fileWatcher, args.NetworksConfigFile)
		if err != nil {
			log.Info(err)
		}
	}

	if s.environment.NetworksWatcher == nil {
		log.Info("mesh networks configuration not provided")
		s.environment.NetworksWatcher = mesh.NewFixedNetworksWatcher(nil)
	}
}
// getMeshConfigMapName returns the mesh ConfigMap name for a revision: the
// base name for the empty/default revision, "<base>-<revision>" otherwise.
func getMeshConfigMapName(revision string) string {
	switch revision {
	case "", "default":
		return defaultMeshConfigMapName
	default:
		return defaultMeshConfigMapName + "-" + revision
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"fmt"
"net"
"net/http"
"time"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/version"
)
// monitor wraps the standalone monitoring HTTP server (metrics + version).
type monitor struct {
	// monitoringServer is nil when no listen address was configured.
	monitoringServer *http.Server
}
const (
	// metricsPath is the HTTP path serving Prometheus metrics.
	metricsPath = "/metrics"
	// versionPath is the HTTP path serving the build version string.
	versionPath = "/version"
)
var (
	// serverStart records process start time; used to derive the uptime gauge.
	serverStart = time.Now()

	// istiod_uptime_seconds reports how long this istiod instance has been running.
	_ = monitoring.NewDerivedGauge(
		"istiod_uptime_seconds",
		"Current istiod server uptime in seconds",
	).ValueFrom(func() float64 {
		return time.Since(serverStart).Seconds()
	})

	// versionTag labels pilot_info with the full build version string.
	versionTag = monitoring.CreateLabel("version")
	// pilotVersion exposes build information; the value is constant (1) and the
	// version label carries the information.
	pilotVersion = monitoring.NewGauge(
		"pilot_info",
		"Pilot version and build information.",
	)
)
// init records the build version label on the pilot_info gauge once at startup.
func init() {
	pilotVersion.With(versionTag.Value(version.Info.String())).Record(1)
}
// addMonitor registers the Prometheus metrics handler and the version endpoint
// on the given mux. Returns an error if the exporter cannot be created.
func addMonitor(mux *http.ServeMux) error {
	exporter, err := monitoring.RegisterPrometheusExporter(nil, nil)
	if err != nil {
		return fmt.Errorf("could not set up prometheus exporter: %v", err)
	}
	mux.Handle(metricsPath, exporter)

	serveVersion := func(out http.ResponseWriter, _ *http.Request) {
		if _, werr := out.Write([]byte(version.Info.String())); werr != nil {
			log.Errorf("Unable to write version string: %v", werr)
		}
	}
	mux.HandleFunc(versionPath, serveVersion)
	return nil
}
// startMonitor sets up the self-monitoring handlers on mux and, when addr is
// non-empty, starts an HTTP server for them on that address.
//
// Deprecated: we shouldn't have 2 http ports. Will be removed after code using
// this port is removed.
func startMonitor(addr string, mux *http.ServeMux) (*monitor, error) {
	m := &monitor{}

	// get the network stuff setup
	var listener net.Listener
	if addr != "" {
		var err error
		if listener, err = net.Listen("tcp", addr); err != nil {
			return nil, fmt.Errorf("unable to listen on socket: %v", err)
		}
	}

	// NOTE: this is a temporary solution to provide bare-bones debug functionality
	// for pilot. a full design / implementation of self-monitoring and reporting
	// is coming. that design will include proper coverage of statusz/healthz type
	// functionality, in addition to how pilot reports its own metrics.
	if err := addMonitor(mux); err != nil {
		// Fix: release the listener on failure; previously it leaked here.
		if listener != nil {
			_ = listener.Close()
		}
		return nil, fmt.Errorf("could not establish self-monitoring: %v", err)
	}

	version.Info.RecordComponentBuildTag("pilot")

	if listener != nil {
		m.monitoringServer = &http.Server{
			Addr:        listener.Addr().String(),
			Handler:     mux,
			IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout
			ReadTimeout: 30 * time.Second,
		}
		go func() {
			// Serve returns on Close; the error is intentionally discarded.
			_ = m.monitoringServer.Serve(listener)
		}()
	}

	return m, nil
}
// Close shuts down the monitoring HTTP server, if one was started.
func (m *monitor) Close() error {
	if m.monitoringServer == nil {
		return nil
	}
	return m.monitoringServer.Close()
}
// initMonitor wires the pilot monitoring server into the server lifecycle:
// it starts with the server and is closed when the stop channel fires.
func (s *Server) initMonitor(addr string) error { // nolint: unparam
	s.addStartFunc("monitoring", func(stop <-chan struct{}) error {
		mon, err := startMonitor(addr, s.monitoringMux)
		if err != nil {
			return err
		}
		// Shut the monitor down when the server stops.
		go func() {
			<-stop
			closeErr := mon.Close()
			log.Debugf("Monitoring server terminated: %v", closeErr)
		}()
		return nil
	})
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"crypto/tls"
"fmt"
"time"
"istio.io/istio/pilot/pkg/features"
kubecontroller "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/ctrlz"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/keepalive"
)
// RegistryOptions provide configuration options for the configuration controller. If FileDir is set, that directory will
// be monitored for CRD yaml files and will update the controller as those files change (This is used for testing
// purposes). Otherwise, a CRD client is created based on the configuration.
type RegistryOptions struct {
	// If FileDir is set, the below kubernetes options are ignored
	FileDir string

	// Registries lists the service registry providers to enable.
	Registries []string

	// Kubernetes controller options
	KubeOptions kubecontroller.Options
	// ClusterRegistriesNamespace specifies where the multi-cluster secret resides
	ClusterRegistriesNamespace string
	// KubeConfig is the path to the kubeconfig file to use.
	KubeConfig string

	// DistributionCacheRetention controls how long distribution tracking data is retained.
	DistributionCacheRetention time.Duration

	// DistributionTrackingEnabled turns config distribution tracking on or off.
	DistributionTrackingEnabled bool
}
// PilotArgs provides all of the configuration parameters for the Pilot discovery service.
type PilotArgs struct {
	ServerOptions      DiscoveryServerOptions // listener addresses and TLS settings
	InjectionOptions   InjectionOptions       // sidecar injection config location
	PodName            string                 // this pod's name (defaults from POD_NAME)
	Namespace          string                 // namespace istiod runs in (defaults from POD_NAMESPACE)
	Revision           string                 // control plane revision, as in the "istio.io/rev" label
	MeshConfigFile     string                 // path to a local mesh config file, if any
	NetworksConfigFile string                 // path to a local mesh networks file, if any
	RegistryOptions    RegistryOptions        // service registry configuration
	CtrlZOptions       *ctrlz.Options         // ctrlz introspection options
	KeepaliveOptions   *keepalive.Options     // gRPC keepalive settings
	ShutdownDuration   time.Duration          // grace period used for graceful shutdown
	JwtRule            string                 // JWT rule for istiod authentication (defaults from JWT_RULE)
}
// DiscoveryServerOptions contains options for create a new discovery server instance.
type DiscoveryServerOptions struct {
	// The listening address for HTTP (debug). If the port in the address is empty or "0" (as in "127.0.0.1:" or "[::1]:0")
	// a port number is automatically chosen.
	HTTPAddr string

	// The listening address for HTTPS (webhooks). If the port in the address is empty or "0" (as in "127.0.0.1:" or "[::1]:0")
	// a port number is automatically chosen.
	// If the address is empty, the secure port is disabled, and the
	// webhooks are registered on the HTTP port - a gateway in front will
	// terminate TLS instead.
	HTTPSAddr string

	// The listening address for gRPC. If the port in the address is empty or "0" (as in "127.0.0.1:" or "[::1]:0")
	// a port number is automatically chosen.
	GRPCAddr string

	// The listening address for the monitoring port. If the port in the address is empty or "0" (as in "127.0.0.1:" or "[::1]:0")
	// a port number is automatically chosen.
	MonitoringAddr string

	// EnableProfiling controls whether profiling endpoints are enabled.
	EnableProfiling bool

	// Optional TLS configuration
	TLSOptions TLSOptions

	// The listening address for secured gRPC. If the port in the address is empty or "0" (as in "127.0.0.1:" or "[::1]:0")
	// a port number is automatically chosen.
	SecureGRPCAddr string
}
// InjectionOptions configures the sidecar injection subsystem.
type InjectionOptions struct {
	// Directory of injection related config files.
	InjectionDirectory string
}
// TLSOptions is optional TLS parameters for Istiod server.
type TLSOptions struct {
	// CaCertFile is a CA certificate file path; presumably used to verify peer
	// certificates - confirm with the TLS setup code.
	CaCertFile string
	// CertFile is the server certificate file path.
	CertFile string
	// KeyFile is the server private key file path.
	KeyFile string
	// TLSCipherSuites holds cipher suite names as provided by configuration;
	// parsed into CipherSuits by PilotArgs.Complete via TLSCipherSuites().
	TLSCipherSuites []string
	// NOTE(review): field name is a typo of "CipherSuites" but is exported,
	// so it is kept for compatibility.
	CipherSuits []uint16 // This is the parsed cipher suites
}
var (
	// PodNamespace is the namespace istiod runs in, from the POD_NAMESPACE env var;
	// defaults to the istio-system namespace.
	PodNamespace = env.Register("POD_NAMESPACE", constants.IstioSystemNamespace, "").Get()
	// PodName is the name of the istiod pod, from the POD_NAME env var.
	PodName = env.Register("POD_NAME", "", "").Get()
	// JwtRule is the JWT rule used by istiod authentication, from the JWT_RULE env var.
	JwtRule = env.Register("JWT_RULE", "",
		"The JWT rule used by istiod authentication").Get()
)

// Revision is the value of the Istio control plane revision, e.g. "canary",
// and is the value used by the "istio.io/rev" label.
var Revision = env.Register("REVISION", "", "").Get()
// NewPilotArgs constructs PilotArgs with default values, then applies the
// supplied initialization functions in order.
func NewPilotArgs(initFuncs ...func(*PilotArgs)) *PilotArgs {
	args := &PilotArgs{}
	args.applyDefaults()
	for _, initFn := range initFuncs {
		initFn(args)
	}
	return args
}
// applyDefaults populates PilotArgs from environment-derived package defaults.
// ClusterRegistriesNamespace intentionally follows the already-defaulted
// Namespace value.
func (p *PilotArgs) applyDefaults() {
	p.Namespace = PodNamespace
	p.PodName = PodName
	p.Revision = Revision
	p.JwtRule = JwtRule
	p.KeepaliveOptions = keepalive.DefaultOption()
	p.RegistryOptions.DistributionTrackingEnabled = features.EnableDistributionTracking
	p.RegistryOptions.DistributionCacheRetention = features.DistributionHistoryRetention
	p.RegistryOptions.ClusterRegistriesNamespace = p.Namespace
}
// Complete resolves derived configuration: it parses the configured TLS cipher
// suite names into their numeric IDs.
func (p *PilotArgs) Complete() error {
	ids, err := TLSCipherSuites(p.ServerOptions.TLSOptions.TLSCipherSuites)
	if err != nil {
		return err
	}
	p.ServerOptions.TLSOptions.CipherSuits = ids
	return nil
}
// allCiphers returns a name->ID map of every cipher suite Go knows about,
// covering both the secure and the insecure (legacy) suites.
func allCiphers() map[string]uint16 {
	supported := make(map[string]uint16, len(tls.CipherSuites())+len(tls.InsecureCipherSuites()))
	for _, group := range [][]*tls.CipherSuite{tls.InsecureCipherSuites(), tls.CipherSuites()} {
		for _, suite := range group {
			supported[suite.Name] = suite.ID
		}
	}
	return supported
}
// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed.
// Both secure and insecure (legacy) suite names are accepted; an unknown name
// yields an error. An empty/nil input returns a nil slice, meaning "no explicit
// restriction".
func TLSCipherSuites(cipherNames []string) ([]uint16, error) {
	if len(cipherNames) == 0 {
		return nil, nil
	}
	// Pre-size: exactly one ID per requested name.
	ciphersIntSlice := make([]uint16, 0, len(cipherNames))
	possibleCiphers := allCiphers()
	for _, cipher := range cipherNames {
		intValue, ok := possibleCiphers[cipher]
		if !ok {
			return nil, fmt.Errorf("cipher suite %s not supported or doesn't exist", cipher)
		}
		ciphersIntSlice = append(ciphersIntSlice, intValue)
	}
	return ciphersIntSlice, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"net"
"net/http"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/fsnotify/fsnotify"
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/reflection"
"k8s.io/client-go/rest"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/security/v1beta1"
kubecredentials "istio.io/istio/pilot/pkg/credentials/kube"
"istio.io/istio/pilot/pkg/features"
istiogrpc "istio.io/istio/pilot/pkg/grpc"
"istio.io/istio/pilot/pkg/keycertbundle"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3"
"istio.io/istio/pilot/pkg/server"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pilot/pkg/status"
"istio.io/istio/pilot/pkg/status/distribution"
tb "istio.io/istio/pilot/pkg/trustbundle"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/ctrlz"
"istio.io/istio/pkg/filewatcher"
"istio.io/istio/pkg/h2c"
istiokeepalive "istio.io/istio/pkg/keepalive"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/kube/multicluster"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/security/pkg/pki/ca"
"istio.io/istio/security/pkg/pki/ra"
"istio.io/istio/security/pkg/server/ca/authenticate"
"istio.io/istio/security/pkg/server/ca/authenticate/kubeauth"
)
const (
	// watchDebounceDelay debounces file watcher events to minimize noise in logs
	// and avoid repeated reloads for a single logical change.
	watchDebounceDelay = 100 * time.Millisecond
)
// init disables gRPC tracing globally for the process.
func init() {
	// Disable gRPC tracing. It has performance impacts (See https://github.com/grpc/grpc-go/issues/695)
	grpc.EnableTracing = false
}
// readinessProbe defines a function that will be used to indicate whether a server is ready.
type readinessProbe func() bool
// Server contains the runtime configuration for the Pilot discovery service.
type Server struct {
	XDSServer *xds.DiscoveryServer

	// clusterID identifies the cluster this istiod instance is running in.
	clusterID   cluster.ID
	environment *model.Environment

	kubeClient kubelib.Client

	multiclusterController *multicluster.Controller

	configController model.ConfigStoreController
	ConfigStores     []model.ConfigStoreController
	serviceEntryController *serviceentry.Controller

	httpServer  *http.Server // debug, monitoring and readiness Server.
	httpAddr    string
	httpsServer *http.Server // webhooks HTTPS Server.

	grpcServer        *grpc.Server
	grpcAddress       string
	secureGrpcServer  *grpc.Server
	secureGrpcAddress string

	// monitoringMux listens on monitoringAddr(:15014).
	// Currently runs prometheus monitoring and debug (if enabled).
	monitoringMux *http.ServeMux
	// internalDebugMux is a mux for *internal* calls to the debug interface. That is, authentication is disabled.
	internalDebugMux *http.ServeMux

	// httpMux listens on the httpAddr (8080).
	// If a Gateway is used in front and https is off it is also multiplexing
	// the rest of the features if their port is empty.
	// Currently runs readiness and debug (if enabled)
	httpMux *http.ServeMux

	// httpsMux listens on the httpsAddr(15017), handling webhooks
	// If the address is empty, the webhooks will be set on the default httpPort.
	httpsMux *http.ServeMux // webhooks

	// fileWatcher used to watch mesh config, networks and certificates.
	fileWatcher filewatcher.FileWatcher

	// cacertsWatcher watches the certificates for changes and triggers a notification to Istiod.
	cacertsWatcher *fsnotify.Watcher
	dnsNames       []string

	CA *ca.IstioCA
	RA ra.RegistrationAuthority

	// TrustAnchors for workload to workload mTLS
	workloadTrustBundle *tb.TrustBundle
	certMu              sync.RWMutex
	istiodCert          *tls.Certificate
	// istiodCertBundleWatcher notifies subscribers when the istiod cert bundle is updated.
	istiodCertBundleWatcher *keycertbundle.Watcher
	server                  server.Instance

	readinessProbes map[string]readinessProbe
	readinessFlags  *readinessFlags

	// duration used for graceful shutdown.
	shutdownDuration time.Duration

	// internalStop is closed when the server is shutdown. This should be avoided as much as possible, in
	// favor of AddStartFunc. This is only required if we *must* start something outside of this process.
	// For example, everything depends on mesh config, so we use it there rather than trying to sequence everything
	// in AddStartFunc
	internalStop chan struct{}

	webhookInfo *webhookInfo

	statusReporter *distribution.Reporter
	statusManager  *status.Manager
	// RWConfigStore is the configstore which allows updates, particularly for status.
	RWConfigStore model.ConfigStoreController
}
// readinessFlags records which optional subsystems have finished initializing.
// The flags are set during NewServer (see sidecarInjectorReady being stored
// after the injector is created); presumably consumed by readiness checks
// defined elsewhere — verify against initReadinessProbes.
type readinessFlags struct {
	sidecarInjectorReady  atomic.Bool
	configValidationReady atomic.Bool
}
// webhookInfo holds the sidecar injection webhook. mu guards wh, which is
// assigned after server construction starts (see NewServer) and read by
// accessor methods below.
type webhookInfo struct {
	mu sync.RWMutex
	wh *inject.Webhook
}
// GetTemplates returns the raw injection templates from the current webhook
// config, or an empty map when the webhook has not been set yet.
func (w *webhookInfo) GetTemplates() map[string]string {
	w.mu.RLock()
	defer w.mu.RUnlock()
	if w.wh == nil {
		return map[string]string{}
	}
	return w.wh.Config.RawTemplates
}
// getWebhookConfig returns a snapshot of the injection webhook configuration;
// the zero value is returned when the webhook has not been initialized.
func (w *webhookInfo) getWebhookConfig() inject.WebhookConfig {
	w.mu.RLock()
	defer w.mu.RUnlock()
	if w.wh == nil {
		return inject.WebhookConfig{}
	}
	return w.wh.GetConfig()
}
// addHandler registers fn to run whenever the injection config changes.
// It is a no-op if the webhook has not been initialized.
func (w *webhookInfo) addHandler(fn func()) {
	w.mu.Lock()
	defer w.mu.Unlock()
	if w.wh == nil {
		return
	}
	w.wh.MultiCast.AddHandler(func(_ *inject.Config, _ string) error {
		fn()
		return nil
	})
}
// NewServer creates a new Server instance based on the provided arguments.
// Initialization order matters throughout: the readiness probe must exist
// before HTTP serving starts, the CA must be created before the secure gRPC
// server, and the kube client informers are started last.
func NewServer(args *PilotArgs, initFuncs ...func(*Server)) (*Server, error) {
	e := model.NewEnvironment()
	e.DomainSuffix = args.RegistryOptions.KubeOptions.DomainSuffix
	e.SetLedger(buildLedger(args.RegistryOptions))

	ac := aggregate.NewController(aggregate.Options{
		MeshHolder: e,
	})
	e.ServiceDiscovery = ac

	s := &Server{
		clusterID:               getClusterID(args),
		environment:             e,
		fileWatcher:             filewatcher.NewWatcher(),
		httpMux:                 http.NewServeMux(),
		monitoringMux:           http.NewServeMux(),
		readinessProbes:         make(map[string]readinessProbe),
		readinessFlags:          &readinessFlags{},
		workloadTrustBundle:     tb.NewTrustBundle(nil),
		server:                  server.New(),
		shutdownDuration:        args.ShutdownDuration,
		internalStop:            make(chan struct{}),
		istiodCertBundleWatcher: keycertbundle.NewWatcher(),
		webhookInfo:             &webhookInfo{},
	}

	// Apply custom initialization functions.
	for _, fn := range initFuncs {
		fn(s)
	}
	// Initialize workload Trust Bundle before XDS Server
	e.TrustBundle = s.workloadTrustBundle
	s.XDSServer = xds.NewDiscoveryServer(e, args.RegistryOptions.KubeOptions.ClusterAliases)
	configGen := v1alpha3.NewConfigGenerator(s.XDSServer.Cache)

	grpcprom.EnableHandlingTimeHistogram()

	// make sure we have a readiness probe before serving HTTP to avoid marking ready too soon
	s.initReadinessProbes()

	s.initServers(args)
	if err := s.initIstiodAdminServer(args, s.webhookInfo.GetTemplates); err != nil {
		return nil, fmt.Errorf("error initializing debug server: %v", err)
	}
	if err := s.serveHTTP(); err != nil {
		return nil, fmt.Errorf("error serving http: %v", err)
	}

	// Apply the arguments to the configuration.
	if err := s.initKubeClient(args); err != nil {
		return nil, fmt.Errorf("error initializing kube client: %v", err)
	}

	// Mesh config must be loaded before anything that depends on it below.
	s.initMeshConfiguration(args, s.fileWatcher)
	spiffe.SetTrustDomain(s.environment.Mesh().GetTrustDomain())

	s.initMeshNetworks(args, s.fileWatcher)
	s.initMeshHandlers(configGen.MeshConfigChanged)
	s.environment.Init()
	if err := s.environment.InitNetworksManager(s.XDSServer); err != nil {
		return nil, err
	}

	// Options based on the current 'defaults' in istio.
	caOpts := &caOptions{
		TrustDomain:      s.environment.Mesh().TrustDomain,
		Namespace:        args.Namespace,
		DiscoveryFilter:  args.RegistryOptions.KubeOptions.GetFilter(),
		ExternalCAType:   ra.CaExternalType(externalCaType),
		CertSignerDomain: features.CertSignerDomain,
	}

	if caOpts.ExternalCAType == ra.ExtCAK8s {
		// Older environment variable preserved for backward compatibility
		caOpts.ExternalCASigner = k8sSigner
	}
	// CA signing certificate must be created first if needed.
	if err := s.maybeCreateCA(caOpts); err != nil {
		return nil, err
	}

	if err := s.initControllers(args); err != nil {
		return nil, err
	}

	InitGenerators(s.XDSServer, configGen, args.Namespace, s.clusterID, s.internalDebugMux)

	// Initialize workloadTrustBundle after CA has been initialized
	if err := s.initWorkloadTrustBundle(args); err != nil {
		return nil, err
	}

	// Parse and validate Istiod Address.
	istiodHost, _, err := e.GetDiscoveryAddress()
	if err != nil {
		return nil, err
	}

	// Create Istiod certs and setup watches.
	if err := s.initIstiodCerts(args, string(istiodHost)); err != nil {
		return nil, err
	}

	// Secure gRPC Server must be initialized after CA is created as may use a Citadel generated cert.
	if err := s.initSecureDiscoveryService(args); err != nil {
		return nil, fmt.Errorf("error initializing secure gRPC Listener: %v", err)
	}

	// common https server for webhooks (e.g. injection, validation)
	if s.kubeClient != nil {
		s.initSecureWebhookServer(args)
		wh, err := s.initSidecarInjector(args)
		if err != nil {
			return nil, fmt.Errorf("error initializing sidecar injector: %v", err)
		}
		s.readinessFlags.sidecarInjectorReady.Store(true)
		// Publish the webhook under the lock; readers use the same mutex.
		s.webhookInfo.mu.Lock()
		s.webhookInfo.wh = wh
		s.webhookInfo.mu.Unlock()
		if err := s.initConfigValidation(args); err != nil {
			return nil, fmt.Errorf("error initializing config validator: %v", err)
		}
	}

	// This should be called only after controllers are initialized.
	s.initRegistryEventHandlers()

	s.initDiscoveryService()

	// Notice that the order of authenticators matters, since at runtime
	// authenticators are activated sequentially and the first successful attempt
	// is used as the authentication result.
	authenticators := []security.Authenticator{
		&authenticate.ClientCertAuthenticator{},
	}
	if args.JwtRule != "" {
		jwtAuthn, err := initOIDC(args)
		if err != nil {
			return nil, fmt.Errorf("error initializing OIDC: %v", err)
		}
		if jwtAuthn == nil {
			return nil, fmt.Errorf("JWT authenticator is nil")
		}
		authenticators = append(authenticators, jwtAuthn)
	}
	// The k8s JWT authenticator requires the multicluster registry to be initialized,
	// so we build it later.
	if s.kubeClient != nil {
		authenticators = append(authenticators,
			kubeauth.NewKubeJWTAuthenticator(s.environment.Watcher, s.kubeClient.Kube(), s.clusterID, s.multiclusterController.GetRemoteKubeClient, features.JwtPolicy))
	}
	if len(features.TrustedGatewayCIDR) > 0 {
		authenticators = append(authenticators, &authenticate.XfccAuthenticator{})
	}
	if features.XDSAuth {
		s.XDSServer.Authenticators = authenticators
	}
	caOpts.Authenticators = authenticators

	// Start CA or RA server. This should be called after CA and Istiod certs have been created.
	s.startCA(caOpts)

	// TODO: don't run this if galley is started, one ctlz is enough
	if args.CtrlZOptions != nil {
		_, _ = ctrlz.Run(args.CtrlZOptions, nil)
	}

	// This must be last, otherwise we will not know which informers to register
	if s.kubeClient != nil {
		s.addStartFunc("kube client", func(stop <-chan struct{}) error {
			s.kubeClient.RunAndWait(stop)
			return nil
		})
	}
	return s, nil
}
// initOIDC builds a JWT authenticator from the JWTRule carried in args.
func initOIDC(args *PilotArgs) (security.Authenticator, error) {
	// JWTRule is from the JWT_RULE environment variable.
	// An example of json string for JWTRule is:
	// `{"issuer": "foo", "jwks_uri": "baz", "audiences": ["aud1", "aud2"]}`.
	rule := &v1beta1.JWTRule{}
	if err := json.Unmarshal([]byte(args.JwtRule), rule); err != nil {
		return nil, fmt.Errorf("failed to unmarshal JWT rule: %v", err)
	}
	log.Infof("Istiod authenticating using JWTRule: %v", rule)
	authn, err := authenticate.NewJwtAuthenticator(rule)
	if err != nil {
		return nil, fmt.Errorf("failed to create the JWT authenticator: %v", err)
	}
	return authn, nil
}
// getClusterID resolves the cluster ID for this istiod instance. When no ID
// is configured and a kube registry is present, the Kubernetes provider name
// is used as the default.
func getClusterID(args *PilotArgs) cluster.ID {
	if id := args.RegistryOptions.KubeOptions.ClusterID; id != "" {
		return id
	}
	if hasKubeRegistry(args.RegistryOptions.Registries) {
		return cluster.ID(provider.Kubernetes)
	}
	return ""
}
// Start starts all components of the Pilot discovery service on the port specified in DiscoveryServerOptions.
// If Port == 0, a port number is automatically chosen. Content serving is started by this method,
// but is executed asynchronously. Serving can be canceled at any time by closing the provided stop channel.
func (s *Server) Start(stop <-chan struct{}) error {
	log.Infof("Starting Istiod Server with primary cluster %s", s.clusterID)

	if features.UnsafeFeaturesEnabled() {
		log.Warn("Server is starting with unsafe features enabled")
	}

	// Now start all of the components.
	if err := s.server.Start(stop); err != nil {
		return err
	}
	if !s.waitForCacheSync(stop) {
		return fmt.Errorf("failed to sync cache")
	}
	// Inform Discovery Server so that it can start accepting connections.
	s.XDSServer.CachesSynced()

	// Race condition - if waitForCache is too fast and we run this as a startup function,
	// the grpc server would be started before CA is registered. Listening should be last.
	if s.secureGrpcAddress != "" {
		grpcListener, err := net.Listen("tcp", s.secureGrpcAddress)
		if err != nil {
			return err
		}
		go func() {
			log.Infof("starting secure gRPC discovery service at %s", grpcListener.Addr())
			if err := s.secureGrpcServer.Serve(grpcListener); err != nil {
				log.Errorf("error serving secure GRPC server: %v", err)
			}
		}()
	}

	if s.grpcAddress != "" {
		grpcListener, err := net.Listen("tcp", s.grpcAddress)
		if err != nil {
			return err
		}
		go func() {
			log.Infof("starting gRPC discovery service at %s", grpcListener.Addr())
			if err := s.grpcServer.Serve(grpcListener); err != nil {
				log.Errorf("error serving GRPC server: %v", err)
			}
		}()
	}

	if s.httpsServer != nil {
		httpsListener, err := net.Listen("tcp", s.httpsServer.Addr)
		if err != nil {
			return err
		}
		go func() {
			log.Infof("starting webhook service at %s", httpsListener.Addr())
			// Expected errors (e.g. listener closed on shutdown) are filtered out.
			if err := s.httpsServer.ServeTLS(httpsListener, "", ""); network.IsUnexpectedListenerError(err) {
				log.Errorf("error serving https server: %v", err)
			}
		}()
	}

	s.waitForShutdown(stop)

	return nil
}
// WaitUntilCompletion waits for everything marked as a "required termination" to complete.
// This should be called before exiting.
func (s *Server) WaitUntilCompletion() {
	s.server.Wait()
}
// initSDSServer starts the SDS server. It wires up the Kubernetes credentials
// (secrets) controller and is a no-op when not running against Kubernetes.
func (s *Server) initSDSServer() {
	if s.kubeClient == nil {
		return
	}
	if !features.EnableXDSIdentityCheck {
		// Make sure we have security
		log.Warnf("skipping Kubernetes credential reader; PILOT_ENABLE_XDS_IDENTITY_CHECK must be set to true for this feature.")
		return
	}
	creds := kubecredentials.NewMulticluster(s.clusterID)
	creds.AddSecretHandler(func(name string, namespace string) {
		// A secret change is scoped to that secret only; request an
		// incremental (non-full) push.
		s.XDSServer.ConfigUpdate(&model.PushRequest{
			Full:           false,
			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.Secret, Name: name, Namespace: namespace}),
			Reason:         model.NewReasonStats(model.SecretTrigger),
		})
	})
	s.multiclusterController.AddHandler(creds)
	s.environment.CredentialsController = creds
}
// initKubeClient creates the k8s client if running in a k8s environment.
// This is determined by the presence of a kube registry, which
// uses in-context k8s, or a config source of type k8s.
func (s *Server) initKubeClient(args *PilotArgs) error {
	if s.kubeClient != nil {
		// Already initialized by startup arguments
		return nil
	}
	hasK8SConfigStore := false
	if args.RegistryOptions.FileDir == "" {
		// If file dir is set - config controller will just use file.
		if _, err := os.Stat(args.MeshConfigFile); !os.IsNotExist(err) {
			meshConfig, err := mesh.ReadMeshConfig(args.MeshConfigFile)
			if err != nil {
				return fmt.Errorf("failed reading mesh config: %v", err)
			}
			// No explicit config sources plus a kubeconfig implies the
			// default k8s config store.
			if len(meshConfig.ConfigSources) == 0 && args.RegistryOptions.KubeConfig != "" {
				hasK8SConfigStore = true
			}
			// An explicit "Kubernetes://" config source also implies k8s.
			for _, cs := range meshConfig.ConfigSources {
				if cs.Address == string(Kubernetes)+"://" {
					hasK8SConfigStore = true
					break
				}
			}
		} else if args.RegistryOptions.KubeConfig != "" {
			hasK8SConfigStore = true
		}
	}

	if hasK8SConfigStore || hasKubeRegistry(args.RegistryOptions.Registries) {
		// Used by validation
		kubeRestConfig, err := kubelib.DefaultRestConfig(args.RegistryOptions.KubeConfig, "", func(config *rest.Config) {
			// Apply the configured API rate limits to the client.
			config.QPS = args.RegistryOptions.KubeOptions.KubernetesAPIQPS
			config.Burst = args.RegistryOptions.KubeOptions.KubernetesAPIBurst
		})
		if err != nil {
			return fmt.Errorf("failed creating kube config: %v", err)
		}

		s.kubeClient, err = kubelib.NewClient(kubelib.NewClientConfigForRestConfig(kubeRestConfig), s.clusterID)
		if err != nil {
			return fmt.Errorf("failed creating kube client: %v", err)
		}
		s.kubeClient = kubelib.EnableCrdWatcher(s.kubeClient)
	}

	return nil
}
// A single container can't have two readiness probes. Make this readiness probe a generic one
// that can handle all istiod related readiness checks including webhook, gRPC etc.
// The "http" portion of the readiness check is satisfied by the fact we've started listening on
// this handler and everything has already initialized.
func (s *Server) istiodReadyHandler(w http.ResponseWriter, _ *http.Request) {
	for name, probe := range s.readinessProbes {
		if !probe() {
			log.Warnf("%s is not ready", name)
			w.WriteHeader(http.StatusServiceUnavailable)
			return
		}
	}
	// All registered probes passed.
	w.WriteHeader(http.StatusOK)
}
// initServers initializes http and grpc servers
func (s *Server) initServers(args *PilotArgs) {
	s.initGrpcServer(args.KeepaliveOptions)
	multiplexGRPC := false
	if args.ServerOptions.GRPCAddr != "" {
		s.grpcAddress = args.ServerOptions.GRPCAddr
	} else {
		// This happens only if the GRPC port (15010) is disabled. We will multiplex
		// it on the HTTP port. Does not impact the HTTPS gRPC or HTTPS.
		log.Infof("multiplexing gRPC on http addr %v", args.ServerOptions.HTTPAddr)
		multiplexGRPC = true
	}
	h2s := &http2.Server{
		MaxConcurrentStreams: uint32(features.MaxConcurrentStreams),
	}
	// h2c allows plaintext HTTP/2 so gRPC traffic can be detected and routed
	// to the gRPC server, everything else to the regular HTTP mux.
	multiplexHandler := h2c.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// If we detect gRPC, serve using grpcServer
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("content-type"), "application/grpc") {
			s.grpcServer.ServeHTTP(w, r)
			return
		}
		// Otherwise, this is meant for the standard HTTP server
		s.httpMux.ServeHTTP(w, r)
	}), h2s)
	s.httpServer = &http.Server{
		Addr:        args.ServerOptions.HTTPAddr,
		Handler:     s.httpMux,
		IdleTimeout: 90 * time.Second, // matches http.DefaultTransport keep-alive timeout
		ReadTimeout: 30 * time.Second,
	}
	if multiplexGRPC {
		// To allow the gRPC handler to make per-request decision,
		// use ReadHeaderTimeout instead of ReadTimeout.
		s.httpServer.ReadTimeout = 0
		s.httpServer.ReadHeaderTimeout = 30 * time.Second
		s.httpServer.Handler = multiplexHandler
	}

	if args.ServerOptions.MonitoringAddr == "" {
		// Monitoring shares the HTTP mux when no dedicated address is given.
		s.monitoringMux = s.httpMux
		log.Infof("initializing Istiod admin server multiplexed on httpAddr %v", s.httpServer.Addr)
	} else {
		log.Info("initializing Istiod admin server")
	}
}
// initIstiodAdminServer initializes monitoring, debug and readiness end points.
func (s *Server) initIstiodAdminServer(args *PilotArgs, whc func() map[string]string) error {
	// Debug Server.
	internalMux := s.XDSServer.InitDebug(s.monitoringMux, args.ServerOptions.EnableProfiling, whc)
	s.internalDebugMux = internalMux

	// Debug handlers are currently added on monitoring mux and readiness mux.
	// If monitoring addr is empty, the mux is shared and we only add it once on the shared mux .
	if args.ServerOptions.MonitoringAddr != "" {
		s.XDSServer.AddDebugHandlers(s.httpMux, nil, args.ServerOptions.EnableProfiling, whc)
	}

	// Monitoring Server.
	if err := s.initMonitor(args.ServerOptions.MonitoringAddr); err != nil {
		return fmt.Errorf("error initializing monitor: %v", err)
	}

	// Readiness Handler.
	s.httpMux.HandleFunc("/ready", s.istiodReadyHandler)

	return nil
}
// initDiscoveryService initializes discovery server on plain text port.
func (s *Server) initDiscoveryService() {
	log.Infof("starting discovery service")
	// Implement EnvoyXdsServer grace shutdown
	s.addStartFunc("xds server", func(stop <-chan struct{}) error {
		log.Infof("Starting ADS server")
		s.XDSServer.Start(stop)
		return nil
	})
}
// waitForShutdown waits for the stop channel, then performs cleanups:
// file watchers are closed, gRPC servers are drained gracefully (with a
// forced stop after shutdownDuration), HTTP servers are shut down, and
// finally the discovery server itself is stopped.
func (s *Server) waitForShutdown(stop <-chan struct{}) {
	go func() {
		<-stop
		close(s.internalStop)
		_ = s.fileWatcher.Close()

		if s.cacertsWatcher != nil {
			_ = s.cacertsWatcher.Close()
		}
		// Stop gRPC services. If gRPC services fail to stop in the shutdown duration,
		// force stop them. This does not happen normally.
		stopped := make(chan struct{})
		go func() {
			// Some grpcServer implementations do not support GracefulStop. Unfortunately, this is not
			// exposed; they just panic. To avoid this, we will recover and do a standard Stop when it's not
			// supported.
			defer func() {
				if r := recover(); r != nil {
					s.grpcServer.Stop()
					if s.secureGrpcServer != nil {
						s.secureGrpcServer.Stop()
					}
					close(stopped)
				}
			}()
			s.grpcServer.GracefulStop()
			if s.secureGrpcServer != nil {
				s.secureGrpcServer.GracefulStop()
			}
			close(stopped)
		}()

		t := time.NewTimer(s.shutdownDuration)
		select {
		case <-t.C:
			// Graceful drain took too long; force-stop both servers.
			s.grpcServer.Stop()
			if s.secureGrpcServer != nil {
				s.secureGrpcServer.Stop()
			}
		case <-stopped:
			t.Stop()
		}

		// Stop HTTP services.
		ctx, cancel := context.WithTimeout(context.Background(), s.shutdownDuration)
		defer cancel()
		if err := s.httpServer.Shutdown(ctx); err != nil {
			log.Warn(err)
		}
		if s.httpsServer != nil {
			if err := s.httpsServer.Shutdown(ctx); err != nil {
				log.Warn(err)
			}
		}

		// Shutdown the DiscoveryServer.
		s.XDSServer.Shutdown()
	}()
}
// initGrpcServer creates the plaintext gRPC server and registers the XDS and
// reflection services on it.
func (s *Server) initGrpcServer(options *istiokeepalive.Options) {
	// setup server prometheus monitoring (as final interceptor in chain)
	interceptors := []grpc.UnaryServerInterceptor{grpcprom.UnaryServerInterceptor}
	s.grpcServer = grpc.NewServer(istiogrpc.ServerOptions(options, interceptors...)...)
	s.XDSServer.Register(s.grpcServer)
	reflection.Register(s.grpcServer)
}
// initSecureDiscoveryService initializes the secureGRPCServer: a TLS-protected
// gRPC endpoint for XDS, using the istiod serving cert and SPIFFE peer
// verification. It is skipped when no secure address is configured or when
// no peer cert verifier can be built (local runs without certs).
func (s *Server) initSecureDiscoveryService(args *PilotArgs) error {
	if args.ServerOptions.SecureGRPCAddr == "" {
		log.Info("The secure discovery port is disabled, multiplexing on httpAddr ")
		return nil
	}

	peerCertVerifier, err := s.createPeerCertVerifier(args.ServerOptions.TLSOptions)
	if err != nil {
		return err
	}
	if peerCertVerifier == nil {
		// Running locally without configured certs - no TLS mode
		log.Warnf("The secure discovery service is disabled")
		return nil
	}
	log.Info("initializing secure discovery service")
	cfg := &tls.Config{
		GetCertificate: s.getIstiodCertificate,
		// Client certs are verified when presented, but not required: some
		// callers authenticate via JWT instead.
		ClientAuth: tls.VerifyClientCertIfGiven,
		ClientCAs:  peerCertVerifier.GetGeneralCertPool(),
		VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
			err := peerCertVerifier.VerifyPeerCert(rawCerts, verifiedChains)
			if err != nil {
				log.Infof("Could not verify certificate: %v", err)
			}
			return err
		},
		MinVersion:   tls.VersionTLS12,
		CipherSuites: args.ServerOptions.TLSOptions.CipherSuits,
	}

	tlsCreds := credentials.NewTLS(cfg)

	s.secureGrpcAddress = args.ServerOptions.SecureGRPCAddr

	interceptors := []grpc.UnaryServerInterceptor{
		// setup server prometheus monitoring (as final interceptor in chain)
		grpcprom.UnaryServerInterceptor,
	}
	opts := istiogrpc.ServerOptions(args.KeepaliveOptions, interceptors...)
	opts = append(opts, grpc.Creds(tlsCreds))

	s.secureGrpcServer = grpc.NewServer(opts...)
	s.XDSServer.Register(s.secureGrpcServer)
	reflection.Register(s.secureGrpcServer)

	s.addStartFunc("secure gRPC", func(stop <-chan struct{}) error {
		go func() {
			<-stop
			s.secureGrpcServer.Stop()
		}()
		return nil
	})

	return nil
}
// addStartFunc appends a function to be run. These are run synchronously in order,
// so the function should start a go routine if it needs to do anything blocking
func (s *Server) addStartFunc(name string, fn server.Component) {
	s.server.RunComponent(name, fn)
}
// addReadinessProbe registers a readiness probe under the given name;
// all registered probes must pass for /ready to return 200.
func (s *Server) addReadinessProbe(name string, fn readinessProbe) {
	s.readinessProbes[name] = fn
}
// addTerminatingStartFunc adds a function that should terminate before the server shuts down
// This is useful to do cleanup activities
// This does not guarantee they will terminate gracefully - best effort only
// Function should be synchronous; once it returns it is considered "done"
func (s *Server) addTerminatingStartFunc(name string, fn server.Component) {
	s.server.RunComponentAsyncAndWait(name, fn)
}
// waitForCacheSync blocks until all controller caches have synced and the
// push context has committed every inbound update received so far. Returns
// false if the stop channel fires first.
func (s *Server) waitForCacheSync(stop <-chan struct{}) bool {
	start := time.Now()
	log.Info("Waiting for caches to be synced")
	if !kubelib.WaitForCacheSync("server", stop, s.cachesSynced) {
		log.Errorf("Failed waiting for cache sync")
		return false
	}
	log.Infof("All controller caches have been synced up in %v", time.Since(start))

	// At this point, we know that all update events of the initial state-of-the-world have been
	// received. We wait to ensure we have committed at least this many updates. This avoids a race
	// condition where we are marked ready prior to updating the push context, leading to incomplete
	// pushes.
	expected := s.XDSServer.InboundUpdates.Load()
	return kubelib.WaitForCacheSync("push context", stop, func() bool { return s.pushContextReady(expected) })
}
// pushContextReady indicates whether pushcontext has processed all inbound config updates.
func (s *Server) pushContextReady(expected int64) bool {
	committed := s.XDSServer.CommittedUpdates.Load()
	if committed >= expected {
		return true
	}
	log.Debugf("Waiting for pushcontext to process inbound updates, inbound: %v, committed : %v", expected, committed)
	return false
}
// cachesSynced checks whether all caches this server depends on have
// completed their initial sync: the multicluster controller (if present),
// the service and config controllers, and the injection webhook (if set).
func (s *Server) cachesSynced() bool {
	if s.multiclusterController != nil && !s.multiclusterController.HasSynced() {
		return false
	}
	if !s.ServiceController().HasSynced() {
		return false
	}
	if !s.configController.HasSynced() {
		return false
	}
	// Read the webhook pointer under its lock: NewServer assigns webhookInfo.wh
	// while holding mu, so an unguarded read here is a data race (this method
	// runs on the Start goroutine while NewServer may still be publishing).
	s.webhookInfo.mu.RLock()
	wh := s.webhookInfo.wh
	s.webhookInfo.mu.RUnlock()
	if wh != nil && !wh.HasSynced() {
		return false
	}
	return true
}
// initRegistryEventHandlers sets up event handlers for config and service updates
func (s *Server) initRegistryEventHandlers() {
	log.Info("initializing registry event handlers")
	// Flush cached discovery responses whenever services configuration change.
	serviceHandler := func(prev, curr *model.Service, event model.Event) {
		pushReq := &model.PushRequest{
			Full:           true,
			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.ServiceEntry, Name: string(curr.Hostname), Namespace: curr.Attributes.Namespace}),
			Reason:         model.NewReasonStats(model.ServiceUpdate),
		}
		s.XDSServer.ConfigUpdate(pushReq)
	}
	s.ServiceController().AppendServiceHandler(serviceHandler)

	if s.configController != nil {
		configHandler := func(prev config.Config, curr config.Config, event model.Event) {
			// Track distribution status for everything except deletes; the
			// deferred call runs after the push request is queued below.
			if s.statusReporter != nil {
				defer func() {
					if event != model.EventDelete {
						s.statusReporter.AddInProgressResource(curr)
					} else {
						s.statusReporter.DeleteInProgressResource(curr)
					}
				}()
			}
			log.Debugf("Handle event %s for configuration %s", event, curr.Key())
			// For update events, trigger push only if spec has changed.
			if event == model.EventUpdate && !needsPush(prev, curr) {
				log.Debugf("skipping push for %s as spec has not changed", prev.Key())
				return
			}
			pushReq := &model.PushRequest{
				Full:           true,
				ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.MustFromGVK(curr.GroupVersionKind), Name: curr.Name, Namespace: curr.Namespace}),
				Reason:         model.NewReasonStats(model.ConfigUpdate),
			}
			s.XDSServer.ConfigUpdate(pushReq)
		}
		schemas := collections.Pilot.All()
		if features.EnableGatewayAPI {
			schemas = collections.PilotGatewayAPI().All()
		}
		for _, schema := range schemas {
			// This resource type was handled in external/servicediscovery.go, no need to rehandle here.
			if schema.GroupVersionKind() == gvk.ServiceEntry {
				continue
			}
			if schema.GroupVersionKind() == gvk.WorkloadEntry {
				continue
			}
			if schema.GroupVersionKind() == gvk.WorkloadGroup {
				continue
			}
			s.configController.RegisterEventHandler(schema.GroupVersionKind(), configHandler)
		}
		if s.environment.GatewayAPIController != nil {
			// Namespace changes affect gateway selection; trigger a full push.
			s.environment.GatewayAPIController.RegisterEventHandler(gvk.Namespace, func(config.Config, config.Config, model.Event) {
				s.XDSServer.ConfigUpdate(&model.PushRequest{
					Full:   true,
					Reason: model.NewReasonStats(model.NamespaceUpdate),
				})
			})
			// Secret changes are reported against the referencing gateway.
			s.environment.GatewayAPIController.RegisterEventHandler(gvk.Secret, func(_ config.Config, gw config.Config, _ model.Event) {
				s.XDSServer.ConfigUpdate(&model.PushRequest{
					Full: true,
					ConfigsUpdated: map[model.ConfigKey]struct{}{
						{
							Kind:      kind.KubernetesGateway,
							Name:      gw.Name,
							Namespace: gw.Namespace,
						}: {},
					},
					Reason: model.NewReasonStats(model.SecretTrigger),
				})
			})
		}
	}
}
// initIstiodCertLoader performs the initial load of the istiod serving cert
// and starts a component that reloads it whenever the cert bundle changes.
func (s *Server) initIstiodCertLoader() error {
	if err := s.loadIstiodCert(); err != nil {
		return fmt.Errorf("first time load IstiodCert failed: %v", err)
	}
	_, watchCh := s.istiodCertBundleWatcher.AddWatcher()
	reload := func(stop <-chan struct{}) error {
		go s.reloadIstiodCert(watchCh, stop)
		return nil
	}
	s.addStartFunc("reload certs", reload)
	return nil
}
// initIstiodCerts creates Istiod certificates and also sets up watches to them.
func (s *Server) initIstiodCerts(args *PilotArgs, host string) error {
	s.dnsNames = getDNSNames(args, host)
	var err error
	if useCustom, certPath, keyPath, caPath := hasCustomTLSCerts(args.ServerOptions.TLSOptions); useCustom {
		// Use the DNS certificate provided via args or in well known location.
		err = s.initCertificateWatches(TLSOptions{
			CaCertFile: caPath,
			KeyFile:    keyPath,
			CertFile:   certPath,
		})
		if err != nil {
			// Not crashing istiod - This typically happens if certs are missing and in tests.
			log.Errorf("error initializing certificate watches: %v", err)
			return nil
		}
	} else if (features.EnableCAServer && features.PilotCertProvider == constants.CertProviderIstiod) ||
		features.PilotCertProvider == constants.CertProviderKubernetes ||
		strings.HasPrefix(features.PilotCertProvider, constants.CertProviderKubernetesSignerPrefix) {
		// All three providers follow the same istiod-managed DNS cert flow.
		log.Infof("initializing Istiod DNS certificates host: %s, custom host: %s", host, features.IstiodServiceCustomHost)
		err = s.initDNSCerts()
	} else {
		// No cert provider configured; skip cert setup entirely.
		return nil
	}
	if err == nil {
		err = s.initIstiodCertLoader()
	}
	return err
}
// getDNSNames computes the sorted SAN list for the istiod serving cert:
// the discovery host, any configured custom hosts, and well-known istiod
// service names in the deployment namespace.
func getDNSNames(args *PilotArgs, host string) []string {
	sans := sets.New(host)
	// Append custom hostname if there is any
	if custom := features.IstiodServiceCustomHost; custom != "" {
		sans.InsertAll(strings.Split(custom, ",")...)
	}
	// The first is the recommended one, also used by Apiserver for webhooks.
	// add a few known hostnames
	hosts := []string{"istiod", "istiod-remote", "istio-pilot"}
	// In some conditions, pilot address for sds is different from other xds,
	// like multi-cluster primary-remote mode with revision.
	if rev := args.Revision; rev != "" && rev != "default" {
		hosts = append(hosts, "istiod-"+rev)
	}
	for _, altName := range hosts {
		sans.Insert(fmt.Sprintf("%s.%s.svc", altName, args.Namespace))
	}
	dnsNames := sets.SortedList(sans)
	log.Infof("Discover server subject alt names: %v", dnsNames)
	return dnsNames
}
// createPeerCertVerifier creates a SPIFFE certificate verifier with the current istiod configuration.
// Root certs are sourced, in order of precedence, from: an explicit CA cert
// file, the RA's root (mesh-config signer or key-cert bundle), and the CA's
// key-cert bundle. Returns (nil, nil) when running without any certs.
func (s *Server) createPeerCertVerifier(tlsOptions TLSOptions) (*spiffe.PeerCertVerifier, error) {
	customTLSCertsExists, _, _, caCertPath := hasCustomTLSCerts(tlsOptions)
	if !customTLSCertsExists && s.CA == nil && !s.isCADisabled() {
		// Running locally without configured certs - no TLS mode
		return nil, nil
	}
	peerCertVerifier := spiffe.NewPeerCertVerifier()
	var rootCertBytes []byte
	var err error
	if caCertPath != "" {
		if rootCertBytes, err = os.ReadFile(caCertPath); err != nil {
			return nil, err
		}
	} else {
		if s.RA != nil {
			if strings.HasPrefix(features.PilotCertProvider, constants.CertProviderKubernetesSignerPrefix) {
				signerName := strings.TrimPrefix(features.PilotCertProvider, constants.CertProviderKubernetesSignerPrefix)
				// NOTE(review): error from GetRootCertFromMeshConfig is discarded;
				// a missing bundle simply contributes no roots here.
				caBundle, _ := s.RA.GetRootCertFromMeshConfig(signerName)
				rootCertBytes = append(rootCertBytes, caBundle...)
			} else {
				rootCertBytes = append(rootCertBytes, s.RA.GetCAKeyCertBundle().GetRootCertPem()...)
			}
		}
		if s.CA != nil {
			rootCertBytes = append(rootCertBytes, s.CA.GetCAKeyCertBundle().GetRootCertPem()...)
		}
	}

	if len(rootCertBytes) != 0 {
		err := peerCertVerifier.AddMappingFromPEM(spiffe.GetTrustDomain(), rootCertBytes)
		if err != nil {
			return nil, fmt.Errorf("add root CAs into peerCertVerifier failed: %v", err)
		}
	}
	return peerCertVerifier, nil
}
// checkPathsExist reports whether every given path exists and is a regular
// (non-directory) entry. An empty argument list yields true.
func checkPathsExist(paths ...string) bool {
	for _, p := range paths {
		info, err := os.Stat(p)
		if err != nil {
			return false
		}
		if info.IsDir() {
			return false
		}
	}
	return true
}
// hasCustomTLSCerts returns the tls cert paths, used both if custom TLS certificates are configured via args or by mounting in well known.
// while tls args should still take precedence the aim is to encourage loading the DNS tls cert in the well known path locations.
func hasCustomTLSCerts(tlsOptions TLSOptions) (ok bool, tlsCertPath, tlsKeyPath, caCertPath string) {
	// load from tls args as priority
	if hasCustomTLSCertArgs(tlsOptions) {
		return true, tlsOptions.CertFile, tlsOptions.KeyFile, tlsOptions.CaCertFile
	}
	// Fall back to the well-known mount locations, preferring the primary
	// CA cert path over the alternate one.
	for _, ca := range []string{constants.DefaultPilotTLSCaCert, constants.DefaultPilotTLSCaCertAlternatePath} {
		if checkPathsExist(constants.DefaultPilotTLSCert, constants.DefaultPilotTLSKey, ca) {
			return true, constants.DefaultPilotTLSCert, constants.DefaultPilotTLSKey, ca
		}
	}
	return false, "", "", ""
}
// hasCustomTLSCertArgs returns true if custom TLS certificates are configured via args
// (all three of CA cert, cert, and key must be set).
func hasCustomTLSCertArgs(tlsOptions TLSOptions) bool {
	for _, f := range []string{tlsOptions.CaCertFile, tlsOptions.CertFile, tlsOptions.KeyFile} {
		if f == "" {
			return false
		}
	}
	return true
}
// getIstiodCertificate returns the istiod certificate; it satisfies the
// tls.Config.GetCertificate callback signature.
func (s *Server) getIstiodCertificate(*tls.ClientHelloInfo) (*tls.Certificate, error) {
	s.certMu.RLock()
	defer s.certMu.RUnlock()
	if s.istiodCert == nil {
		return nil, fmt.Errorf("cert not initialized")
	}
	return s.istiodCert, nil
}
// initControllers initializes the controllers.
// Order matters: the multicluster controller is created first because the
// discovery namespace filter below is read from it.
func (s *Server) initControllers(args *PilotArgs) error {
	log.Info("initializing controllers")
	s.initMulticluster(args)
	s.initSDSServer()
	if features.EnableEnhancedResourceScoping {
		// setup namespace filter
		args.RegistryOptions.KubeOptions.DiscoveryNamespacesFilter = s.multiclusterController.DiscoveryNamespacesFilter
	}
	if err := s.initConfigController(args); err != nil {
		return fmt.Errorf("error initializing config controller: %v", err)
	}
	if err := s.initServiceControllers(args); err != nil {
		return fmt.Errorf("error initializing service controllers: %v", err)
	}
	return nil
}
// initMulticluster creates the multicluster controller, exposes its remote
// cluster listing to the XDS server, and registers it as a start task.
// No-op when there is no kube client (e.g. file-based configuration).
func (s *Server) initMulticluster(args *PilotArgs) {
	if s.kubeClient == nil {
		return
	}
	// Remote cluster rest configs inherit the locally configured API QPS/burst.
	s.multiclusterController = multicluster.NewController(s.kubeClient, args.Namespace, s.clusterID, s.environment.Watcher, func(r *rest.Config) {
		r.QPS = args.RegistryOptions.KubeOptions.KubernetesAPIQPS
		r.Burst = args.RegistryOptions.KubeOptions.KubernetesAPIBurst
	})
	s.XDSServer.ListRemoteClusters = s.multiclusterController.ListRemoteClusters
	s.addStartFunc("multicluster controller", func(stop <-chan struct{}) error {
		return s.multiclusterController.Run(stop)
	})
}
// maybeCreateCA creates and initializes the CA signing key/cert and, when an
// external CA type is configured, the RA. No-op when the CA server feature is
// disabled.
func (s *Server) maybeCreateCA(caOpts *caOptions) error {
	// CA signing certificate must be created only if CA is enabled.
	if !features.EnableCAServer {
		return nil
	}
	log.Info("creating CA and initializing public key")
	var err error
	if useRemoteCerts.Get() {
		if err = s.loadCACerts(caOpts, LocalCertDir.Get()); err != nil {
			return fmt.Errorf("failed to load remote CA certs: %v", err)
		}
	}
	// May return nil, if the CA is missing required configs - This is not an error.
	if caOpts.ExternalCAType != "" {
		if s.RA, err = s.createIstioRA(caOpts); err != nil {
			return fmt.Errorf("failed to create RA: %v", err)
		}
	}
	if !s.isCADisabled() {
		if s.CA, err = s.createIstioCA(caOpts); err != nil {
			return fmt.Errorf("failed to create CA: %v", err)
		}
	}
	return nil
}
// shouldStartNsController reports whether the namespace controller that
// distributes the root cert to namespaces should run.
func (s *Server) shouldStartNsController() bool {
	if s.isCADisabled() {
		// Signing is delegated externally, but distribution is still ours.
		return true
	}
	if s.CA == nil {
		return false
	}
	switch features.PilotCertProvider {
	case constants.CertProviderKubernetes:
		// For Kubernetes CA, we don't distribute it; it is mounted in all pods by Kubernetes.
		return false
	case constants.CertProviderNone:
		// For no CA we don't distribute it either, as there is no cert.
		return false
	}
	return true
}
// startCA registers a start task that runs the RA server if configured,
// otherwise the CA server. No-op when neither exists.
func (s *Server) startCA(caOpts *caOptions) {
	if s.CA == nil && s.RA == nil {
		return
	}
	s.addStartFunc("ca", func(stop <-chan struct{}) error {
		// Prefer the dedicated secure gRPC server when one exists.
		grpcServer := s.grpcServer
		if s.secureGrpcServer != nil {
			grpcServer = s.secureGrpcServer
		}
		// Start the RA server if configured, else start the CA server.
		switch {
		case s.RA != nil:
			log.Infof("Starting RA")
			s.RunCA(grpcServer, s.RA, caOpts)
		case s.CA != nil:
			log.Infof("Starting IstioD CA")
			s.RunCA(grpcServer, s.CA, caOpts)
		}
		return nil
	})
}
// initMeshHandlers initializes mesh and network handlers.
// changeHandler is invoked with the new mesh config on every change.
func (s *Server) initMeshHandlers(changeHandler func(_ *meshconfig.MeshConfig)) {
	log.Info("initializing mesh handlers")
	// When the mesh config or networks change, do a full push.
	s.environment.AddMeshHandler(func() {
		// Keep the process-wide trust domain in sync with the mesh config.
		spiffe.SetTrustDomain(s.environment.Mesh().GetTrustDomain())
		changeHandler(s.environment.Mesh())
		s.XDSServer.ConfigUpdate(&model.PushRequest{
			Full:   true,
			Reason: model.NewReasonStats(model.GlobalUpdate),
		})
	})
}
// addIstioCAToTrustBundle adds the Istio CA's root certificate to the
// workload trust bundle. No-op when no CA is configured.
//
// Fixes: flattened the redundant nested if/double-return, and the failure log
// now includes the underlying error so the problem is diagnosable.
func (s *Server) addIstioCAToTrustBundle(args *PilotArgs) error {
	if s.CA == nil {
		return nil
	}
	// If IstioCA is setup, derive trustAnchor directly from CA.
	rootCerts := []string{string(s.CA.GetCAKeyCertBundle().GetRootCertPem())}
	err := s.workloadTrustBundle.UpdateTrustAnchor(&tb.TrustAnchorUpdate{
		TrustAnchorConfig: tb.TrustAnchorConfig{Certs: rootCerts},
		Source:            tb.SourceIstioCA,
	})
	if err != nil {
		log.Errorf("unable to add CA root from namespace %s as trustAnchor: %v", args.Namespace, err)
		return err
	}
	return nil
}
// initWorkloadTrustBundle wires up the workload trust bundle when multi-root
// mesh support is enabled. Trust anchor updates trigger a full XDS push, and
// anchors are sourced from: remote endpoints (polled), mesh config (initial
// plus change callback), the Istio CA, and the Istio RA.
func (s *Server) initWorkloadTrustBundle(args *PilotArgs) error {
	var err error
	if !features.MultiRootMesh {
		return nil
	}
	// Any trust anchor change results in a full push to all proxies.
	s.workloadTrustBundle.UpdateCb(func() {
		pushReq := &model.PushRequest{
			Full:   true,
			Reason: model.NewReasonStats(model.GlobalUpdate),
		}
		s.XDSServer.ConfigUpdate(pushReq)
	})
	s.addStartFunc("remote trust anchors", func(stop <-chan struct{}) error {
		go s.workloadTrustBundle.ProcessRemoteTrustAnchors(stop, tb.RemoteDefaultPollPeriod)
		return nil
	})

	// MeshConfig: Add initial roots
	err = s.workloadTrustBundle.AddMeshConfigUpdate(s.environment.Mesh())
	if err != nil {
		return err
	}
	// MeshConfig: Add callback for mesh config update
	s.environment.AddMeshHandler(func() {
		_ = s.workloadTrustBundle.AddMeshConfigUpdate(s.environment.Mesh())
	})

	err = s.addIstioCAToTrustBundle(args)
	if err != nil {
		return err
	}

	// IstioRA: Explicitly add roots corresponding to RA
	if s.RA != nil {
		// Implicitly add the Istio RA certificates to the Workload Trust Bundle
		rootCerts := []string{string(s.RA.GetCAKeyCertBundle().GetRootCertPem())}
		err = s.workloadTrustBundle.UpdateTrustAnchor(&tb.TrustAnchorUpdate{
			TrustAnchorConfig: tb.TrustAnchorConfig{Certs: rootCerts},
			Source:            tb.SourceIstioRA,
		})
		if err != nil {
			log.Errorf("fatal: unable to add RA root as trustAnchor")
			return err
		}
	}
	log.Infof("done initializing workload trustBundle")
	return nil
}
// isCADisabled returns whether CA functionality is disabled in istiod.
// It returns true only if an RA server exists and istiod certs are signed by
// Kubernetes (workload certs delegated to an external signer).
func (s *Server) isCADisabled() bool {
	if s.RA == nil {
		return false
	}
	switch {
	// do not create CA server if PilotCertProvider is `kubernetes` and RA server exists
	case features.PilotCertProvider == constants.CertProviderKubernetes:
		return true
	// do not create CA server if PilotCertProvider is `k8s.io/*` and RA server exists
	case strings.HasPrefix(features.PilotCertProvider, constants.CertProviderKubernetesSignerPrefix):
		return true
	}
	return false
}
// initStatusManager registers a start task that creates and runs the status
// manager, which writes resource status back through the RW config store.
func (s *Server) initStatusManager(_ *PilotArgs) {
	s.addStartFunc("status manager", func(stop <-chan struct{}) error {
		s.statusManager = status.NewManager(s.RWConfigStore)
		s.statusManager.Start(stop)
		return nil
	})
}
// serveHTTP binds the HTTP listener and serves it on a background goroutine
// so that readiness endpoints become reachable as early as possible. The
// bound address is recorded in s.httpAddr.
func (s *Server) serveHTTP() error {
	// At this point we are ready - start Http Listener so that it can respond to readiness events.
	listener, err := net.Listen("tcp", s.httpServer.Addr)
	if err != nil {
		return err
	}
	s.httpAddr = listener.Addr().String()
	go func() {
		log.Infof("starting HTTP service at %s", listener.Addr())
		// Expected shutdown errors are filtered out by IsUnexpectedListenerError.
		if err := s.httpServer.Serve(listener); network.IsUnexpectedListenerError(err) {
			log.Errorf("error serving http server: %v", err)
		}
	}()
	return nil
}
// initReadinessProbes registers the readiness probes for XDS discovery,
// sidecar injection, and config validation. Registration order carries no
// meaning (the original used an unordered map).
func (s *Server) initReadinessProbes() {
	s.addReadinessProbe("discovery", func() bool {
		return s.XDSServer.IsServerReady()
	})
	s.addReadinessProbe("sidecar injector", func() bool {
		return s.readinessFlags.sidecarInjectorReady.Load()
	})
	s.addReadinessProbe("config validation", func() bool {
		return s.readinessFlags.configValidationReady.Load()
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"fmt"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
kubecontroller "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// ServiceController returns the aggregate service controller backing service
// discovery. The type assertion assumes ServiceDiscovery was wired up as an
// *aggregate.Controller by bootstrap; it panics otherwise.
func (s *Server) ServiceController() *aggregate.Controller {
	return s.environment.ServiceDiscovery.(*aggregate.Controller)
}
// initServiceControllers creates and registers the service registries
// requested in the Pilot arguments (plus the built-in ServiceEntry registry),
// then schedules the aggregate controller as a start task. Duplicate registry
// names are warned about and skipped; unknown names are an error.
func (s *Server) initServiceControllers(args *PilotArgs) error {
	aggregateController := s.ServiceController()
	s.serviceEntryController = serviceentry.NewController(
		s.configController, s.XDSServer,
		serviceentry.WithClusterID(s.clusterID),
	)
	aggregateController.AddRegistry(s.serviceEntryController)

	seen := sets.New[provider.ID]()
	for _, name := range args.RegistryOptions.Registries {
		id := provider.ID(name)
		if seen.InsertContains(id) {
			log.Warnf("%s registry specified multiple times.", name)
			continue
		}
		log.Infof("Adding %s registry adapter", id)
		switch id {
		case provider.Kubernetes:
			if err := s.initKubeRegistry(args); err != nil {
				return err
			}
		default:
			return fmt.Errorf("service registry %s is not supported", name)
		}
	}

	// Defer running of the service controllers.
	s.addStartFunc("service controllers", func(stop <-chan struct{}) error {
		go aggregateController.Run(stop)
		return nil
	})
	return nil
}
// initKubeRegistry creates all the k8s service controllers under this pilot:
// it fills in the kube registry options from server state and registers the
// multicluster handler that spawns per-cluster controllers.
//
// Fix: the original declared a named `err` result that was never assigned and
// naked-returned it; the function always returns nil, so say so explicitly.
func (s *Server) initKubeRegistry(args *PilotArgs) error {
	args.RegistryOptions.KubeOptions.ClusterID = s.clusterID
	args.RegistryOptions.KubeOptions.Metrics = s.environment
	args.RegistryOptions.KubeOptions.XDSUpdater = s.XDSServer
	args.RegistryOptions.KubeOptions.MeshNetworksWatcher = s.environment.NetworksWatcher
	args.RegistryOptions.KubeOptions.MeshWatcher = s.environment.Watcher
	args.RegistryOptions.KubeOptions.SystemNamespace = args.Namespace
	args.RegistryOptions.KubeOptions.MeshServiceController = s.ServiceController()
	// pass namespace to k8s service registry
	args.RegistryOptions.KubeOptions.DiscoveryNamespacesFilter = s.multiclusterController.DiscoveryNamespacesFilter

	s.multiclusterController.AddHandler(kubecontroller.NewMulticluster(args.PodName,
		s.kubeClient.Kube(),
		args.RegistryOptions.ClusterRegistriesNamespace,
		args.RegistryOptions.KubeOptions,
		s.serviceEntryController,
		s.configController,
		s.istiodCertBundleWatcher,
		args.Revision,
		s.shouldStartNsController(),
		s.environment.ClusterLocal(),
		s.server))
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"context"
"fmt"
"os"
"path/filepath"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/webhooks"
)
const (
	// webhookName is the name of the webhook config in the config - no need to change it.
	webhookName = "sidecar-injector.istio.io"
	// defaultInjectorConfigMapName is the default name of the ConfigMap with the injection config.
	// The actual name can be different - use getInjectorConfigMapName.
	defaultInjectorConfigMapName = "istio-sidecar-injector"
)

// injectionEnabled gates the mutating webhook handler; when false the sidecar
// injector is skipped entirely.
var injectionEnabled = env.Register("INJECT_ENABLED", true, "Enable mutating webhook handler.")
// initSidecarInjector sets up the sidecar injection webhook when injection is
// enabled and a template can be found — preferring a template on disk, then
// falling back to the revision's injector ConfigMap. It returns (nil, nil)
// when injection is intentionally skipped (disabled, or no template).
func (s *Server) initSidecarInjector(args *PilotArgs) (*inject.Webhook, error) {
	// currently the constant: "./var/lib/istio/inject"
	injectPath := args.InjectionOptions.InjectionDirectory
	if injectPath == "" || !injectionEnabled.Get() {
		log.Infof("Skipping sidecar injector, injection path is missing or disabled.")
		return nil, nil
	}

	// If the injection config exists either locally or remotely, we will set up injection.
	var watcher inject.Watcher
	if _, err := os.Stat(filepath.Join(injectPath, "config")); !os.IsNotExist(err) {
		// Local template on disk: watch the config and values files.
		configFile := filepath.Join(injectPath, "config")
		valuesFile := filepath.Join(injectPath, "values")
		watcher, err = inject.NewFileWatcher(configFile, valuesFile)
		if err != nil {
			return nil, err
		}
	} else if s.kubeClient != nil {
		// No local template: look for the injector ConfigMap for this revision.
		configMapName := getInjectorConfigMapName(args.Revision)
		cms := s.kubeClient.Kube().CoreV1().ConfigMaps(args.Namespace)
		if _, err := cms.Get(context.TODO(), configMapName, metav1.GetOptions{}); err != nil {
			if errors.IsNotFound(err) {
				log.Infof("Skipping sidecar injector, template not found")
				return nil, nil
			}
			return nil, err
		}
		watcher = inject.NewConfigMapWatcher(s.kubeClient, args.Namespace, configMapName, "config", "values")
	} else {
		log.Infof("Skipping sidecar injector, template not found")
		return nil, nil
	}
	log.Info("initializing sidecar injector")
	parameters := inject.WebhookParameters{
		Watcher:    watcher,
		Env:        s.environment,
		Mux:        s.httpsMux,
		Revision:   args.Revision,
		KubeClient: s.kubeClient,
	}
	wh, err := inject.NewWebhook(parameters)
	if err != nil {
		return nil, fmt.Errorf("failed to create injection webhook: %v", err)
	}
	// Patch cert if a webhook config name is provided.
	// This requires RBAC permissions - a low-priv Istiod should not attempt to patch but rely on
	// operator or CI/CD
	if features.InjectionWebhookConfigName != "" {
		s.addStartFunc("injection patcher", func(stop <-chan struct{}) error {
			// No leader election - different istiod revisions will patch their own cert.
			// update webhook configuration by watching the cabundle
			patcher, err := webhooks.NewWebhookCertPatcher(s.kubeClient, args.Revision, webhookName, s.istiodCertBundleWatcher)
			if err != nil {
				// Patching is best-effort: log and continue rather than failing startup.
				log.Errorf("failed to create webhook cert patcher: %v", err)
				return nil
			}
			go patcher.Run(stop)
			return nil
		})
	}
	s.addStartFunc("injection server", func(stop <-chan struct{}) error {
		wh.Run(stop)
		return nil
	})
	return wh, nil
}
// getInjectorConfigMapName returns the name of the ConfigMap holding the
// injection template for the given revision. The empty and "default"
// revisions map to the base name; any other revision gets a "-<revision>"
// suffix.
func getInjectorConfigMapName(revision string) string {
	switch revision {
	case "", "default":
		return defaultInjectorConfigMapName
	default:
		return defaultInjectorConfigMapName + "-" + revision
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/ledger"
)
// hasKubeRegistry reports whether any of the given registry names refers to
// the Kubernetes service registry provider.
func hasKubeRegistry(registries []string) bool {
	for _, name := range registries {
		if provider.ID(name) == provider.Kubernetes {
			return true
		}
	}
	return false
}
// buildLedger returns a real distribution-tracking ledger when tracking is
// enabled, and a disabled stub otherwise.
func buildLedger(ca RegistryOptions) ledger.Ledger {
	if ca.DistributionTrackingEnabled {
		return ledger.Make(ca.DistributionCacheRetention)
	}
	return &model.DisabledLedger{}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/webhooks/validation/controller"
"istio.io/istio/pkg/webhooks/validation/server"
)
// initConfigValidation starts the config validation webhook server and, when
// a validation webhook config name is set, the controller that manages its
// caBundle. No-op without a kube client.
//
// Fix: removed the redundant `s.kubeClient != nil` re-check — the early
// return at the top already guarantees it.
func (s *Server) initConfigValidation(args *PilotArgs) error {
	if s.kubeClient == nil {
		return nil
	}

	log.Info("initializing config validator")
	// always start the validation server
	params := server.Options{
		Schemas:      collections.PilotGatewayAPI(),
		DomainSuffix: args.RegistryOptions.KubeOptions.DomainSuffix,
		Mux:          s.httpsMux,
	}
	// The server registers its handlers on the mux; the instance itself is not needed.
	if _, err := server.New(params); err != nil {
		return err
	}
	s.readinessFlags.configValidationReady.Store(true)
	if features.ValidationWebhookConfigName != "" {
		s.addStartFunc("validation controller", func(stop <-chan struct{}) error {
			log.Infof("Starting validation controller")
			go controller.NewValidatingWebhookController(
				s.kubeClient, args.Revision, args.Namespace, s.istiodCertBundleWatcher).Run(stop)
			return nil
		})
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"crypto/tls"
"log"
"net/http"
"strings"
istiolog "istio.io/istio/pkg/log"
)
// httpServerErrorLogWriter adapts istio logging to http.Server.ErrorLog.
type httpServerErrorLogWriter struct{}

// Write filters webhook http.Server error output. Specifically, messages like
//
//	http: TLS handshake error from 127.0.0.1:<PORT>: EOF
//
// (clients sending RST mid-handshake; httpsReadyClient can trigger this
// periodically under concurrent probing) are demoted to debug, while all
// other messages are logged at info.
func (*httpServerErrorLogWriter) Write(p []byte) (int, error) {
	msg := strings.TrimSuffix(string(p), "\n")
	isHandshakeEOF := strings.HasPrefix(msg, "http: TLS handshake error") && strings.HasSuffix(msg, ": EOF")
	if isHandshakeEOF {
		istiolog.Debug(msg)
	} else {
		istiolog.Info(msg)
	}
	return len(p), nil
}
// initSecureWebhookServer handles initialization for the HTTPS webhook server.
// If https address is off the injection handlers will be registered on the main http endpoint, with
// TLS handled by a proxy/gateway in front of Istiod.
func (s *Server) initSecureWebhookServer(args *PilotArgs) {
	// With no HTTPS address configured, multiplex the webhook handlers onto the main HTTP mux.
	if args.ServerOptions.HTTPSAddr == "" {
		s.httpsMux = s.httpMux
		istiolog.Infof("HTTPS port is disabled, multiplexing webhooks on the httpAddr %v", args.ServerOptions.HTTPAddr)
		return
	}

	istiolog.Info("initializing secure webhook server for istiod webhooks")
	// create the https server for hosting the k8s injectionWebhook handlers.
	s.httpsMux = http.NewServeMux()
	s.httpsServer = &http.Server{
		Addr: args.ServerOptions.HTTPSAddr,
		// Filter noisy TLS-handshake-EOF messages; see httpServerErrorLogWriter.
		ErrorLog: log.New(&httpServerErrorLogWriter{}, "", 0),
		Handler:  s.httpsMux,
		TLSConfig: &tls.Config{
			GetCertificate: s.getIstiodCertificate,
			MinVersion:     tls.VersionTLS12,
			CipherSuites:   args.ServerOptions.TLSOptions.CipherSuits,
		},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package aggregate implements a read-only aggregator for config stores.
package aggregate
import (
"errors"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// errorUnsupported is returned by every mutating operation when the aggregate
// store was built without a writer.
var errorUnsupported = errors.New("unsupported operation: the config aggregator is read-only")
// makeStore creates an aggregate config store from several config stores and
// unifies their descriptors. Each GroupVersionKind's schema is contributed by
// the first store that declares it; reads fan out to every declaring store.
// Writes are delegated to the optional writer store.
func makeStore(stores []model.ConfigStore, writer model.ConfigStore) (model.ConfigStore, error) {
	builder := collection.NewSchemasBuilder()
	byKind := make(map[config.GroupVersionKind][]model.ConfigStore)
	for _, cs := range stores {
		for _, schema := range cs.Schemas().All() {
			gvk := schema.GroupVersionKind()
			if _, seen := byKind[gvk]; !seen {
				if err := builder.Add(schema); err != nil {
					return nil, err
				}
			}
			byKind[gvk] = append(byKind[gvk], cs)
		}
	}
	unified := builder.Build()
	if err := unified.Validate(); err != nil {
		return nil, err
	}
	return &store{
		schemas: unified,
		stores:  byKind,
		writer:  writer,
	}, nil
}
// MakeWriteableCache creates an aggregate config store cache from several config store caches. An additional
// `writer` config store is passed, which may or may not be part of `caches`.
func MakeWriteableCache(caches []model.ConfigStoreController, writer model.ConfigStore) (model.ConfigStoreController, error) {
	// Widen the controllers to plain ConfigStores for aggregation.
	stores := make([]model.ConfigStore, len(caches))
	for i, c := range caches {
		stores[i] = c
	}
	aggregated, err := makeStore(stores, writer)
	if err != nil {
		return nil, err
	}
	return &storeCache{
		ConfigStore: aggregated,
		caches:      caches,
	}, nil
}
// MakeCache creates an aggregate config store cache from several config store
// caches. The result is read-only: all write operations return an error.
func MakeCache(caches []model.ConfigStoreController) (model.ConfigStoreController, error) {
	return MakeWriteableCache(caches, nil)
}
// store is a read-only aggregate over several ConfigStores, with writes
// optionally delegated to a single writer store.
type store struct {
	// schemas is the unified set of schemas contributed by all underlying stores.
	schemas collection.Schemas
	// stores is a mapping from config type to every store that serves that type.
	stores map[config.GroupVersionKind][]model.ConfigStore
	// writer, when non-nil, receives all mutating operations.
	writer model.ConfigStore
}
// Schemas returns the unified schemas of all underlying stores.
func (cr *store) Schemas() collection.Schemas {
	return cr.schemas
}
// Get returns the first matching config found across the stores registered
// for the given kind, or nil when none has it.
func (cr *store) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	for _, cs := range cr.stores[typ] {
		if cfg := cs.Get(typ, name, namespace); cfg != nil {
			return cfg
		}
	}
	return nil
}
// List returns all configs of the given kind across the stores, concatenated
// in store order and de-duplicated by namespace/name — the first occurrence
// wins.
func (cr *store) List(typ config.GroupVersionKind, namespace string) []config.Config {
	relevant := cr.stores[typ]
	if len(relevant) == 0 {
		return nil
	}

	// First pass: gather per-store results and the total count so the merged
	// slice can be allocated exactly once.
	perStore := make([][]config.Config, 0, len(relevant))
	total := 0
	for _, cs := range relevant {
		listed := cs.List(typ, namespace)
		perStore = append(perStore, listed)
		total += len(listed)
	}

	merged := make([]config.Config, 0, total)
	for _, listed := range perStore {
		merged = append(merged, listed...)
	}

	// Second pass: drop duplicates in place, keeping first occurrences.
	seen := sets.NewWithLength[types.NamespacedName](total)
	return slices.FilterInPlace(merged, func(c config.Config) bool {
		return !seen.InsertContains(c.NamespacedName())
	})
}
// Delete removes a config through the writer store; read-only aggregates reject it.
func (cr *store) Delete(typ config.GroupVersionKind, name, namespace string, resourceVersion *string) error {
	w := cr.writer
	if w == nil {
		return errorUnsupported
	}
	return w.Delete(typ, name, namespace, resourceVersion)
}

// Create adds a config through the writer store; read-only aggregates reject it.
func (cr *store) Create(c config.Config) (string, error) {
	w := cr.writer
	if w == nil {
		return "", errorUnsupported
	}
	return w.Create(c)
}

// Update replaces a config through the writer store; read-only aggregates reject it.
func (cr *store) Update(c config.Config) (string, error) {
	w := cr.writer
	if w == nil {
		return "", errorUnsupported
	}
	return w.Update(c)
}

// UpdateStatus writes status through the writer store; read-only aggregates reject it.
func (cr *store) UpdateStatus(c config.Config) (string, error) {
	w := cr.writer
	if w == nil {
		return "", errorUnsupported
	}
	return w.UpdateStatus(c)
}

// Patch applies patchFn through the writer store; read-only aggregates reject it.
func (cr *store) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	w := cr.writer
	if w == nil {
		return "", errorUnsupported
	}
	return w.Patch(orig, patchFn)
}
// storeCache layers the controller behaviors (sync status, event handlers,
// run loop) of the underlying caches on top of the aggregate read store.
type storeCache struct {
	model.ConfigStore
	// caches are the controllers whose lifecycle this aggregate manages.
	caches []model.ConfigStoreController
}
// HasSynced reports true only once every underlying cache has synced.
func (cr *storeCache) HasSynced() bool {
	for _, c := range cr.caches {
		if !c.HasSynced() {
			return false
		}
	}
	return true
}

// RegisterEventHandler registers the handler on every cache that serves the kind.
func (cr *storeCache) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
	for _, c := range cr.caches {
		if _, served := c.Schemas().FindByGroupVersionKind(kind); served {
			c.RegisterEventHandler(kind, handler)
		}
	}
}

// Run starts every underlying cache on its own goroutine and blocks until
// stop is closed.
func (cr *storeCache) Run(stop <-chan struct{}) {
	for _, c := range cr.caches {
		go c.Run(stop)
	}
	<-stop
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package file
import (
"bufio"
"bytes"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"io"
"strings"
"sync"
"github.com/hashicorp/go-multierror"
yamlv3 "gopkg.in/yaml.v3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
kubeJson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/yaml"
kubeyaml2 "istio.io/istio/pilot/pkg/config/file/util/kubeyaml"
"istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
legacykube "istio.io/istio/pkg/config/analysis/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collection"
sresource "istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
var (
	// inMemoryKubeNameDiscriminator generates unique names for successive in-memory sources.
	inMemoryKubeNameDiscriminator int64
	// scope is the logging scope for file client messages.
	scope = log.RegisterScope("file", "File client messages")
)
// KubeSource is an in-memory source implementation that can handle K8s style resources.
type KubeSource struct {
	// mu guards the shas/byFile bookkeeping during ApplyContent, RemoveContent,
	// and ContentNames.
	mu sync.Mutex

	// name uniquely identifies this source instance.
	name string
	// schemas is the set of schemas this source understands.
	schemas *collection.Schemas
	// inner is the backing in-memory config store.
	inner model.ConfigStore
	// defaultNs is assigned via SetDefaultNamespace for resources with no namespace.
	defaultNs resource.Namespace
	// shas tracks content hashes so unchanged resources are not re-applied.
	shas map[kubeResourceKey]resourceSha
	// byFile maps a content name to the resource keys applied from that content.
	byFile map[string]map[kubeResourceKey]config.GroupVersionKind
	// If meshConfig.DiscoverySelectors are specified, the namespacesFilter tracks the namespaces this controller watches.
	namespacesFilter func(obj interface{}) bool
}
// Schemas returns the schemas this source was created with.
func (s *KubeSource) Schemas() collection.Schemas {
	return *s.schemas
}

// Get delegates to the backing in-memory store.
func (s *KubeSource) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	return s.inner.Get(typ, name, namespace)
}
// List returns the configs of the given kind, applying the namespaces filter
// when one has been configured via SetNamespacesFilter.
func (s *KubeSource) List(typ config.GroupVersionKind, namespace string) []config.Config {
	listed := s.inner.List(typ, namespace)
	if s.namespacesFilter == nil {
		return listed
	}
	return slices.Filter(listed, func(c config.Config) bool {
		return s.namespacesFilter(c)
	})
}
// Create delegates to the backing in-memory store.
func (s *KubeSource) Create(config config.Config) (revision string, err error) {
	return s.inner.Create(config)
}

// Update delegates to the backing in-memory store.
func (s *KubeSource) Update(config config.Config) (newRevision string, err error) {
	return s.inner.Update(config)
}

// UpdateStatus delegates to the backing in-memory store.
func (s *KubeSource) UpdateStatus(config config.Config) (newRevision string, err error) {
	return s.inner.UpdateStatus(config)
}

// Patch delegates to the backing in-memory store.
func (s *KubeSource) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	return s.inner.Patch(orig, patchFn)
}

// Delete delegates to the backing in-memory store.
func (s *KubeSource) Delete(typ config.GroupVersionKind, name, namespace string, resourceVersion *string) error {
	return s.inner.Delete(typ, name, namespace, resourceVersion)
}

// RegisterEventHandler is not supported by this source.
func (s *KubeSource) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
	panic("implement me")
}

// Run is a no-op: this source has no background processing.
func (s *KubeSource) Run(stop <-chan struct{}) {
}

// HasSynced always reports true: contents are applied synchronously.
func (s *KubeSource) HasSynced() bool {
	return true
}
// resourceSha is the sha256 digest of a resource's serialized content, used
// to detect whether a re-applied resource actually changed.
type resourceSha [sha256.Size]byte

// kubeResource is a parsed resource together with its schema and content hash.
type kubeResource struct {
	// resource *resource.Instance
	config *config.Config
	schema sresource.Schema
	sha    resourceSha
}
// newKey builds the (kind, full name) key identifying this resource in the
// source's bookkeeping maps.
func (r *kubeResource) newKey() kubeResourceKey {
	return kubeResourceKey{kind: r.schema.Kind(), fullName: r.fullName()}
}
// fullName returns the namespace/name pair of the underlying config.
func (r *kubeResource) fullName() resource.FullName {
	ns := resource.Namespace(r.config.Namespace)
	local := resource.LocalName(r.config.Name)
	return resource.NewFullName(ns, local)
}
// kubeResourceKey uniquely identifies a resource by its kind plus its full
// (namespace + name) name.
type kubeResourceKey struct {
	fullName resource.FullName
	kind     string
}

// Compile-time assertion that KubeSource implements model.ConfigStore.
var _ model.ConfigStore = &KubeSource{}
// NewKubeSource returns a new in-memory Source that works with Kubernetes resources.
// NOTE(review): the package-level name discriminator is incremented without
// synchronization — presumably sources are constructed from a single
// goroutine; confirm before calling concurrently.
func NewKubeSource(schemas collection.Schemas) *KubeSource {
	id := inMemoryKubeNameDiscriminator
	inMemoryKubeNameDiscriminator++
	return &KubeSource{
		name:    fmt.Sprintf("kube-inmemory-%d", id),
		schemas: &schemas,
		inner:   memory.MakeSkipValidation(schemas),
		shas:    map[kubeResourceKey]resourceSha{},
		byFile:  map[string]map[kubeResourceKey]config.GroupVersionKind{},
	}
}
// SetDefaultNamespace enables injecting a default namespace for resources where none is already specified
func (s *KubeSource) SetDefaultNamespace(defaultNs resource.Namespace) {
	s.defaultNs = defaultNs
}

// SetNamespacesFilter enables filtering the namespaces this controller watches.
func (s *KubeSource) SetNamespacesFilter(namespacesFilter func(obj interface{}) bool) {
	s.namespacesFilter = namespacesFilter
}

// Clear the contents of this source
// NOTE(review): unlike ApplyContent/RemoveContent, this does not take s.mu —
// presumably it is only called while no concurrent appliers exist; confirm.
func (s *KubeSource) Clear() {
	s.shas = make(map[kubeResourceKey]resourceSha)
	s.byFile = make(map[string]map[kubeResourceKey]config.GroupVersionKind)
	s.inner = memory.MakeSkipValidation(*s.schemas)
}
// ContentNames returns the set of content names previously applied to this
// source via ApplyContent.
func (s *KubeSource) ContentNames() map[string]struct{} {
	s.mu.Lock()
	defer s.mu.Unlock()
	names := sets.New[string]()
	for name := range s.byFile {
		names.Insert(name)
	}
	return names
}
// ApplyContent applies the given yamltext to this source. The content is tracked with the given name. If ApplyContent
// gets called multiple times with the same name, the contents applied by the previous incarnation will be overwritten
// or removed, depending on the new content.
// Returns an error if any were encountered, but that still may represent a partial success
func (s *KubeSource) ApplyContent(name, yamlText string) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// We hold off on dealing with parseErr until the end, since partial success is possible
	resources, parseErrs := s.parseContent(s.schemas, name, yamlText)

	oldKeys := s.byFile[name]
	newKeys := make(map[kubeResourceKey]config.GroupVersionKind)

	for _, r := range resources {
		key := r.newKey()
		oldSha, found := s.shas[key]
		// Only write the resource when it is new or its content hash changed.
		if !found || oldSha != r.sha {
			scope.Debugf("KubeSource.ApplyContent: Set: %v/%v", r.schema.GroupVersionKind(), r.fullName())
			// apply is idempotent, but configstore is not, thus the odd logic here
			_, err := s.inner.Update(*r.config)
			if err != nil {
				_, err = s.inner.Create(*r.config)
				if err != nil {
					return fmt.Errorf("cannot store config %s/%s %s from reader: %s",
						r.schema.Version(), r.schema.Kind(), r.fullName(), err)
				}
			}
			s.shas[key] = r.sha
		}
		newKeys[key] = r.schema.GroupVersionKind()
		if oldKeys != nil {
			// Resource is still present in the new content; removing it from
			// oldKeys protects it from the deletion sweep below.
			scope.Debugf("KubeSource.ApplyContent: Delete: %v/%v", r.schema.GroupVersionKind(), key)
			delete(oldKeys, key)
		}
	}
	// Whatever remains in oldKeys was dropped from the new content: delete it.
	for k, col := range oldKeys {
		empty := ""
		err := s.inner.Delete(col, k.fullName.Name.String(), k.fullName.Namespace.String(), &empty)
		if err != nil {
			scope.Errorf("encountered unexpected error removing resource from filestore: %s", err)
		}
	}
	s.byFile[name] = newKeys

	if parseErrs != nil {
		return fmt.Errorf("errors parsing content %q: %v", name, parseErrs)
	}
	return nil
}
// RemoveContent deletes every resource previously applied under the given
// content name, along with the hash bookkeeping, then forgets the name.
func (s *KubeSource) RemoveContent(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()

	keys := s.byFile[name]
	if keys == nil {
		return
	}
	for key, gvk := range keys {
		rv := ""
		if err := s.inner.Delete(gvk, key.fullName.Name.String(), key.fullName.Namespace.String(), &rv); err != nil {
			scope.Errorf("encountered unexpected error removing resource from filestore: %s", err)
		}
		delete(s.shas, key)
	}
	delete(s.byFile, name)
}
// parseContent splits yamlText into YAML documents and parses each into kubeResources.
// Failures on individual chunks are logged and accumulated rather than aborting, so a
// non-nil error may accompany a non-empty resource slice (partial success).
func (s *KubeSource) parseContent(r *collection.Schemas, name, yamlText string) ([]kubeResource, error) {
	var resources []kubeResource
	var errs error

	reader := bufio.NewReader(strings.NewReader(yamlText))
	decoder := kubeyaml2.NewYAMLReader(reader)
	chunkCount := -1

	for {
		chunkCount++
		doc, lineNum, err := decoder.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			// A reader error is not recoverable: record it and stop consuming this input.
			e := fmt.Errorf("error reading documents in %s[%d]: %v", name, chunkCount, err)
			scope.Warnf("%v - skipping", e)
			scope.Debugf("Failed to parse yamlText chunk: %v", yamlText)
			errs = multierror.Append(errs, e)
			break
		}

		// Skip blank documents (e.g. consecutive "---" separators).
		chunk := bytes.TrimSpace(doc)
		if len(chunk) == 0 {
			continue
		}

		chunkResources, err := s.parseChunk(r, name, lineNum, chunk)
		if err != nil {
			var uerr *unknownSchemaError
			if errors.As(err, &uerr) {
				// Unknown kinds are silently skipped (debug log only), not errors.
				scope.Debugf("skipping unknown yaml chunk %s: %s", name, uerr.Error())
			} else {
				e := fmt.Errorf("error processing %s[%d]: %v", name, chunkCount, err)
				scope.Warnf("%v - skipping", e)
				scope.Debugf("Failed to parse yaml chunk: %v", string(chunk))
				errs = multierror.Append(errs, e)
			}
			continue
		}
		resources = append(resources, chunkResources...)
	}

	return resources, errs
}
// unknownSchemaError represents a schema was not found for a group+version+kind.
type unknownSchemaError struct {
	group   string
	version string
	kind    string
}

// Error implements the error interface, identifying the missing schema.
func (e unknownSchemaError) Error() string {
	gvk := e.group + "/" + e.version + "/" + e.kind
	return fmt.Sprintf("failed finding schema for group/version/kind: %s", gvk)
}
// parseChunk parses a single YAML document into zero or more kubeResources.
// lineNum is the 1-based line on which the chunk starts within the original input;
// it is used to compute source positions for analyzers. Chunks whose kind begins
// with "List" are expanded recursively via their "items" field. Returns an
// *unknownSchemaError when the chunk's group/version/kind is not in the schemas.
func (s *KubeSource) parseChunk(r *collection.Schemas, name string, lineNum int, yamlChunk []byte) ([]kubeResource, error) {
	resources := make([]kubeResource, 0)

	// Convert to JSON
	jsonChunk, err := yaml.ToJSON(yamlChunk)
	if err != nil {
		return resources, fmt.Errorf("failed converting YAML to JSON: %v", err)
	}

	// ignore null json
	if len(jsonChunk) == 0 || bytes.Equal(jsonChunk, []byte("null")) {
		return resources, nil
	}

	// Peek at the beginning of the JSON to determine the group/version/kind
	// without fully decoding the document.
	groupVersionKind, err := kubeJson.DefaultMetaFactory.Interpret(jsonChunk)
	if err != nil {
		return resources, fmt.Errorf("failed interpreting jsonChunk: %v", err)
	}

	// A kind beginning with "List" wraps multiple resources in an "items" field;
	// expand each item and parse it recursively. (This replaces a previous
	// character-by-character comparison of the first four characters against
	// "L","i","s","t" — HasPrefix is the exact equivalent.)
	if strings.HasPrefix(groupVersionKind.Kind, "List") {
		resourceChunks, err := extractResourceChunksFromListYamlChunk(yamlChunk)
		if err != nil {
			return resources, fmt.Errorf("failed extracting resource chunks from list yaml chunk: %v", err)
		}
		for _, resourceChunk := range resourceChunks {
			lr, err := s.parseChunk(r, name, resourceChunk.lineNum+lineNum, resourceChunk.yamlChunk)
			if err != nil {
				return resources, fmt.Errorf("failed parsing resource chunk: %v", err)
			}
			resources = append(resources, lr...)
		}
		return resources, nil
	}

	if groupVersionKind.Empty() {
		return resources, fmt.Errorf("unable to parse resource with no group, version and kind")
	}

	schema, found := r.FindByGroupVersionAliasesKind(sresource.FromKubernetesGVK(groupVersionKind))
	if !found {
		return resources, &unknownSchemaError{
			group:   groupVersionKind.Group,
			version: groupVersionKind.Version,
			kind:    groupVersionKind.Kind,
		}
	}

	// Cannot create new instance. This occurs because while newer types do not implement proto.Message,
	// this legacy code only supports proto.Messages.
	// Note: while NewInstance can be slightly modified to not return error here, the rest of the code
	// still requires a proto.Message so it won't work without completely refactoring galley/
	_, e := schema.NewInstance()
	cannotHandleProto := e != nil
	if cannotHandleProto {
		return resources, &unknownSchemaError{
			group:   groupVersionKind.Group,
			version: groupVersionKind.Version,
			kind:    groupVersionKind.Kind,
		}
	}

	// Decode the JSON into a typed object allocated from the Istio scheme.
	runtimeScheme := runtime.NewScheme()
	codecs := serializer.NewCodecFactory(runtimeScheme)
	deserializer := codecs.UniversalDeserializer()
	obj, err := kube.IstioScheme.New(schema.GroupVersionKind().Kubernetes())
	if err != nil {
		return resources, fmt.Errorf("failed to initialize interface for built-in type: %v", err)
	}
	_, _, err = deserializer.Decode(jsonChunk, nil, obj)
	if err != nil {
		return resources, fmt.Errorf("failed parsing JSON for built-in type: %v", err)
	}
	objMeta, ok := obj.(metav1.Object)
	if !ok {
		return resources, errors.New("failed to assert type of object metadata")
	}

	// If namespace is blank and we have a default set, fill in the default
	// (This mirrors the behavior if you kubectl apply a resource without a namespace defined)
	// Don't do this for cluster scoped resources
	if !schema.IsClusterScoped() {
		if objMeta.GetNamespace() == "" && s.defaultNs != "" {
			scope.Debugf("KubeSource.parseChunk: namespace not specified for %q, using %q", objMeta.GetName(), s.defaultNs)
			objMeta.SetNamespace(string(s.defaultNs))
		}
	} else {
		// Clear the namespace if there is any specified.
		objMeta.SetNamespace("")
	}

	// Build flat map for analyzers if the line JSON object exists, if the YAML text is ill-formed, this will be nil
	fieldMap := make(map[string]int)

	// yamlv3.Node contains information like line number of the node, which will be used with its name to construct the field map
	yamlChunkNode := yamlv3.Node{}
	err = yamlv3.Unmarshal(yamlChunk, &yamlChunkNode)
	if err == nil && len(yamlChunkNode.Content) == 1 {
		// Get the Node that contains all the YAML chunk information
		yamlNode := yamlChunkNode.Content[0]
		BuildFieldPathMap(yamlNode, lineNum, "", fieldMap)
	}

	pos := legacykube.Position{Filename: name, Line: lineNum}
	c, err := ToConfig(objMeta, schema, &pos, fieldMap)
	if err != nil {
		return resources, err
	}
	return []kubeResource{
		{
			schema: schema,
			sha:    sha256.Sum256(yamlChunk),
			config: c,
		},
	}, nil
}
// resourceYamlChunk is one resource extracted from a YAML List document:
// the re-marshaled YAML for the item plus its line offset within the list chunk.
type resourceYamlChunk struct {
	// lineNum is the item's line number relative to the start of the List chunk.
	lineNum int
	// yamlChunk is the re-marshaled YAML of the single resource.
	yamlChunk []byte
}
// extractResourceChunksFromListYamlChunk splits a YAML chunk whose kind is a List
// into one chunk per entry of its "items" sequence, recording each entry's line
// number so callers can compute absolute source positions.
func extractResourceChunksFromListYamlChunk(chunk []byte) ([]resourceYamlChunk, error) {
	var root yamlv3.Node
	if err := yamlv3.Unmarshal(chunk, &root); err != nil {
		return nil, fmt.Errorf("failed parsing yamlChunk: %v", err)
	}
	if len(root.Content) == 0 {
		return nil, fmt.Errorf("failed parsing yamlChunk: no content")
	}
	doc := root.Content[0]

	// Scan the top-level mapping for the "items" key and land idx on its value node.
	idx := 0
	for ; idx < len(doc.Content); idx++ {
		if doc.Content[idx].Kind == yamlv3.ScalarNode && doc.Content[idx].Value == "items" {
			idx++
			break
		}
	}
	if idx >= len(doc.Content) || doc.Content[idx].Kind != yamlv3.SequenceNode {
		return nil, fmt.Errorf("failed parsing yamlChunk: malformed items field")
	}

	chunks := make([]resourceYamlChunk, 0)
	for _, item := range doc.Content[idx].Content {
		// Every list entry must itself be a mapping (a single resource).
		if item.Kind != yamlv3.MappingNode {
			return nil, fmt.Errorf("failed parsing yamlChunk: malformed items field")
		}
		data, err := yamlv3.Marshal(item)
		if err != nil {
			return nil, fmt.Errorf("failed marshaling yamlChunk: %v", err)
		}
		chunks = append(chunks, resourceYamlChunk{
			lineNum:   item.Line,
			yamlChunk: data,
		})
	}
	return chunks, nil
}
const (
	// FieldMapKey is the annotation key under which the field-path-to-line-number map is stored.
	FieldMapKey = "istiofilefieldmap"
	// ReferenceKey is the annotation key under which the source reference is stored.
	ReferenceKey = "istiosource"
)
// ToConfig converts the given object and proto to a config.Config
// When fieldMap or source is provided, each is serialized to JSON and attached as an
// annotation (FieldMapKey / ReferenceKey) so analyzers can recover origin information.
func ToConfig(object metav1.Object, schema sresource.Schema, source resource.Reference, fieldMap map[string]int) (*config.Config, error) {
	// Convert the typed object into a generic unstructured map so annotations can be attached.
	m, err := runtime.DefaultUnstructuredConverter.ToUnstructured(object)
	if err != nil {
		return nil, err
	}
	u := &unstructured.Unstructured{Object: m}
	if len(fieldMap) > 0 || source != nil {
		// TODO: populate
		annots := u.GetAnnotations()
		if annots == nil {
			annots = map[string]string{}
		}
		jsonfm, err := json.Marshal(fieldMap)
		if err != nil {
			return nil, err
		}
		annots[FieldMapKey] = string(jsonfm)
		jsonsource, err := json.Marshal(source)
		if err != nil {
			return nil, err
		}
		annots[ReferenceKey] = string(jsonsource)
		u.SetAnnotations(annots)
	}
	result := TranslateObject(u, "", schema)
	return result, nil
}
// TranslateObject converts an unstructured object into a config.Config using the
// given schema. It panics if the schema cannot create a spec instance or if the
// unstructured content cannot be converted into it.
func TranslateObject(obj *unstructured.Unstructured, domainSuffix string, schema sresource.Schema) *config.Config {
	spec, err := schema.NewInstance()
	if err != nil {
		panic(err)
	}

	// Prefer the "spec" field when present; otherwise convert the whole object.
	content := obj.UnstructuredContent()
	src := content
	if raw, ok := content["spec"]; ok {
		src = raw.(map[string]any)
	}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(src, spec); err != nil {
		panic(err)
	}

	return &config.Config{
		Meta: config.Meta{
			GroupVersionKind:  schema.GroupVersionKind(),
			UID:               string(obj.GetUID()),
			Name:              obj.GetName(),
			Namespace:         obj.GetNamespace(),
			Labels:            obj.GetLabels(),
			Annotations:       obj.GetAnnotations(),
			ResourceVersion:   obj.GetResourceVersion(),
			CreationTimestamp: obj.GetCreationTimestamp().Time,
			OwnerReferences:   obj.GetOwnerReferences(),
			Generation:        obj.GetGeneration(),
			Domain:            domainSuffix,
		},
		Spec: spec,
	}
}
// BuildFieldPathMap builds the flat map for each field of the YAML resource
// by walking the node tree depth-first and recording, for every leaf field,
// its absolute line number keyed by its dotted path (wrapped in braces).
func BuildFieldPathMap(yamlNode *yamlv3.Node, startLineNum int, curPath string, fieldPathMap map[string]int) {
	// A node without content is a leaf at this level; nothing further to record.
	if len(yamlNode.Content) == 0 {
		return
	}
	content := yamlNode.Content

	// Mapping nodes store key and value consecutively, so advance in steps of two.
	for i := 0; i+1 < len(content); i += 2 {
		keyNode, valueNode := content[i], content[i+1]
		path := fmt.Sprintf("%s.%s", curPath, keyNode.Value)
		switch valueNode.Kind {
		case yamlv3.ScalarNode:
			// "key: value" leaf — record its absolute line.
			// Minus one because both startLineNum and yamlv3.Node lines are 1-based.
			fieldPathMap[fmt.Sprintf("{%s}", path)] = valueNode.Line + startLineNum - 1
		case yamlv3.MappingNode:
			// "key:\n  nested..." — descend into the nested mapping.
			BuildFieldPathMap(valueNode, startLineNum, path, fieldPathMap)
		case yamlv3.SequenceNode:
			for j, item := range valueNode.Content {
				indexed := fmt.Sprintf("%s[%d]", path, j)
				// Sequences hold either scalar values or nested structures.
				if item.Kind == yamlv3.ScalarNode {
					fieldPathMap[fmt.Sprintf("{%s}", indexed)] = item.Line + startLineNum - 1
				} else {
					BuildFieldPathMap(item, startLineNum, indexed, fieldPathMap)
				}
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubeyaml
import (
"bufio"
"bytes"
"io"
"strings"
"unicode"
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
)
const (
	// yamlSeparator terminates one YAML document and begins the next.
	yamlSeparator = "---\n"
	// separator is the bare document separator, without a trailing newline.
	separator = "---"
)

// Join joins the given yaml parts into a single multipart document.
// Empty parts are skipped; a "---" separator line is inserted between
// consecutive parts, preceded by a newline when the previous part did
// not already end with one.
func Join(parts ...[]byte) []byte {
	var b bytes.Buffer
	var lastIsNewLine bool
	for _, p := range parts {
		if len(p) == 0 {
			continue
		}
		if b.Len() != 0 {
			if !lastIsNewLine {
				_, _ = b.WriteString("\n")
			}
			b.WriteString(yamlSeparator)
		}
		_, _ = b.Write(p)
		// Inspect the final byte directly; no need to convert the part to a string.
		lastIsNewLine = p[len(p)-1] == '\n'
	}
	// Fix: return the buffer directly. The previous code routed the result through
	// customBytes.CheckLen — fuzzing instrumentation (github.com/AdamKorcz/bugdetectors)
	// that should not ship, and which carried a stray misplaced comment in its arguments.
	return b.Bytes()
}
// JoinString joins the given yaml parts into a single multipart document.
// It is the string counterpart of Join: empty parts are skipped and a "---"
// separator line is placed between consecutive parts.
func JoinString(parts ...string) string {
	var sb strings.Builder
	endedWithNewline := false
	for _, part := range parts {
		if part == "" {
			continue
		}
		if sb.Len() > 0 {
			if !endedWithNewline {
				_, _ = sb.WriteString("\n")
			}
			sb.WriteString(yamlSeparator)
		}
		_, _ = sb.WriteString(part)
		endedWithNewline = strings.HasSuffix(part, "\n")
	}
	return sb.String()
}
// Reader reads one logical line of input per call.
type Reader interface {
	Read() ([]byte, error)
}
// YAMLReader adapts from Kubernetes YAMLReader(apimachinery.k8s.io/pkg/util/yaml/decoder.go).
// It records the start line number of the chunk it reads each time.
type YAMLReader struct {
	// reader supplies the input one line at a time.
	reader Reader
	// currLine is the number of the last line read (0 before the first read).
	currLine int
}
// NewYAMLReader wraps r in a line-tracking YAML document reader.
func NewYAMLReader(r *bufio.Reader) *YAMLReader {
	return &YAMLReader{
		reader:   &LineReader{reader: r},
		currLine: 0,
	}
}
// Read returns a full YAML document and its first line number.
// Documents are terminated by a "---" separator line (trailing whitespace after
// the separator is ignored); io.EOF is returned once the input is exhausted.
func (r *YAMLReader) Read() ([]byte, int, error) {
	var buffer bytes.Buffer
	startLine := r.currLine + 1
	foundStart := false
	for {
		r.currLine++
		line, err := r.reader.Read()
		if err != nil && err != io.EOF {
			return nil, startLine, err
		}
		// detect beginning of the chunk: the first line that is neither blank
		// nor a separator fixes the document's reported start line.
		if !bytes.Equal(line, []byte("\n")) && !bytes.Equal(line, []byte(yamlSeparator)) && !foundStart {
			startLine = r.currLine
			foundStart = true
		}
		sep := len([]byte(separator))
		if i := bytes.Index(line, []byte(separator)); i == 0 {
			// We have a potential document terminator
			i += sep
			after := line[i:]
			// Only whitespace may follow "---" for it to count as a terminator.
			if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
				if buffer.Len() != 0 {
					// Fix: return buffer.Bytes() directly; the previous code wrapped it in
					// customBytes.CheckLen, fuzzing instrumentation that should not ship.
					return buffer.Bytes(), startLine, nil
				}
				if err == io.EOF {
					return nil, startLine, err
				}
			}
		}
		if err == io.EOF {
			if buffer.Len() != 0 {
				// If we're at EOF, we have a final, non-terminated line. Return it.
				return buffer.Bytes(), startLine, nil
			}
			return nil, startLine, err
		}
		buffer.Write(line)
	}
}
type LineReader struct {
reader *bufio.Reader
}
// Read returns a single line (with '\n' ended) from the underlying reader.
// An error is returned iff there is an error with the underlying reader.
func (r *LineReader) Read() ([]byte, error) {
var (
isPrefix = true
err error
line []byte
buffer bytes.Buffer
)
for isPrefix && err == nil {
line, isPrefix, err = r.reader.ReadLine()
buffer.Write(line)
}
buffer.WriteByte('\n')
return customBytes.CheckLen(buffer.Bytes(), "/src/istio/pilot/pkg/config/file/util/kubeyaml/kubeyaml.go:155:9 (May be slightly inaccurate) NEW_LINEbuffer.Bytes()"), err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crd
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// IstioKind is the generic Kubernetes API object wrapper
type IstioKind struct {
	metav1.TypeMeta
	metav1.ObjectMeta `json:"metadata"`
	// Spec holds the resource's untyped spec payload.
	Spec map[string]any `json:"spec"`
	// Status holds the resource's untyped status payload, if any.
	Status map[string]any `json:"status,omitempty"`
}
// GetSpec from a wrapper
// Note: returns the underlying map, not a copy; mutations are visible to the wrapper.
func (in *IstioKind) GetSpec() map[string]any {
	return in.Spec
}

// GetStatus from a wrapper
// Note: returns the underlying map, not a copy; mutations are visible to the wrapper.
func (in *IstioKind) GetStatus() map[string]any {
	return in.Status
}

// GetObjectMeta from a wrapper
func (in *IstioKind) GetObjectMeta() metav1.ObjectMeta {
	return in.ObjectMeta
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE(review): Spec and Status are plain map assignments here, so in and out share
// the same underlying maps — shallower than a typical deepcopy; confirm this is intended.
func (in *IstioKind) DeepCopyInto(out *IstioKind) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioKind.
func (in *IstioKind) DeepCopy() *IstioKind {
	if in == nil {
		return nil
	}
	out := &IstioKind{}
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IstioKind) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Return an untyped nil: returning the typed nil *IstioKind would make the
	// runtime.Object interface value compare non-nil.
	return nil
}
// IstioObject is a k8s wrapper interface for config objects
type IstioObject interface {
	runtime.Object
	// GetSpec returns the untyped spec map.
	GetSpec() map[string]any
	// GetStatus returns the untyped status map.
	GetStatus() map[string]any
	// GetObjectMeta returns the object's metadata.
	GetObjectMeta() metav1.ObjectMeta
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crd
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"github.com/hashicorp/go-multierror"
"gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeyaml "k8s.io/apimachinery/pkg/util/yaml"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/log"
)
// FromJSON converts a canonical JSON to a proto message
func FromJSON(s resource.Schema, js string) (config.Spec, error) {
	spec, err := s.NewInstance()
	if err != nil {
		return nil, err
	}
	if err := config.ApplyJSON(spec, js); err != nil {
		return nil, err
	}
	return spec, nil
}
// StatusJSONFromMap converts a generic status map into the schema's typed status
// object by round-tripping through JSON. A nil map yields a nil status.
func StatusJSONFromMap(schema resource.Schema, jsonMap map[string]any) (config.Status, error) {
	if jsonMap == nil {
		return nil, nil
	}
	raw, err := json.Marshal(jsonMap)
	if err != nil {
		return nil, err
	}
	status, err := schema.Status()
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(raw, status); err != nil {
		return nil, err
	}
	return status, nil
}
// FromYAML converts a canonical YAML to a proto message
func FromYAML(s resource.Schema, yml string) (config.Spec, error) {
	spec, err := s.NewInstance()
	if err != nil {
		return nil, err
	}
	if err := config.ApplyYAML(spec, yml); err != nil {
		return nil, err
	}
	return spec, nil
}
// FromJSONMap converts from a generic map to a proto message using canonical JSON encoding
// JSON encoding is specified here: https://developers.google.com/protocol-buffers/docs/proto3#json
func FromJSONMap(s resource.Schema, data any) (config.Spec, error) {
	// Marshal to YAML bytes, then decode via the schema's YAML path.
	yml, err := yaml.Marshal(data)
	if err != nil {
		return nil, err
	}
	spec, err := FromYAML(s, string(yml))
	if err != nil {
		return nil, multierror.Prefix(err, fmt.Sprintf("YAML decoding error: %v", string(yml)))
	}
	return spec, nil
}
// ConvertObject converts an IstioObject k8s-style object to the internal configuration model.
func ConvertObject(schema resource.Schema, object IstioObject, domain string) (*config.Config, error) {
	specJSON, err := json.Marshal(object.GetSpec())
	if err != nil {
		return nil, err
	}
	spec, err := FromJSON(schema, string(specJSON))
	if err != nil {
		return nil, err
	}
	// A bad status is logged but does not fail the conversion.
	status, err := StatusJSONFromMap(schema, object.GetStatus())
	if err != nil {
		log.Errorf("could not get istio status from map %v, err %v", object.GetStatus(), err)
	}
	meta := object.GetObjectMeta()
	return &config.Config{
		Meta: config.Meta{
			GroupVersionKind:  schema.GroupVersionKind(),
			Name:              meta.Name,
			Namespace:         meta.Namespace,
			Domain:            domain,
			Labels:            meta.Labels,
			Annotations:       meta.Annotations,
			ResourceVersion:   meta.ResourceVersion,
			CreationTimestamp: meta.CreationTimestamp.Time,
		},
		Spec:   spec,
		Status: status,
	}, nil
}
// ConvertConfig translates Istio config to k8s config JSON
func ConvertConfig(cfg config.Config) (IstioObject, error) {
	spec, err := config.ToMap(cfg.Spec)
	if err != nil {
		return nil, err
	}
	status, err := config.ToMap(cfg.Status)
	if err != nil {
		return nil, err
	}
	// Mirror Kubernetes behavior: an unset namespace becomes "default".
	ns := cfg.Namespace
	if ns == "" {
		ns = metav1.NamespaceDefault
	}
	return &IstioKind{
		TypeMeta: metav1.TypeMeta{
			Kind:       cfg.GroupVersionKind.Kind,
			APIVersion: cfg.GroupVersionKind.Group + "/" + cfg.GroupVersionKind.Version,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:              cfg.Name,
			Namespace:         ns,
			ResourceVersion:   cfg.ResourceVersion,
			Labels:            cfg.Labels,
			Annotations:       cfg.Annotations,
			CreationTimestamp: metav1.NewTime(cfg.CreationTimestamp),
		},
		Spec:   spec,
		Status: status,
	}, nil
}
// TODO - add special cases for type-to-kind and kind-to-type
// conversions with initial-isms. Consider adding additional type
// information to the abstract model and/or elevating k8s
// representation to first-class type to avoid extra conversions.

// parseInputsImpl decodes a multi-document YAML/JSON stream into Istio configs.
// Documents whose kind is not in the Pilot gateway-API schema set are returned
// unconverted in the second slice; withValidate additionally runs schema validation.
func parseInputsImpl(inputs string, withValidate bool) ([]config.Config, []IstioKind, error) {
	var varr []config.Config
	var others []IstioKind
	reader := bytes.NewReader([]byte(inputs))
	empty := IstioKind{}

	// We store configs as a YaML stream; there may be more than one decoder.
	yamlDecoder := kubeyaml.NewYAMLOrJSONDecoder(reader, 512*1024)
	for {
		obj := IstioKind{}
		err := yamlDecoder.Decode(&obj)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, nil, fmt.Errorf("cannot parse proto message: %v", err)
		}
		// Skip documents that decoded to nothing (e.g. blank documents).
		if reflect.DeepEqual(obj, empty) {
			continue
		}

		gvk := obj.GroupVersionKind()
		s, exists := collections.PilotGatewayAPI().FindByGroupVersionAliasesKind(resource.FromKubernetesGVK(&gvk))
		if !exists {
			// Unknown kinds are not an error; hand them back to the caller as-is.
			log.Debugf("unrecognized type %v", obj.Kind)
			others = append(others, obj)
			continue
		}

		cfg, err := ConvertObject(s, &obj, "")
		if err != nil {
			return nil, nil, fmt.Errorf("cannot parse proto message for %v: %v", obj.Name, err)
		}

		if withValidate {
			if _, err := s.ValidateConfig(*cfg); err != nil {
				return nil, nil, fmt.Errorf("configuration is invalid: %v", err)
			}
		}

		varr = append(varr, *cfg)
	}

	return varr, others, nil
}
// ParseInputs reads multiple documents from `kubectl` output and checks with
// the schema. It also returns the list of unrecognized kinds as the second
// response.
//
// NOTE: This function only decodes a subset of the complete k8s
// ObjectMeta as identified by the fields in model.Meta. This
// would typically only be a problem if a user dumps a configuration
// object with kubectl and then re-ingests it.
func ParseInputs(inputs string) ([]config.Config, []IstioKind, error) {
	return parseInputsImpl(inputs, true)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package crdclient provides an implementation of the config store and cache
// using Kubernetes Custom Resources and the informer framework from Kubernetes
//
// This code relies heavily on code generation for performance reasons; to implement the
// Istio store interface, we need to take dynamic inputs. Using the dynamic informers results in poor
// performance, as the cache will store unstructured objects which need to be marshaled on each Get/List call.
// Using istio/client-go directly will cache objects marshaled, allowing us to have cheap Get/List calls,
// at the expense of some code gen.
package crdclient
import (
"fmt"
"sync"
"time"
jsonmerge "github.com/evanphx/json-patch/v5"
"go.uber.org/atomic"
"gomodules.xyz/jsonpatch/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // import GKE cluster authentication plugin
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc" // import OIDC cluster authentication plugin, e.g. for Tectonic
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/queue"
)
// scope is the logging scope for this package's Kubernetes client messages.
var scope = log.RegisterScope("kube", "Kubernetes client messages")
// Client is a client for Istio CRDs, implementing config store cache
// This is used for CRUD operators on Istio configuration, as well as handling of events on config changes
type Client struct {
	// schemas defines the set of schemas used by this client.
	// Note: this must be a subset of the schemas defined in the codegen
	schemas collection.Schemas

	// domainSuffix for the config metadata
	domainSuffix string

	// revision for this control plane instance. We will only read configs that match this revision.
	revision string

	// kinds keeps track of all cache handlers for known types
	kinds map[config.GroupVersionKind]kclient.Untyped
	// kindsMu guards kinds.
	kindsMu sync.RWMutex
	// queue serializes delivery of change events to the registered handlers.
	queue queue.Instance

	// a flag indicates whether this client has been run, it is to prevent run queue twice
	started *atomic.Bool

	// handlers defines a list of event handlers per-type
	handlers map[config.GroupVersionKind][]model.EventHandler

	// schemasByCRDName indexes schemas by CRD name ("<plural>.<group>").
	schemasByCRDName map[string]resource.Schema

	// client is the underlying Kubernetes client.
	client kube.Client
	// logger is this controller's labeled logging scope.
	logger *log.Scope

	// namespacesFilter is only used to initiate filtered informer.
	namespacesFilter func(obj interface{}) bool
	// filtersByGVK holds optional per-type informer filters.
	filtersByGVK map[config.GroupVersionKind]kubetypes.Filter
}
// Option configures the creation of a crdclient Client.
type Option struct {
	// Revision is the control plane revision whose configs this client reads.
	Revision string
	// DomainSuffix is set as the Domain on translated config metadata.
	DomainSuffix string
	// Identifier labels this controller's logger.
	Identifier string
	// NamespacesFilter, when set, restricts informers to objects it accepts.
	NamespacesFilter func(obj interface{}) bool
	// FiltersByGVK supplies optional per-type informer filters.
	FiltersByGVK map[config.GroupVersionKind]kubetypes.Filter
}
// Compile-time assertion that Client implements model.ConfigStoreController.
var _ model.ConfigStoreController = &Client{}

// New creates a Client over the default schema set: the Pilot schemas, plus the
// gateway-API schemas when that feature is enabled.
func New(client kube.Client, opts Option) *Client {
	schemas := collections.Pilot
	if features.EnableGatewayAPI {
		schemas = collections.PilotGatewayAPI()
	}
	return NewForSchemas(client, opts, schemas)
}
// NewForSchemas creates a Client serving exactly the given schemas, registering
// a cache handler for each schema's CRD.
func NewForSchemas(client kube.Client, opts Option, schemas collection.Schemas) *Client {
	// From the spec: "Its name MUST be in the format <.spec.name>.<.spec.group>."
	// Compute each CRD name exactly once (previously the same fmt.Sprintf ran in
	// two separate loops), preserving schema order for registration below.
	all := schemas.All()
	crdNames := make([]string, 0, len(all))
	schemasByCRDName := make(map[string]resource.Schema, len(all))
	for _, s := range all {
		name := fmt.Sprintf("%s.%s", s.Plural(), s.Group())
		crdNames = append(crdNames, name)
		schemasByCRDName[name] = s
	}
	out := &Client{
		domainSuffix:     opts.DomainSuffix,
		schemas:          schemas,
		schemasByCRDName: schemasByCRDName,
		revision:         opts.Revision,
		queue:            queue.NewQueue(1 * time.Second),
		started:          atomic.NewBool(false),
		kinds:            map[config.GroupVersionKind]kclient.Untyped{},
		handlers:         map[config.GroupVersionKind][]model.EventHandler{},
		client:           client,
		logger:           scope.WithLabels("controller", opts.Identifier),
		namespacesFilter: opts.NamespacesFilter,
		filtersByGVK:     opts.FiltersByGVK,
	}
	for _, name := range crdNames {
		out.addCRD(name)
	}
	return out
}
// RegisterEventHandler adds a handler invoked for every event on the given kind.
// NOTE(review): handlers is written without synchronization — presumably all
// registration happens before Run is called; confirm before calling concurrently.
func (cl *Client) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
	cl.handlers[kind] = append(cl.handlers[kind], handler)
}
// Run the queue and all informers. Callers should wait for HasSynced() before depending on results.
func (cl *Client) Run(stop <-chan struct{}) {
	if cl.started.Swap(true) {
		// was already started by other thread
		return
	}

	t0 := time.Now()
	cl.logger.Infof("Starting Pilot K8S CRD controller")

	// Block until every registered informer cache has synced (or stop closes).
	if !kube.WaitForCacheSync("crdclient", stop, cl.informerSynced) {
		cl.logger.Errorf("Failed to sync Pilot K8S CRD controller cache")
		return
	}

	cl.logger.Infof("Pilot K8S CRD controller synced in %v", time.Since(t0))
	// Process queued events until stop closes; Run blocks for the controller's lifetime.
	cl.queue.Run(stop)
	cl.logger.Infof("controller terminated")
}
// informerSynced reports whether every registered kind's informer has synced.
func (cl *Client) informerSynced() bool {
	for gvk, informer := range cl.allKinds() {
		if informer.HasSynced() {
			continue
		}
		cl.logger.Infof("controller %q is syncing...", gvk)
		return false
	}
	return true
}
// HasSynced reports whether the event queue has completed its initial sync.
func (cl *Client) HasSynced() bool {
	return cl.queue.HasSynced()
}
// Schemas for the store; returns the schema set this client was configured with.
func (cl *Client) Schemas() collection.Schemas {
	return cl.schemas
}
// Get implements store interface
func (cl *Client) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	informer, found := cl.kind(typ)
	if !found {
		cl.logger.Warnf("unknown type: %s", typ)
		return nil
	}
	obj := informer.Get(name, namespace)
	if obj == nil {
		cl.logger.Debugf("couldn't find %s/%s in informer index", namespace, name)
		return nil
	}
	cfg := TranslateObject(obj, typ, cl.domainSuffix)
	return &cfg
}
// Create implements store interface
func (cl *Client) Create(cfg config.Config) (string, error) {
	// A nil spec is rejected up front; the API server would not accept it.
	if cfg.Spec == nil {
		return "", fmt.Errorf("nil spec for %v/%v", cfg.Name, cfg.Namespace)
	}
	obj, err := create(cl.client, cfg, getObjectMetadata(cfg))
	if err != nil {
		return "", err
	}
	return obj.GetResourceVersion(), nil
}
// Update implements store interface
func (cl *Client) Update(cfg config.Config) (string, error) {
	// A nil spec is rejected up front; the API server would not accept it.
	if cfg.Spec == nil {
		return "", fmt.Errorf("nil spec for %v/%v", cfg.Name, cfg.Namespace)
	}
	obj, err := update(cl.client, cfg, getObjectMetadata(cfg))
	if err != nil {
		return "", err
	}
	return obj.GetResourceVersion(), nil
}
// UpdateStatus writes only the status subresource of the given config.
func (cl *Client) UpdateStatus(cfg config.Config) (string, error) {
	if cfg.Status == nil {
		return "", fmt.Errorf("nil status for %v/%v on updateStatus()", cfg.Name, cfg.Namespace)
	}
	obj, err := updateStatus(cl.client, cfg, getObjectMetadata(cfg))
	if err != nil {
		return "", err
	}
	return obj.GetResourceVersion(), nil
}
// Patch applies only the modifications made in the PatchFunc rather than doing a full replace. Useful to avoid
// read-modify-write conflicts when there are many concurrent-writers to the same resource.
func (cl *Client) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	// Hand the patch function a deep copy so orig itself is never mutated.
	updated, patchType := patchFn(orig.DeepCopy())
	meta, err := patch(cl.client, orig, getObjectMetadata(orig), updated, getObjectMetadata(updated), patchType)
	if err != nil {
		return "", err
	}
	return meta.GetResourceVersion(), nil
}
// Delete implements store interface
// `resourceVersion` must be matched before deletion is carried out. If not possible, a 409 Conflict status will be returned.
func (cl *Client) Delete(typ config.GroupVersionKind, name, namespace string, resourceVersion *string) error {
	return delete(cl.client, typ, name, namespace, resourceVersion)
}
// List implements store interface
func (cl *Client) List(kind config.GroupVersionKind, namespace string) []config.Config {
	informer, found := cl.kind(kind)
	if !found {
		return nil
	}
	objs := informer.List(namespace, klabels.Everything())
	configs := make([]config.Config, 0, len(objs))
	for _, obj := range objs {
		configs = append(configs, TranslateObject(obj, kind, cl.domainSuffix))
	}
	return configs
}
// allKinds returns a snapshot of the registered kind handlers, taken under the read lock.
func (cl *Client) allKinds() map[config.GroupVersionKind]kclient.Untyped {
	cl.kindsMu.RLock()
	defer cl.kindsMu.RUnlock()
	// Clone so callers can iterate without holding the lock.
	return maps.Clone(cl.kinds)
}
// kind returns the cache handler for the given type and whether one is registered.
func (cl *Client) kind(r config.GroupVersionKind) (kclient.Untyped, bool) {
	cl.kindsMu.RLock()
	defer cl.kindsMu.RUnlock()
	ch, ok := cl.kinds[r]
	return ch, ok
}
// TranslateObject converts a runtime.Object of a known GVK into a config.Config,
// stamping the given domain suffix onto the result. Unknown GVKs are logged and
// yield a zero config.Config.
func TranslateObject(r runtime.Object, gvk config.GroupVersionKind, domainSuffix string) config.Config {
	translate, known := translationMap[gvk]
	if !known {
		scope.Errorf("unknown type %v", gvk)
		return config.Config{}
	}
	cfg := translate(r)
	cfg.Domain = domainSuffix
	return cfg
}
// getObjectMetadata builds the Kubernetes ObjectMeta for the given Istio config.
// The parameter is named cfg rather than config so it does not shadow the
// imported config package inside the function body.
func getObjectMetadata(cfg config.Config) metav1.ObjectMeta {
	return metav1.ObjectMeta{
		Name:            cfg.Name,
		Namespace:       cfg.Namespace,
		Labels:          cfg.Labels,
		Annotations:     cfg.Annotations,
		ResourceVersion: cfg.ResourceVersion,
		OwnerReferences: cfg.OwnerReferences,
		UID:             types.UID(cfg.UID),
	}
}
// genPatchBytes computes the patch payload that transforms oldRes into modRes.
// Both objects are serialized to JSON and diffed according to patchType:
// JSONPatchType produces an RFC 6902 operation list, MergePatchType an
// RFC 7386 merge patch. Any other patch type is rejected with an error.
func genPatchBytes(oldRes, modRes runtime.Object, patchType types.PatchType) ([]byte, error) {
	oldJSON, err := json.Marshal(oldRes)
	if err != nil {
		// Fixed typo: was "marhsalling".
		return nil, fmt.Errorf("failed marshalling original resource: %v", err)
	}
	newJSON, err := json.Marshal(modRes)
	if err != nil {
		// Fixed typo: was "marhsalling".
		return nil, fmt.Errorf("failed marshalling modified resource: %v", err)
	}
	switch patchType {
	case types.JSONPatchType:
		ops, err := jsonpatch.CreatePatch(oldJSON, newJSON)
		if err != nil {
			return nil, err
		}
		return json.Marshal(ops)
	case types.MergePatchType:
		return jsonmerge.CreateMergePatch(oldJSON, newJSON)
	default:
		return nil, fmt.Errorf("unsupported patch type: %v. must be one of JSONPatchType or MergePatchType", patchType)
	}
}
// addCRD begins watching the resource backing the CRD with the given name.
// It is a no-op when the CRD is not part of the schema set this client cares
// about, or when an informer for its GroupVersionKind is already registered.
func (cl *Client) addCRD(name string) {
	cl.logger.Debugf("adding CRD %q", name)
	s, f := cl.schemasByCRDName[name]
	if !f {
		cl.logger.Debugf("added resource that we are not watching: %v", name)
		return
	}
	resourceGVK := s.GroupVersionKind()
	gvr := s.GroupVersionResource()
	// Hold the write lock for the remainder: registration must be atomic with
	// the duplicate check below.
	cl.kindsMu.Lock()
	defer cl.kindsMu.Unlock()
	if _, f := cl.kinds[resourceGVK]; f {
		cl.logger.Debugf("added resource that already exists: %v", resourceGVK)
		return
	}
	// Wrap any pre-configured object filter so that the namespace filter and
	// revision-label check are applied on top of it.
	filter := cl.filtersByGVK[resourceGVK]
	objectFilter := filter.ObjectFilter
	filter.ObjectFilter = func(t any) bool {
		if objectFilter != nil && !objectFilter(t) {
			return false
		}
		if cl.namespacesFilter != nil && !cl.namespacesFilter(t) {
			return false
		}
		return config.LabelsInRevision(t.(controllers.Object).GetLabels(), cl.revision)
	}
	var kc kclient.Untyped
	if s.IsBuiltin() {
		// Built-in Kubernetes types get a plain untyped informer.
		kc = kclient.NewUntypedInformer(cl.client, gvr, filter)
	} else {
		// Custom resources go through the delayed informer path instead.
		kc = kclient.NewDelayedInformer[controllers.Object](
			cl.client,
			gvr,
			kubetypes.StandardInformer,
			filter,
		)
	}
	kind := s.Kind()
	// Fan informer events into the work queue as config events; each event is
	// also counted per kind/operation for monitoring.
	kc.AddEventHandler(controllers.EventHandler[controllers.Object]{
		AddFunc: func(obj controllers.Object) {
			incrementEvent(kind, "add")
			cl.queue.Push(func() error {
				cl.onEvent(resourceGVK, nil, obj, model.EventAdd)
				return nil
			})
		},
		UpdateFunc: func(old, cur controllers.Object) {
			incrementEvent(kind, "update")
			cl.queue.Push(func() error {
				cl.onEvent(resourceGVK, old, cur, model.EventUpdate)
				return nil
			})
		},
		DeleteFunc: func(obj controllers.Object) {
			incrementEvent(kind, "delete")
			cl.queue.Push(func() error {
				cl.onEvent(resourceGVK, nil, obj, model.EventDelete)
				return nil
			})
		},
	})
	cl.kinds[resourceGVK] = kc
}
// onEvent translates a raw informer event into config.Config values and fans
// it out to every handler registered for the event's GroupVersionKind.
// Events whose current object cannot be extracted are dropped.
func (cl *Client) onEvent(resourceGVK config.GroupVersionKind, old controllers.Object, curr controllers.Object, event model.Event) {
	obj := controllers.ExtractObject(curr)
	if obj == nil {
		return
	}
	currCfg := TranslateObject(obj, resourceGVK, cl.domainSuffix)
	// old is nil for adds and deletes; handlers then receive a zero old config.
	oldCfg := config.Config{}
	if old != nil {
		oldCfg = TranslateObject(old, resourceGVK, cl.domainSuffix)
	}
	for _, handler := range cl.handlers[resourceGVK] {
		handler(oldCfg, currCfg, event)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package crdclient
import (
"istio.io/istio/pkg/monitoring"
)
var (
	// typeTag labels an event metric with the config kind that produced it.
	typeTag = monitoring.CreateLabel("type")
	// eventTag labels an event metric with the operation ("add", "update", "delete").
	eventTag = monitoring.CreateLabel("event")
	// k8sEvents counts config events received from Kubernetes, partitioned
	// by the type and event labels above.
	k8sEvents = monitoring.NewSum(
		"pilot_k8s_cfg_events",
		"Events from k8s config.",
	)
)
// incrementEvent bumps the pilot_k8s_cfg_events counter for the given resource
// kind and event name (e.g. "add", "update", "delete").
func incrementEvent(kind, event string) {
	k8sEvents.With(typeTag.Value(kind), eventTag.Value(event)).Increment()
}
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package crdclient
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
k8sioapiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
k8sioapiappsv1 "k8s.io/api/apps/v1"
k8sioapicertificatesv1 "k8s.io/api/certificates/v1"
k8sioapicoordinationv1 "k8s.io/api/coordination/v1"
k8sioapicorev1 "k8s.io/api/core/v1"
k8sioapidiscoveryv1 "k8s.io/api/discovery/v1"
k8sioapinetworkingv1 "k8s.io/api/networking/v1"
k8sioapiextensionsapiserverpkgapisapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
sigsk8siogatewayapiapisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
sigsk8siogatewayapiapisv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
istioioapiextensionsv1alpha1 "istio.io/api/extensions/v1alpha1"
istioioapimetav1alpha1 "istio.io/api/meta/v1alpha1"
istioioapinetworkingv1alpha3 "istio.io/api/networking/v1alpha3"
istioioapinetworkingv1beta1 "istio.io/api/networking/v1beta1"
istioioapisecurityv1beta1 "istio.io/api/security/v1beta1"
istioioapitelemetryv1alpha1 "istio.io/api/telemetry/v1alpha1"
apiistioioapiextensionsv1alpha1 "istio.io/client-go/pkg/apis/extensions/v1alpha1"
apiistioioapinetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
apiistioioapinetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
apiistioioapisecurityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"
apiistioioapitelemetryv1alpha1 "istio.io/client-go/pkg/apis/telemetry/v1alpha1"
)
// create dispatches cfg to the strongly-typed Create API matching its
// GroupVersionKind, using objMeta as the object's Kubernetes metadata and
// cfg.Spec (type-asserted per kind) as the spec. It returns the created
// object's metadata, or an error for unsupported kinds.
// NOTE: generated dispatch table — kept in sync with the supported schema set.
func create(c kube.Client, cfg config.Config, objMeta metav1.ObjectMeta) (metav1.Object, error) {
	switch cfg.GroupVersionKind {
	case gvk.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(cfg.Namespace).Create(context.TODO(), &apiistioioapisecurityv1beta1.AuthorizationPolicy{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.AuthorizationPolicy)),
		}, metav1.CreateOptions{})
	case gvk.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.DestinationRule{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.DestinationRule)),
		}, metav1.CreateOptions{})
	case gvk.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.EnvoyFilter{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.EnvoyFilter)),
		}, metav1.CreateOptions{})
	case gvk.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.GRPCRouteSpec)),
		}, metav1.CreateOptions{})
	case gvk.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.Gateway{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.Gateway)),
		}, metav1.CreateOptions{})
	case gvk.GatewayClass:
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().Create(context.TODO(), &sigsk8siogatewayapiapisv1beta1.GatewayClass{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewayClassSpec)),
		}, metav1.CreateOptions{})
	case gvk.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1beta1.HTTPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.HTTPRouteSpec)),
		}, metav1.CreateOptions{})
	case gvk.KubernetesGateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1beta1.Gateway{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewaySpec)),
		}, metav1.CreateOptions{})
	case gvk.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(cfg.Namespace).Create(context.TODO(), &apiistioioapisecurityv1beta1.PeerAuthentication{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.PeerAuthentication)),
		}, metav1.CreateOptions{})
	case gvk.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1beta1.ProxyConfig{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1beta1.ProxyConfig)),
		}, metav1.CreateOptions{})
	case gvk.ReferenceGrant:
		return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1beta1.ReferenceGrant{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.ReferenceGrantSpec)),
		}, metav1.CreateOptions{})
	case gvk.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(cfg.Namespace).Create(context.TODO(), &apiistioioapisecurityv1beta1.RequestAuthentication{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.RequestAuthentication)),
		}, metav1.CreateOptions{})
	case gvk.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.ServiceEntry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.ServiceEntry)),
		}, metav1.CreateOptions{})
	case gvk.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.Sidecar{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.Sidecar)),
		}, metav1.CreateOptions{})
	case gvk.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TCPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.TCPRouteSpec)),
		}, metav1.CreateOptions{})
	case gvk.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TLSRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.TLSRouteSpec)),
		}, metav1.CreateOptions{})
	case gvk.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(cfg.Namespace).Create(context.TODO(), &apiistioioapitelemetryv1alpha1.Telemetry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapitelemetryv1alpha1.Telemetry)),
		}, metav1.CreateOptions{})
	case gvk.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(cfg.Namespace).Create(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.UDPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.UDPRouteSpec)),
		}, metav1.CreateOptions{})
	case gvk.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.VirtualService{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.VirtualService)),
		}, metav1.CreateOptions{})
	case gvk.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(cfg.Namespace).Create(context.TODO(), &apiistioioapiextensionsv1alpha1.WasmPlugin{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapiextensionsv1alpha1.WasmPlugin)),
		}, metav1.CreateOptions{})
	case gvk.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadEntry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.WorkloadEntry)),
		}, metav1.CreateOptions{})
	case gvk.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(cfg.Namespace).Create(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadGroup{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.WorkloadGroup)),
		}, metav1.CreateOptions{})
	default:
		return nil, fmt.Errorf("unsupported type: %v", cfg.GroupVersionKind)
	}
}
// update dispatches cfg to the strongly-typed Update API matching its
// GroupVersionKind, replacing the object's spec with cfg.Spec and its
// metadata with objMeta. It returns the updated object's metadata, or an
// error for unsupported kinds.
// NOTE: generated dispatch table — kept in sync with the supported schema set.
func update(c kube.Client, cfg config.Config, objMeta metav1.ObjectMeta) (metav1.Object, error) {
	switch cfg.GroupVersionKind {
	case gvk.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(cfg.Namespace).Update(context.TODO(), &apiistioioapisecurityv1beta1.AuthorizationPolicy{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.AuthorizationPolicy)),
		}, metav1.UpdateOptions{})
	case gvk.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.DestinationRule{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.DestinationRule)),
		}, metav1.UpdateOptions{})
	case gvk.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.EnvoyFilter{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.EnvoyFilter)),
		}, metav1.UpdateOptions{})
	case gvk.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.GRPCRouteSpec)),
		}, metav1.UpdateOptions{})
	case gvk.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.Gateway{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.Gateway)),
		}, metav1.UpdateOptions{})
	case gvk.GatewayClass:
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().Update(context.TODO(), &sigsk8siogatewayapiapisv1beta1.GatewayClass{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewayClassSpec)),
		}, metav1.UpdateOptions{})
	case gvk.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1beta1.HTTPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.HTTPRouteSpec)),
		}, metav1.UpdateOptions{})
	case gvk.KubernetesGateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1beta1.Gateway{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewaySpec)),
		}, metav1.UpdateOptions{})
	case gvk.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(cfg.Namespace).Update(context.TODO(), &apiistioioapisecurityv1beta1.PeerAuthentication{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.PeerAuthentication)),
		}, metav1.UpdateOptions{})
	case gvk.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1beta1.ProxyConfig{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1beta1.ProxyConfig)),
		}, metav1.UpdateOptions{})
	case gvk.ReferenceGrant:
		return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1beta1.ReferenceGrant{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1beta1.ReferenceGrantSpec)),
		}, metav1.UpdateOptions{})
	case gvk.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(cfg.Namespace).Update(context.TODO(), &apiistioioapisecurityv1beta1.RequestAuthentication{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapisecurityv1beta1.RequestAuthentication)),
		}, metav1.UpdateOptions{})
	case gvk.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.ServiceEntry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.ServiceEntry)),
		}, metav1.UpdateOptions{})
	case gvk.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.Sidecar{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.Sidecar)),
		}, metav1.UpdateOptions{})
	case gvk.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TCPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.TCPRouteSpec)),
		}, metav1.UpdateOptions{})
	case gvk.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TLSRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.TLSRouteSpec)),
		}, metav1.UpdateOptions{})
	case gvk.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(cfg.Namespace).Update(context.TODO(), &apiistioioapitelemetryv1alpha1.Telemetry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapitelemetryv1alpha1.Telemetry)),
		}, metav1.UpdateOptions{})
	case gvk.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(cfg.Namespace).Update(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.UDPRoute{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*sigsk8siogatewayapiapisv1alpha2.UDPRouteSpec)),
		}, metav1.UpdateOptions{})
	case gvk.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.VirtualService{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.VirtualService)),
		}, metav1.UpdateOptions{})
	case gvk.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(cfg.Namespace).Update(context.TODO(), &apiistioioapiextensionsv1alpha1.WasmPlugin{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapiextensionsv1alpha1.WasmPlugin)),
		}, metav1.UpdateOptions{})
	case gvk.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadEntry{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.WorkloadEntry)),
		}, metav1.UpdateOptions{})
	case gvk.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(cfg.Namespace).Update(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadGroup{
			ObjectMeta: objMeta,
			Spec:       *(cfg.Spec.(*istioioapinetworkingv1alpha3.WorkloadGroup)),
		}, metav1.UpdateOptions{})
	default:
		return nil, fmt.Errorf("unsupported type: %v", cfg.GroupVersionKind)
	}
}
// updateStatus dispatches cfg to the strongly-typed UpdateStatus API matching
// its GroupVersionKind, writing cfg.Status (type-asserted per kind) via the
// status subresource. It returns the updated object's metadata, or an error
// for unsupported kinds. Note that not every kind supported by create/update
// appears here (e.g. ReferenceGrant has no case in this switch).
// NOTE: generated dispatch table — kept in sync with the supported schema set.
func updateStatus(c kube.Client, cfg config.Config, objMeta metav1.ObjectMeta) (metav1.Object, error) {
	switch cfg.GroupVersionKind {
	case gvk.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapisecurityv1beta1.AuthorizationPolicy{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.DestinationRule{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.EnvoyFilter{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1alpha2.GRPCRouteStatus)),
		}, metav1.UpdateOptions{})
	case gvk.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.Gateway{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.GatewayClass:
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1beta1.GatewayClass{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1beta1.GatewayClassStatus)),
		}, metav1.UpdateOptions{})
	case gvk.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1beta1.HTTPRoute{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1beta1.HTTPRouteStatus)),
		}, metav1.UpdateOptions{})
	case gvk.KubernetesGateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1beta1.Gateway{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1beta1.GatewayStatus)),
		}, metav1.UpdateOptions{})
	case gvk.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapisecurityv1beta1.PeerAuthentication{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1beta1.ProxyConfig{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapisecurityv1beta1.RequestAuthentication{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.ServiceEntry{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.Sidecar{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TCPRoute{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1alpha2.TCPRouteStatus)),
		}, metav1.UpdateOptions{})
	case gvk.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.TLSRoute{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1alpha2.TLSRouteStatus)),
		}, metav1.UpdateOptions{})
	case gvk.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapitelemetryv1alpha1.Telemetry{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(cfg.Namespace).UpdateStatus(context.TODO(), &sigsk8siogatewayapiapisv1alpha2.UDPRoute{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*sigsk8siogatewayapiapisv1alpha2.UDPRouteStatus)),
		}, metav1.UpdateOptions{})
	case gvk.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.VirtualService{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapiextensionsv1alpha1.WasmPlugin{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadEntry{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	case gvk.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(cfg.Namespace).UpdateStatus(context.TODO(), &apiistioioapinetworkingv1alpha3.WorkloadGroup{
			ObjectMeta: objMeta,
			Status:     *(cfg.Status.(*istioioapimetav1alpha1.IstioStatus)),
		}, metav1.UpdateOptions{})
	default:
		return nil, fmt.Errorf("unsupported type: %v", cfg.GroupVersionKind)
	}
}
func patch(c kube.Client, orig config.Config, origMeta metav1.ObjectMeta, mod config.Config, modMeta metav1.ObjectMeta, typ types.PatchType) (metav1.Object, error) {
if orig.GroupVersionKind != mod.GroupVersionKind {
return nil, fmt.Errorf("gvk mismatch: %v, modified: %v", orig.GroupVersionKind, mod.GroupVersionKind)
}
switch orig.GroupVersionKind {
case gvk.AuthorizationPolicy:
oldRes := &apiistioioapisecurityv1beta1.AuthorizationPolicy{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapisecurityv1beta1.AuthorizationPolicy)),
}
modRes := &apiistioioapisecurityv1beta1.AuthorizationPolicy{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapisecurityv1beta1.AuthorizationPolicy)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().SecurityV1beta1().AuthorizationPolicies(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.DestinationRule:
oldRes := &apiistioioapinetworkingv1alpha3.DestinationRule{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.DestinationRule)),
}
modRes := &apiistioioapinetworkingv1alpha3.DestinationRule{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.DestinationRule)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().DestinationRules(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.EnvoyFilter:
oldRes := &apiistioioapinetworkingv1alpha3.EnvoyFilter{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.EnvoyFilter)),
}
modRes := &apiistioioapinetworkingv1alpha3.EnvoyFilter{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.EnvoyFilter)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().EnvoyFilters(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.GRPCRoute:
oldRes := &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1alpha2.GRPCRouteSpec)),
}
modRes := &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1alpha2.GRPCRouteSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.Gateway:
oldRes := &apiistioioapinetworkingv1alpha3.Gateway{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.Gateway)),
}
modRes := &apiistioioapinetworkingv1alpha3.Gateway{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.Gateway)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().Gateways(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.GatewayClass:
oldRes := &sigsk8siogatewayapiapisv1beta1.GatewayClass{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewayClassSpec)),
}
modRes := &sigsk8siogatewayapiapisv1beta1.GatewayClass{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewayClassSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1beta1().GatewayClasses().
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.HTTPRoute:
oldRes := &sigsk8siogatewayapiapisv1beta1.HTTPRoute{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1beta1.HTTPRouteSpec)),
}
modRes := &sigsk8siogatewayapiapisv1beta1.HTTPRoute{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1beta1.HTTPRouteSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.KubernetesGateway:
oldRes := &sigsk8siogatewayapiapisv1beta1.Gateway{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewaySpec)),
}
modRes := &sigsk8siogatewayapiapisv1beta1.Gateway{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1beta1.GatewaySpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1beta1().Gateways(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.PeerAuthentication:
oldRes := &apiistioioapisecurityv1beta1.PeerAuthentication{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapisecurityv1beta1.PeerAuthentication)),
}
modRes := &apiistioioapisecurityv1beta1.PeerAuthentication{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapisecurityv1beta1.PeerAuthentication)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().SecurityV1beta1().PeerAuthentications(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.ProxyConfig:
oldRes := &apiistioioapinetworkingv1beta1.ProxyConfig{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1beta1.ProxyConfig)),
}
modRes := &apiistioioapinetworkingv1beta1.ProxyConfig{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1beta1.ProxyConfig)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1beta1().ProxyConfigs(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.ReferenceGrant:
oldRes := &sigsk8siogatewayapiapisv1beta1.ReferenceGrant{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1beta1.ReferenceGrantSpec)),
}
modRes := &sigsk8siogatewayapiapisv1beta1.ReferenceGrant{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1beta1.ReferenceGrantSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.RequestAuthentication:
oldRes := &apiistioioapisecurityv1beta1.RequestAuthentication{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapisecurityv1beta1.RequestAuthentication)),
}
modRes := &apiistioioapisecurityv1beta1.RequestAuthentication{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapisecurityv1beta1.RequestAuthentication)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().SecurityV1beta1().RequestAuthentications(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.ServiceEntry:
oldRes := &apiistioioapinetworkingv1alpha3.ServiceEntry{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.ServiceEntry)),
}
modRes := &apiistioioapinetworkingv1alpha3.ServiceEntry{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.ServiceEntry)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().ServiceEntries(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.Sidecar:
oldRes := &apiistioioapinetworkingv1alpha3.Sidecar{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.Sidecar)),
}
modRes := &apiistioioapinetworkingv1alpha3.Sidecar{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.Sidecar)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().Sidecars(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.TCPRoute:
oldRes := &sigsk8siogatewayapiapisv1alpha2.TCPRoute{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1alpha2.TCPRouteSpec)),
}
modRes := &sigsk8siogatewayapiapisv1alpha2.TCPRoute{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1alpha2.TCPRouteSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.TLSRoute:
oldRes := &sigsk8siogatewayapiapisv1alpha2.TLSRoute{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1alpha2.TLSRouteSpec)),
}
modRes := &sigsk8siogatewayapiapisv1alpha2.TLSRoute{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1alpha2.TLSRouteSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.Telemetry:
oldRes := &apiistioioapitelemetryv1alpha1.Telemetry{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapitelemetryv1alpha1.Telemetry)),
}
modRes := &apiistioioapitelemetryv1alpha1.Telemetry{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapitelemetryv1alpha1.Telemetry)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().TelemetryV1alpha1().Telemetries(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.UDPRoute:
oldRes := &sigsk8siogatewayapiapisv1alpha2.UDPRoute{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*sigsk8siogatewayapiapisv1alpha2.UDPRouteSpec)),
}
modRes := &sigsk8siogatewayapiapisv1alpha2.UDPRoute{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*sigsk8siogatewayapiapisv1alpha2.UDPRouteSpec)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.VirtualService:
oldRes := &apiistioioapinetworkingv1alpha3.VirtualService{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.VirtualService)),
}
modRes := &apiistioioapinetworkingv1alpha3.VirtualService{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.VirtualService)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().VirtualServices(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.WasmPlugin:
oldRes := &apiistioioapiextensionsv1alpha1.WasmPlugin{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapiextensionsv1alpha1.WasmPlugin)),
}
modRes := &apiistioioapiextensionsv1alpha1.WasmPlugin{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapiextensionsv1alpha1.WasmPlugin)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().ExtensionsV1alpha1().WasmPlugins(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.WorkloadEntry:
oldRes := &apiistioioapinetworkingv1alpha3.WorkloadEntry{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.WorkloadEntry)),
}
modRes := &apiistioioapinetworkingv1alpha3.WorkloadEntry{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.WorkloadEntry)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().WorkloadEntries(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
case gvk.WorkloadGroup:
oldRes := &apiistioioapinetworkingv1alpha3.WorkloadGroup{
ObjectMeta: origMeta,
Spec: *(orig.Spec.(*istioioapinetworkingv1alpha3.WorkloadGroup)),
}
modRes := &apiistioioapinetworkingv1alpha3.WorkloadGroup{
ObjectMeta: modMeta,
Spec: *(mod.Spec.(*istioioapinetworkingv1alpha3.WorkloadGroup)),
}
patchBytes, err := genPatchBytes(oldRes, modRes, typ)
if err != nil {
return nil, err
}
return c.Istio().NetworkingV1alpha3().WorkloadGroups(orig.Namespace).
Patch(context.TODO(), orig.Name, typ, patchBytes, metav1.PatchOptions{FieldManager: "pilot-discovery"})
default:
return nil, fmt.Errorf("unsupported type: %v", orig.GroupVersionKind)
}
}
// delete removes the config resource identified by typ/name/namespace from the
// cluster using the appropriate typed client. When resourceVersion is non-nil,
// it is attached as a delete precondition so the server rejects the delete if
// the object has moved past the version the caller observed.
func delete(c kube.Client, typ config.GroupVersionKind, name, namespace string, resourceVersion *string) error {
	ctx := context.TODO()
	opts := metav1.DeleteOptions{}
	if resourceVersion != nil {
		opts.Preconditions = &metav1.Preconditions{ResourceVersion: resourceVersion}
	}
	switch typ {
	case gvk.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(namespace).Delete(ctx, name, opts)
	case gvk.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(namespace).Delete(ctx, name, opts)
	case gvk.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(namespace).Delete(ctx, name, opts)
	case gvk.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(namespace).Delete(ctx, name, opts)
	case gvk.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(namespace).Delete(ctx, name, opts)
	case gvk.GatewayClass:
		// GatewayClass is cluster-scoped: no namespace argument.
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().Delete(ctx, name, opts)
	case gvk.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(namespace).Delete(ctx, name, opts)
	case gvk.KubernetesGateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(namespace).Delete(ctx, name, opts)
	case gvk.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(namespace).Delete(ctx, name, opts)
	case gvk.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(namespace).Delete(ctx, name, opts)
	case gvk.ReferenceGrant:
		return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(namespace).Delete(ctx, name, opts)
	case gvk.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(namespace).Delete(ctx, name, opts)
	case gvk.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(namespace).Delete(ctx, name, opts)
	case gvk.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(namespace).Delete(ctx, name, opts)
	case gvk.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(namespace).Delete(ctx, name, opts)
	case gvk.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(namespace).Delete(ctx, name, opts)
	case gvk.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(namespace).Delete(ctx, name, opts)
	case gvk.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(namespace).Delete(ctx, name, opts)
	case gvk.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(namespace).Delete(ctx, name, opts)
	case gvk.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(namespace).Delete(ctx, name, opts)
	case gvk.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(namespace).Delete(ctx, name, opts)
	case gvk.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(namespace).Delete(ctx, name, opts)
	default:
		return fmt.Errorf("unsupported type: %v", typ)
	}
}
// translateMeta builds the common config.Meta for a typed Kubernetes object.
// Every resource in translationMap embeds metav1.ObjectMeta, so the metav1.Object
// accessors below read exactly the same fields for each type.
func translateMeta(g config.GroupVersionKind, obj metav1.Object) config.Meta {
	return config.Meta{
		GroupVersionKind:  g,
		Name:              obj.GetName(),
		Namespace:         obj.GetNamespace(),
		Labels:            obj.GetLabels(),
		Annotations:       obj.GetAnnotations(),
		ResourceVersion:   obj.GetResourceVersion(),
		CreationTimestamp: obj.GetCreationTimestamp().Time,
		OwnerReferences:   obj.GetOwnerReferences(),
		UID:               string(obj.GetUID()),
		Generation:        obj.GetGeneration(),
	}
}

// translationMap maps each supported GroupVersionKind to a function converting the
// corresponding typed runtime.Object into the generic config.Config representation.
// For most resources Spec points at the typed object's Spec field; a few plain
// Kubernetes resources without a meaningful Spec (ConfigMap, Secret, Endpoints,
// EndpointSlice, ServiceAccount, webhook configurations) store the whole object.
// Status is set only for resources whose status Istio reads or writes.
var translationMap = map[config.GroupVersionKind]func(r runtime.Object) config.Config{
	gvk.AuthorizationPolicy: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapisecurityv1beta1.AuthorizationPolicy)
		return config.Config{
			Meta:   translateMeta(gvk.AuthorizationPolicy, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.CertificateSigningRequest: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicertificatesv1.CertificateSigningRequest)
		return config.Config{
			Meta:   translateMeta(gvk.CertificateSigningRequest, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.ConfigMap: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.ConfigMap)
		return config.Config{
			Meta: translateMeta(gvk.ConfigMap, obj),
			Spec: obj,
		}
	},
	gvk.CustomResourceDefinition: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiextensionsapiserverpkgapisapiextensionsv1.CustomResourceDefinition)
		return config.Config{
			Meta: translateMeta(gvk.CustomResourceDefinition, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.DaemonSet: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiappsv1.DaemonSet)
		return config.Config{
			Meta: translateMeta(gvk.DaemonSet, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.Deployment: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiappsv1.Deployment)
		return config.Config{
			Meta: translateMeta(gvk.Deployment, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.DestinationRule: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.DestinationRule)
		return config.Config{
			Meta:   translateMeta(gvk.DestinationRule, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.EndpointSlice: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapidiscoveryv1.EndpointSlice)
		return config.Config{
			Meta: translateMeta(gvk.EndpointSlice, obj),
			Spec: obj,
		}
	},
	gvk.Endpoints: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Endpoints)
		return config.Config{
			Meta: translateMeta(gvk.Endpoints, obj),
			Spec: obj,
		}
	},
	gvk.EnvoyFilter: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.EnvoyFilter)
		return config.Config{
			Meta:   translateMeta(gvk.EnvoyFilter, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.GRPCRoute: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1alpha2.GRPCRoute)
		return config.Config{
			Meta:   translateMeta(gvk.GRPCRoute, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Gateway: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.Gateway)
		return config.Config{
			Meta:   translateMeta(gvk.Gateway, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.GatewayClass: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1beta1.GatewayClass)
		return config.Config{
			Meta:   translateMeta(gvk.GatewayClass, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.HTTPRoute: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1beta1.HTTPRoute)
		return config.Config{
			Meta:   translateMeta(gvk.HTTPRoute, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Ingress: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapinetworkingv1.Ingress)
		return config.Config{
			Meta:   translateMeta(gvk.Ingress, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.IngressClass: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapinetworkingv1.IngressClass)
		return config.Config{
			Meta: translateMeta(gvk.IngressClass, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.KubernetesGateway: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1beta1.Gateway)
		return config.Config{
			Meta:   translateMeta(gvk.KubernetesGateway, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Lease: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicoordinationv1.Lease)
		return config.Config{
			Meta: translateMeta(gvk.Lease, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.MutatingWebhookConfiguration: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiadmissionregistrationv1.MutatingWebhookConfiguration)
		return config.Config{
			Meta: translateMeta(gvk.MutatingWebhookConfiguration, obj),
			Spec: obj,
		}
	},
	gvk.Namespace: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Namespace)
		return config.Config{
			Meta: translateMeta(gvk.Namespace, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.Node: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Node)
		return config.Config{
			Meta: translateMeta(gvk.Node, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.PeerAuthentication: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapisecurityv1beta1.PeerAuthentication)
		return config.Config{
			Meta:   translateMeta(gvk.PeerAuthentication, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Pod: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Pod)
		return config.Config{
			Meta: translateMeta(gvk.Pod, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.ProxyConfig: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1beta1.ProxyConfig)
		return config.Config{
			Meta:   translateMeta(gvk.ProxyConfig, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.ReferenceGrant: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1beta1.ReferenceGrant)
		return config.Config{
			Meta: translateMeta(gvk.ReferenceGrant, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.RequestAuthentication: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapisecurityv1beta1.RequestAuthentication)
		return config.Config{
			Meta:   translateMeta(gvk.RequestAuthentication, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Secret: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Secret)
		return config.Config{
			Meta: translateMeta(gvk.Secret, obj),
			Spec: obj,
		}
	},
	gvk.Service: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.Service)
		return config.Config{
			Meta: translateMeta(gvk.Service, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.ServiceAccount: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapicorev1.ServiceAccount)
		return config.Config{
			Meta: translateMeta(gvk.ServiceAccount, obj),
			Spec: obj,
		}
	},
	gvk.ServiceEntry: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.ServiceEntry)
		return config.Config{
			Meta:   translateMeta(gvk.ServiceEntry, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Sidecar: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.Sidecar)
		return config.Config{
			Meta:   translateMeta(gvk.Sidecar, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.StatefulSet: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiappsv1.StatefulSet)
		return config.Config{
			Meta: translateMeta(gvk.StatefulSet, obj),
			Spec: &obj.Spec,
		}
	},
	gvk.TCPRoute: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1alpha2.TCPRoute)
		return config.Config{
			Meta:   translateMeta(gvk.TCPRoute, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.TLSRoute: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1alpha2.TLSRoute)
		return config.Config{
			Meta:   translateMeta(gvk.TLSRoute, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.Telemetry: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapitelemetryv1alpha1.Telemetry)
		return config.Config{
			Meta:   translateMeta(gvk.Telemetry, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.UDPRoute: func(r runtime.Object) config.Config {
		obj := r.(*sigsk8siogatewayapiapisv1alpha2.UDPRoute)
		return config.Config{
			Meta:   translateMeta(gvk.UDPRoute, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.ValidatingWebhookConfiguration: func(r runtime.Object) config.Config {
		obj := r.(*k8sioapiadmissionregistrationv1.ValidatingWebhookConfiguration)
		return config.Config{
			Meta: translateMeta(gvk.ValidatingWebhookConfiguration, obj),
			Spec: obj,
		}
	},
	gvk.VirtualService: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.VirtualService)
		return config.Config{
			Meta:   translateMeta(gvk.VirtualService, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.WasmPlugin: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapiextensionsv1alpha1.WasmPlugin)
		return config.Config{
			Meta:   translateMeta(gvk.WasmPlugin, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.WorkloadEntry: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.WorkloadEntry)
		return config.Config{
			Meta:   translateMeta(gvk.WorkloadEntry, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
	gvk.WorkloadGroup: func(r runtime.Object) config.Config {
		obj := r.(*apiistioioapinetworkingv1alpha3.WorkloadGroup)
		return config.Config{
			Meta:   translateMeta(gvk.WorkloadGroup, obj),
			Spec:   &obj.Spec,
			Status: &obj.Status,
		}
	},
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
"sort"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sv1 "sigs.k8s.io/gateway-api/apis/v1"
k8s "sigs.k8s.io/gateway-api/apis/v1alpha2"
"istio.io/istio/pilot/pkg/model/kstatus"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// RouteParentResult holds the result of processing a route against one specific
// parent reference (a single parentRef entry from the route's spec).
type RouteParentResult struct {
	// OriginalReference contains the original parent reference from the route's spec;
	// status for this parent is reported back under this same reference.
	OriginalReference k8s.ParentReference
	// DeniedReason, if present, indicates why the route failed to attach to this
	// parent; it is surfaced on the Accepted condition.
	DeniedReason *ParentError
	// RouteError, if present, indicates a route-level configuration error; it is
	// surfaced on the ResolvedRefs condition (see createRouteStatus).
	RouteError *ConfigError
}
func createRouteStatus(parentResults []RouteParentResult, obj config.Config, currentParents []k8s.RouteParentStatus) []k8s.RouteParentStatus {
parents := make([]k8s.RouteParentStatus, 0, len(parentResults))
// Fill in all the gateways that are already present but not owned by us. This is non-trivial as there may be multiple
// gateway controllers that are exposing their status on the same route. We need to attempt to manage ours properly (including
// removing gateway references when they are removed), without mangling other Controller's status.
for _, r := range currentParents {
if r.ControllerName != constants.ManagedGatewayController {
// We don't own this status, so keep it around
parents = append(parents, r)
}
}
// Collect all of our unique parent references. There may be multiple when we have a route without section name,
// but reference a parent with multiple sections.
// While we process these internally for-each sectionName, in the status we are just supposed to report one merged entry
seen := map[k8s.ParentReference][]RouteParentResult{}
seenReasons := sets.New[ParentErrorReason]()
successCount := map[k8s.ParentReference]int{}
for _, incoming := range parentResults {
// We will append it if it is our first occurrence, or the existing one has an error. This means
// if *any* section has no errors, we will declare Admitted
if incoming.DeniedReason == nil {
successCount[incoming.OriginalReference]++
}
seen[incoming.OriginalReference] = append(seen[incoming.OriginalReference], incoming)
if incoming.DeniedReason != nil {
seenReasons.Insert(incoming.DeniedReason.Reason)
} else {
seenReasons.Insert(ParentNoError)
}
}
reasonRanking := []ParentErrorReason{
// No errors is preferred
ParentNoError,
// All route level errors
ParentErrorNotAllowed,
ParentErrorNoHostname,
ParentErrorParentRefConflict,
// Failures to match the Port or SectionName. These are last so that if we bind to 1 listener we
// just report errors for that 1 listener instead of for all sections we didn't bind to
ParentErrorNotAccepted,
}
// Next we want to collapse these. We need to report 1 type of error, or none.
report := map[k8s.ParentReference]RouteParentResult{}
for _, wantReason := range reasonRanking {
if !seenReasons.Contains(wantReason) {
continue
}
// We found our highest priority ranking, now we need to collapse this into a single message
for k, refs := range seen {
for _, ref := range refs {
reason := ParentNoError
if ref.DeniedReason != nil {
reason = ref.DeniedReason.Reason
}
if wantReason != reason {
// Skip this one, it is for a less relevant reason
continue
}
exist, f := report[k]
if f {
if ref.DeniedReason != nil {
if exist.DeniedReason != nil {
// join the error
exist.DeniedReason.Message += "; " + ref.DeniedReason.Message
} else {
exist.DeniedReason = ref.DeniedReason
}
}
} else {
exist = ref
}
report[k] = exist
}
}
// Once we find the best reason, do not consider any others
break
}
// Now we fill in all the parents we do own
for k, gw := range report {
msg := "Route was valid"
if successCount[k] > 1 {
msg = fmt.Sprintf("Route was valid, bound to %d parents", successCount[k])
}
conds := map[string]*condition{
string(k8s.RouteConditionAccepted): {
reason: string(k8s.RouteReasonAccepted),
message: msg,
},
string(k8s.RouteConditionResolvedRefs): {
reason: string(k8s.RouteReasonResolvedRefs),
message: "All references resolved",
},
}
if gw.RouteError != nil {
// Currently, the spec is not clear on where errors should be reported. The provided resources are:
// * Accepted - used to describe errors binding to parents
// * ResolvedRefs - used to describe errors about binding to objects
// But no general errors
// For now, we will treat all general route errors as "Ref" errors.
conds[string(k8s.RouteConditionResolvedRefs)].error = gw.RouteError
}
if gw.DeniedReason != nil {
conds[string(k8s.RouteConditionAccepted)].error = &ConfigError{
Reason: ConfigErrorReason(gw.DeniedReason.Reason),
Message: gw.DeniedReason.Message,
}
}
var currentConditions []metav1.Condition
currentStatus := slices.FindFunc(currentParents, func(s k8sv1.RouteParentStatus) bool {
return parentRefString(s.ParentRef) == parentRefString(gw.OriginalReference)
})
if currentStatus != nil {
currentConditions = currentStatus.Conditions
}
parents = append(parents, k8s.RouteParentStatus{
ParentRef: gw.OriginalReference,
ControllerName: constants.ManagedGatewayController,
Conditions: setConditions(obj.Generation, currentConditions, conds),
})
}
// Ensure output is deterministic.
// TODO: will we fight over other controllers doing similar (but not identical) ordering?
sort.SliceStable(parents, func(i, j int) bool {
return parentRefString(parents[i].ParentRef) > parentRefString(parents[j].ParentRef)
})
return parents
}
// ParentErrorReason is a machine-readable reason describing why a route could not bind to a
// parent (Gateway/listener) reference.
type ParentErrorReason string

const (
	// ParentErrorNotAccepted indicates no parent matched the route's port/sectionName reference.
	ParentErrorNotAccepted = ParentErrorReason(k8sv1.RouteReasonNoMatchingParent)
	// ParentErrorNotAllowed indicates the listeners do not allow this route (namespace/kind restrictions).
	ParentErrorNotAllowed = ParentErrorReason(k8s.RouteReasonNotAllowedByListeners)
	// ParentErrorNoHostname indicates no listener hostname matched the route's hostnames.
	ParentErrorNoHostname = ParentErrorReason(k8s.RouteReasonNoMatchingListenerHostname)
	// ParentErrorParentRefConflict indicates the parent reference conflicts with another route.
	ParentErrorParentRefConflict = ParentErrorReason("ParentRefConflict")
	// ParentNoError indicates the route bound to the parent without error.
	ParentNoError = ParentErrorReason("")
)
// ConfigErrorReason is a machine-readable reason describing why a configuration is invalid.
type ConfigErrorReason = string

const (
	// InvalidRefNotPermitted indicates a route was not permitted
	InvalidRefNotPermitted ConfigErrorReason = ConfigErrorReason(k8s.RouteReasonRefNotPermitted)
	// InvalidDestination indicates an issue with the destination
	InvalidDestination ConfigErrorReason = "InvalidDestination"
	// InvalidAddress indicates a Gateway address was of an unsupported type
	InvalidAddress ConfigErrorReason = ConfigErrorReason(k8sv1.GatewayReasonUnsupportedAddress)
	// InvalidDestinationPermit indicates a destination was not permitted
	InvalidDestinationPermit ConfigErrorReason = ConfigErrorReason(k8s.RouteReasonRefNotPermitted)
	// InvalidDestinationKind indicates an issue with the destination kind
	InvalidDestinationKind ConfigErrorReason = ConfigErrorReason(k8s.RouteReasonInvalidKind)
	// InvalidDestinationNotFound indicates a destination does not exist
	InvalidDestinationNotFound ConfigErrorReason = ConfigErrorReason(k8s.RouteReasonBackendNotFound)
	// InvalidParentRef indicates we could not refer to the parent we request
	InvalidParentRef ConfigErrorReason = "InvalidParentReference"
	// InvalidFilter indicates an issue with the filters
	InvalidFilter ConfigErrorReason = "InvalidFilter"
	// InvalidTLS indicates an issue with TLS settings
	InvalidTLS ConfigErrorReason = ConfigErrorReason(k8sv1.ListenerReasonInvalidCertificateRef)
	// InvalidListenerRefNotPermitted indicates a listener reference was not permitted
	InvalidListenerRefNotPermitted ConfigErrorReason = ConfigErrorReason(k8sv1.ListenerReasonRefNotPermitted)
	// InvalidConfiguration indicates a generic error for all other invalid configurations
	InvalidConfiguration ConfigErrorReason = "InvalidConfiguration"
	// InvalidResources maps to the gateway-api "NoResources" reason.
	InvalidResources ConfigErrorReason = ConfigErrorReason(k8sv1.GatewayReasonNoResources)
	// DeprecateFieldUsage indicates a deprecated field was used.
	DeprecateFieldUsage = "DeprecatedField"
)
// ParentError represents that a parent could not be referenced.
type ParentError struct {
	// Reason is the machine-readable reason for the failure.
	Reason ParentErrorReason
	// Message is the human-readable explanation of the failure.
	Message string
}

// ConfigError represents an invalid configuration that will be reported back to the user.
type ConfigError struct {
	// Reason is the machine-readable reason for the failure.
	Reason ConfigErrorReason
	// Message is the human-readable explanation of the failure.
	Message string
}

// condition describes a single status condition to be written, in either its success or error form.
type condition struct {
	// reason defines the reason to report on success. Ignored if error is set
	reason string
	// message defines the message to report on success. Ignored if error is set
	message string
	// status defines the status to report on success. The inverse will be set if error is set
	// If not set, will default to StatusTrue
	status metav1.ConditionStatus
	// error defines an error state; the reason and message will be replaced with that of the error and
	// the status inverted
	error *ConfigError
	// setOnce, if enabled, will only set the condition if it is not yet present or set to this reason
	setOnce string
}
// setConditions merges the provided conditions into existingConditions and returns the result.
// Map keys are condition Types; each value carries both the success form and (optionally) an
// error that replaces the reason/message and inverts the status.
func setConditions(generation int64, existingConditions []metav1.Condition, conditions map[string]*condition) []metav1.Condition {
	// Sort keys for deterministic ordering
	for _, k := range slices.Sort(maps.Keys(conditions)) {
		cond := conditions[k]
		setter := kstatus.UpdateConditionIfChanged
		if cond.setOnce != "" {
			// setOnce conditions are only created when absent (or already set to this reason),
			// rather than unconditionally overwritten.
			setter = func(conditions []metav1.Condition, condition metav1.Condition) []metav1.Condition {
				return kstatus.CreateCondition(conditions, condition, cond.setOnce)
			}
		}
		// A condition can be "negative polarity" (ex: ListenerInvalid) or "positive polarity" (ex:
		// ListenerValid), so in order to determine the status we should set each `condition` defines its
		// default positive status. When there is an error, we will invert that. Example: If we have
		// condition ListenerInvalid, the status will be set to StatusFalse. If an error is reported, it
		// will be inverted to StatusTrue to indicate listeners are invalid. See
		// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties
		// for more information
		if cond.error != nil {
			existingConditions = setter(existingConditions, metav1.Condition{
				Type:               k,
				Status:             kstatus.InvertStatus(cond.status),
				ObservedGeneration: generation,
				LastTransitionTime: metav1.Now(),
				Reason:             cond.error.Reason,
				Message:            cond.error.Message,
			})
		} else {
			// Success path: report the default reason/message; an unset status defaults to True.
			status := cond.status
			if status == "" {
				status = kstatus.StatusTrue
			}
			existingConditions = setter(existingConditions, metav1.Condition{
				Type:               k,
				Status:             status,
				ObservedGeneration: generation,
				LastTransitionTime: metav1.Now(),
				Reason:             cond.reason,
				Message:            cond.message,
			})
		}
	}
	return existingConditions
}
// reportListenerAttachedRoutes records the attached-routes count (i) on the listener status at
// position index, growing the Gateway's listener status list as needed.
func reportListenerAttachedRoutes(index int, obj config.Config, i int32) {
	obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
		gs := s.(*k8s.GatewayStatus)
		// Grow the listener status slice until the target index is addressable.
		for len(gs.Listeners) <= index {
			gs.Listeners = append(gs.Listeners, k8s.ListenerStatus{})
		}
		ls := gs.Listeners[index]
		ls.AttachedRoutes = i
		gs.Listeners[index] = ls
		return gs
	})
}
// reportListenerCondition writes the provided conditions and the computed SupportedKinds to the
// listener status at position index, growing the listener status list as needed. When the
// listener's allowed route kinds are invalid, a ResolvedRefs=false condition is added as well.
func reportListenerCondition(index int, l k8s.Listener, obj config.Config, conditions map[string]*condition) {
	obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
		gs := s.(*k8s.GatewayStatus)
		for len(gs.Listeners) <= index {
			gs.Listeners = append(gs.Listeners, k8s.ListenerStatus{})
		}
		previousConditions := gs.Listeners[index].Conditions
		supportedKinds, kindsValid := generateSupportedKinds(l)
		if !kindsValid {
			conditions[string(k8sv1.ListenerConditionResolvedRefs)] = &condition{
				reason:  string(k8sv1.ListenerReasonInvalidRouteKinds),
				status:  metav1.ConditionFalse,
				message: "Invalid route kinds",
			}
		}
		gs.Listeners[index] = k8s.ListenerStatus{
			Name:           l.Name,
			AttachedRoutes: 0, // this will be reported later
			SupportedKinds: supportedKinds,
			Conditions:     setConditions(obj.Generation, previousConditions, conditions),
		}
		return gs
	})
}
// generateSupportedKinds computes the route kinds a listener supports based on its protocol,
// then intersects that set with the listener's AllowedRoutes.Kinds filter (if specified).
// The second return is false when the user requested a kind that is not supported.
func generateSupportedKinds(l k8s.Listener) ([]k8s.RouteGroupKind, bool) {
	supported := []k8s.RouteGroupKind{}
	switch l.Protocol {
	case k8sv1.HTTPProtocolType, k8sv1.HTTPSProtocolType:
		// Only terminate mode is allowed, so it's always HTTP
		supported = []k8s.RouteGroupKind{
			{Group: (*k8s.Group)(ptr.Of(gvk.HTTPRoute.Group)), Kind: k8s.Kind(gvk.HTTPRoute.Kind)},
			{Group: (*k8s.Group)(ptr.Of(gvk.GRPCRoute.Group)), Kind: k8s.Kind(gvk.GRPCRoute.Kind)},
		}
	case k8sv1.TCPProtocolType:
		supported = []k8s.RouteGroupKind{{Group: (*k8s.Group)(ptr.Of(gvk.TCPRoute.Group)), Kind: k8s.Kind(gvk.TCPRoute.Kind)}}
	case k8sv1.TLSProtocolType:
		// Passthrough TLS carries TLSRoutes; terminated TLS is treated as raw TCP.
		if l.TLS != nil && l.TLS.Mode != nil && *l.TLS.Mode == k8sv1.TLSModePassthrough {
			supported = []k8s.RouteGroupKind{{Group: (*k8s.Group)(ptr.Of(gvk.TLSRoute.Group)), Kind: k8s.Kind(gvk.TLSRoute.Kind)}}
		} else {
			supported = []k8s.RouteGroupKind{{Group: (*k8s.Group)(ptr.Of(gvk.TCPRoute.Group)), Kind: k8s.Kind(gvk.TCPRoute.Kind)}}
		}
		// UDP routes are not supported
	}
	if l.AllowedRoutes != nil && len(l.AllowedRoutes.Kinds) > 0 {
		// We need to filter down to only ones we actually support
		intersection := []k8s.RouteGroupKind{}
		for _, s := range supported {
			for _, kind := range l.AllowedRoutes.Kinds {
				if routeGroupKindEqual(s, kind) {
					intersection = append(intersection, s)
					break
				}
			}
		}
		// Valid only if every user-requested kind was actually supported.
		return intersection, len(intersection) == len(l.AllowedRoutes.Kinds)
	}
	return supported, true
}
// This and the following function really belongs in some gateway-api lib.
// routeGroupKindEqual reports whether two RouteGroupKinds name the same group/kind,
// treating a nil group as the default gateway-api group.
func routeGroupKindEqual(rgk1, rgk2 k8s.RouteGroupKind) bool {
	if rgk1.Kind != rgk2.Kind {
		return false
	}
	return getGroup(rgk1) == getGroup(rgk2)
}
// getGroup returns the group of a RouteGroupKind, defaulting to the gateway-api group when unset.
func getGroup(rgk k8s.RouteGroupKind) k8s.Group {
	if rgk.Group != nil {
		return *rgk.Group
	}
	return k8s.Group(gvk.KubernetesGateway.Group)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
"sort"
"strconv"
"strings"
corev1 "k8s.io/api/core/v1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/util/sets"
)
// GatewayContext contains a minimal subset of push context functionality to be exposed to GatewayAPIControllers.
type GatewayContext struct {
	// ps is the push context snapshot used for service/endpoint lookups.
	ps *model.PushContext
}
// NewGatewayContext wraps a PushContext for consumption by the gateway-api conversion logic.
func NewGatewayContext(ps *model.PushContext) GatewayContext {
	return GatewayContext{ps: ps}
}
// ResolveGatewayInstances attempts to resolve all instances that a gateway will be exposed on.
// Note: this function considers *all* instances of the service; it's possible those instances will not actually be properly functioning
// gateways, so this is not 100% accurate, but sufficient to expose intent to users.
// The actual configuration generation is done on a per-workload basis and will get the exact set of matched instances for that workload.
// Five sets are exposed:
// * Internal addresses (eg istio-ingressgateway.istio-system.svc.cluster.local:80).
// * Internal IP addresses (eg 1.2.3.4). This comes from ClusterIP.
// * External addresses (eg 1.2.3.4), this comes from LoadBalancer services. There may be multiple in some cases (especially multi cluster).
// * Pending addresses (eg istio-ingressgateway.istio-system.svc), are LoadBalancer-type services with pending external addresses.
// * Warnings for references that could not be resolved. These are intended to be user facing.
// The final return, allUsable, is false when any reference failed to resolve to a usable service/port.
func (gc GatewayContext) ResolveGatewayInstances(
	namespace string,
	gwsvcs []string,
	servers []*networking.Server,
) (internal, internalIP, external, pending, warns []string, allUsable bool) {
	// Deduplicated set of ports the servers bind.
	ports := map[int]struct{}{}
	for _, s := range servers {
		ports[int(s.Port.Number)] = struct{}{}
	}
	foundInternal := sets.New[string]()
	foundInternalIP := sets.New[string]()
	foundExternal := sets.New[string]()
	foundPending := sets.New[string]()
	warnings := []string{}
	foundUnusable := false
	log.Debugf("Resolving gateway instances for %v in namespace %s", gwsvcs, namespace)
	for _, g := range gwsvcs {
		svc, f := gc.ps.ServiceIndex.HostnameAndNamespace[host.Name(g)][namespace]
		if !f {
			// Hostname not found in this namespace; hint at other namespaces that do have it.
			otherNamespaces := []string{}
			for ns := range gc.ps.ServiceIndex.HostnameAndNamespace[host.Name(g)] {
				otherNamespaces = append(otherNamespaces, `"`+ns+`"`) // Wrap in quotes for output
			}
			if len(otherNamespaces) > 0 {
				sort.Strings(otherNamespaces)
				warnings = append(warnings, fmt.Sprintf("hostname %q not found in namespace %q, but it was found in namespace(s) %v",
					g, namespace, strings.Join(otherNamespaces, ", ")))
			} else {
				warnings = append(warnings, fmt.Sprintf("hostname %q not found", g))
			}
			foundUnusable = true
			continue
		}
		svcKey := svc.Key()
		for port := range ports {
			instances := gc.ps.ServiceEndpointsByPort(svc, port, nil)
			if len(instances) > 0 {
				// Endpoints exist for this port: record internal, IP, and external addresses.
				foundInternal.Insert(fmt.Sprintf("%s:%d", g, port))
				foundInternalIP.InsertAll(svc.GetAddresses(&model.Proxy{})...)
				if svc.Attributes.ClusterExternalAddresses.Len() > 0 {
					// Fetch external IPs from all clusters
					svc.Attributes.ClusterExternalAddresses.ForEach(func(c cluster.ID, externalIPs []string) {
						foundExternal.InsertAll(externalIPs...)
					})
				} else if corev1.ServiceType(svc.Attributes.Type) == corev1.ServiceTypeLoadBalancer {
					// LoadBalancer with no external address yet: report (once per hostname) as pending.
					if !foundPending.Contains(g) {
						warnings = append(warnings, fmt.Sprintf("address pending for hostname %q", g))
						foundPending.Insert(g)
					}
				}
			} else {
				// No endpoints for this specific port; figure out why to produce a useful warning.
				instancesByPort := gc.ps.ServiceEndpoints(svcKey)
				if instancesEmpty(instancesByPort) {
					warnings = append(warnings, fmt.Sprintf("no instances found for hostname %q", g))
				} else {
					// Endpoints exist on other ports; check if the user referenced a workload
					// (endpoint) port instead of a service port.
					hintPort := sets.New[string]()
					for servicePort, instances := range instancesByPort {
						for _, i := range instances {
							if i.EndpointPort == uint32(port) {
								hintPort.Insert(strconv.Itoa(servicePort))
							}
						}
					}
					if hintPort.Len() > 0 {
						warnings = append(warnings, fmt.Sprintf(
							"port %d not found for hostname %q (hint: the service port should be specified, not the workload port. Did you mean one of these ports: %v?)",
							port, g, sets.SortedList(hintPort)))
						foundUnusable = true
					} else {
						_, isManaged := svc.Attributes.Labels[constants.ManagedGatewayLabel]
						var portExistsOnService bool
						for _, p := range svc.Ports {
							if p.Port == port {
								portExistsOnService = true
								break
							}
						}
						// If this is a managed gateway, the only possible explanation for no instances for the port
						// is a delay in endpoint sync. Therefore, we don't want to warn/change the Programmed condition
						// in this case as long as the port exists on the `Service` object.
						if !isManaged || !portExistsOnService {
							warnings = append(warnings, fmt.Sprintf("port %d not found for hostname %q", port, g))
							foundUnusable = true
						}
					}
				}
			}
		}
	}
	sort.Strings(warnings)
	return sets.SortedList(foundInternal), sets.SortedList(foundInternalIP), sets.SortedList(foundExternal), sets.SortedList(foundPending),
		warnings, !foundUnusable
}
// GetService looks up a service by hostname and namespace, returning nil when it does not exist.
func (gc GatewayContext) GetService(hostname, namespace string) *model.Service {
	byNamespace := gc.ps.ServiceIndex.HostnameAndNamespace[host.Name(hostname)]
	return byNamespace[namespace]
}
// instancesEmpty reports whether the per-port endpoint map contains no endpoints at all.
func instancesEmpty(m map[int][]*model.IstioEndpoint) bool {
	empty := true
	for _, eps := range m {
		if len(eps) > 0 {
			empty = false
			break
		}
	}
	return empty
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
"sync"
"time"
"go.uber.org/atomic"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/kstatus"
"istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
"istio.io/istio/pilot/pkg/status"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// log is the logging scope for the gateway-api controller.
var log = istiolog.RegisterScope("gateway", "gateway-api controller")

// errUnsupportedOp is returned by every mutation method; this config store is a read-only view.
var errUnsupportedOp = fmt.Errorf("unsupported operation: the gateway config store is a read-only view")
// Controller defines the controller for the gateway-api. The controller acts a bit different from most.
// Rather than watching the CRs directly, we depend on the existing model.ConfigStoreController which
// already watches all CRs. When there are updates, a new PushContext will be computed, which will eventually
// call Controller.Reconcile(). Once this happens, we will inspect the current state of the world, and transform
// gateway-api types into Istio types (Gateway/VirtualService). Future calls to Get/List will return these
// Istio types. These are not stored in the cluster at all, and are purely internal; they can be seen on /debug/configz.
// During Reconcile(), the status on all gateway-api types is also tracked. Once completed, if the status
// has changed at all, it is queued to asynchronously update the status of the object in Kubernetes.
type Controller struct {
	// client for accessing Kubernetes
	client kube.Client
	// cache provides access to the underlying gateway-configs
	cache model.ConfigStoreController
	// Gateway-api types reference namespace labels directly, so we need access to these
	namespaces       kclient.Client[*corev1.Namespace]
	namespaceHandler model.EventHandler
	// Gateway-api types reference secrets directly, so we need access to these
	credentialsController credentials.MulticlusterController
	secretHandler         model.EventHandler
	// the cluster where the gateway-api controller runs
	cluster cluster.ID
	// domain stores the cluster domain, typically cluster.local
	domain string
	// state is our computed Istio resources. Access is guarded by stateMu. This is updated from Reconcile().
	state   IstioResources
	stateMu sync.RWMutex
	// statusController controls the status working queue. Status will only be written if statusEnabled is true, which
	// is only the case when we are the leader.
	statusController *status.Controller
	statusEnabled    *atomic.Bool
	// waitForCRD blocks until the given CRD exists (or stop closes); used to defer optional sub-controllers.
	waitForCRD func(class schema.GroupVersionResource, stop <-chan struct{}) bool
}

// Compile-time assertion that Controller implements model.GatewayController.
var _ model.GatewayController = &Controller{}
// NewController constructs a gateway-api Controller backed by the given config store cache.
// Status writing starts disabled (statusController nil, statusEnabled false) and is enabled via
// SetStatusWrite once this instance wins leader election. A namespace update handler is registered
// so that namespace label changes can trigger re-evaluation, and an optional secret handler is
// registered when a credentials controller is provided.
func NewController(
	kc kube.Client,
	c model.ConfigStoreController,
	waitForCRD func(class schema.GroupVersionResource, stop <-chan struct{}) bool,
	credsController credentials.MulticlusterController,
	options controller.Options,
) *Controller {
	namespaces := kclient.New[*corev1.Namespace](kc)
	gatewayController := &Controller{
		client:                kc,
		cache:                 c,
		namespaces:            namespaces,
		credentialsController: credsController,
		cluster:               options.ClusterID,
		domain:                options.DomainSuffix,
		// statusController is intentionally left nil (set later by SetStatusWrite).
		// Disabled by default, we will enable only if we win the leader election
		statusEnabled: atomic.NewBool(false),
		waitForCRD:    waitForCRD,
	}
	namespaces.AddEventHandler(controllers.EventHandler[*corev1.Namespace]{
		UpdateFunc: func(oldNs, newNs *corev1.Namespace) {
			if options.DiscoveryNamespacesFilter != nil && !options.DiscoveryNamespacesFilter.Filter(newNs) {
				return
			}
			// Only fire when the label set actually changed.
			if !labels.Instance(oldNs.Labels).Equals(newNs.Labels) {
				gatewayController.namespaceEvent(oldNs, newNs)
			}
		},
	})
	if credsController != nil {
		credsController.AddSecretHandler(gatewayController.secretEvent)
	}
	return gatewayController
}
// Schemas returns the config types this controller produces: VirtualService and Gateway.
func (c *Controller) Schemas() collection.Schemas {
	produced := []collection.Schema{
		collections.VirtualService,
		collections.Gateway,
	}
	return collection.SchemasFor(produced...)
}
// Get is not supported by this read-only view; it always returns nil. Use List instead.
func (c *Controller) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	return nil
}
// List returns the generated Istio configs of the requested type, optionally filtered by namespace.
// Only gvk.Gateway and gvk.VirtualService are produced by this controller; any other type yields nil.
// Note: the type check was previously duplicated (an early if plus the switch default); the switch
// alone covers both supported and unsupported types.
func (c *Controller) List(typ config.GroupVersionKind, namespace string) []config.Config {
	c.stateMu.RLock()
	defer c.stateMu.RUnlock()
	switch typ {
	case gvk.Gateway:
		return filterNamespace(c.state.Gateway, namespace)
	case gvk.VirtualService:
		return filterNamespace(c.state.VirtualService, namespace)
	default:
		return nil
	}
}
// SetStatusWrite enables or disables writing status for gateway-api resources. It is enabled only
// while this instance is the leader and the feature flag allows it; otherwise the status
// controller is cleared so no further updates are enqueued.
func (c *Controller) SetStatusWrite(enabled bool, statusManager *status.Manager) {
	c.statusEnabled.Store(enabled)
	if enabled && features.EnableGatewayAPIStatus && statusManager != nil {
		c.statusController = statusManager.CreateGenericController(func(status any, context any) status.GenerationProvider {
			return &gatewayGeneration{context}
		})
	} else {
		c.statusController = nil
	}
}
// Reconcile takes in a current snapshot of the gateway-api configs, and regenerates our internal state.
// Any status updates required will be enqueued as well.
func (c *Controller) Reconcile(ps *model.PushContext) error {
	t0 := time.Now()
	defer func() {
		log.Debugf("reconcile complete in %v", time.Since(t0))
	}()
	// Snapshot every gateway-api input type from the config cache.
	gatewayClass := c.cache.List(gvk.GatewayClass, metav1.NamespaceAll)
	gateway := c.cache.List(gvk.KubernetesGateway, metav1.NamespaceAll)
	httpRoute := c.cache.List(gvk.HTTPRoute, metav1.NamespaceAll)
	grpcRoute := c.cache.List(gvk.GRPCRoute, metav1.NamespaceAll)
	tcpRoute := c.cache.List(gvk.TCPRoute, metav1.NamespaceAll)
	tlsRoute := c.cache.List(gvk.TLSRoute, metav1.NamespaceAll)
	referenceGrant := c.cache.List(gvk.ReferenceGrant, metav1.NamespaceAll)
	serviceEntry := c.cache.List(gvk.ServiceEntry, metav1.NamespaceAll) // TODO lazy load only referenced SEs?
	input := GatewayResources{
		// Status-bearing types get a mutable wrapped copy of their status so conversion can update it.
		GatewayClass:   deepCopyStatus(gatewayClass),
		Gateway:        deepCopyStatus(gateway),
		HTTPRoute:      deepCopyStatus(httpRoute),
		GRPCRoute:      deepCopyStatus(grpcRoute),
		TCPRoute:       deepCopyStatus(tcpRoute),
		TLSRoute:       deepCopyStatus(tlsRoute),
		ReferenceGrant: referenceGrant,
		ServiceEntry:   serviceEntry,
		Domain:         c.domain,
		Context:        NewGatewayContext(ps),
	}
	if !input.hasResources() {
		// Early exit for common case of no gateway-api used.
		c.stateMu.Lock()
		defer c.stateMu.Unlock()
		// make sure we clear out the state, to handle the last gateway-api resource being removed
		c.state = IstioResources{}
		return nil
	}
	// Index namespaces by name for label-based route selection.
	nsl := c.namespaces.List("", klabels.Everything())
	namespaces := make(map[string]*corev1.Namespace, len(nsl))
	for _, ns := range nsl {
		namespaces[ns.Name] = ns
	}
	input.Namespaces = namespaces
	if c.credentialsController != nil {
		credentials, err := c.credentialsController.ForCluster(c.cluster)
		if err != nil {
			return fmt.Errorf("failed to get credentials: %v", err)
		}
		input.Credentials = credentials
	}
	output := convertResources(input)
	// Handle all status updates
	c.QueueStatusUpdates(input)
	// Swap in the freshly computed state under the write lock.
	c.stateMu.Lock()
	defer c.stateMu.Unlock()
	c.state = output
	return nil
}
// QueueStatusUpdates enqueues status writes for every status-bearing gateway-api resource type.
func (c *Controller) QueueStatusUpdates(r GatewayResources) {
	for _, cfgs := range [][]config.Config{
		r.GatewayClass,
		r.Gateway,
		r.HTTPRoute,
		r.GRPCRoute,
		r.TCPRoute,
		r.TLSRoute,
	} {
		c.handleStatusUpdates(cfgs)
	}
}
// handleStatusUpdates enqueues a status write for each config whose wrapped status was mutated
// during conversion. It is a no-op when status writing is disabled or no controller is present.
func (c *Controller) handleStatusUpdates(configs []config.Config) {
	if c.statusController == nil || !c.statusEnabled.Load() {
		return
	}
	for _, cfg := range configs {
		wrapped := cfg.Status.(*kstatus.WrappedStatus)
		if !wrapped.Dirty {
			continue
		}
		resource := status.ResourceFromModelConfig(cfg)
		c.statusController.EnqueueStatusUpdateResource(wrapped.Unwrap(), resource)
	}
}
// Create is not supported; the gateway config store is a read-only view.
func (c *Controller) Create(config config.Config) (revision string, err error) {
	return "", errUnsupportedOp
}

// Update is not supported; the gateway config store is a read-only view.
func (c *Controller) Update(config config.Config) (newRevision string, err error) {
	return "", errUnsupportedOp
}

// UpdateStatus is not supported; status is written asynchronously via the status controller instead.
func (c *Controller) UpdateStatus(config config.Config) (newRevision string, err error) {
	return "", errUnsupportedOp
}

// Patch is not supported; the gateway config store is a read-only view.
func (c *Controller) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	return "", errUnsupportedOp
}

// Delete is not supported; the gateway config store is a read-only view.
func (c *Controller) Delete(typ config.GroupVersionKind, name, namespace string, _ *string) error {
	return errUnsupportedOp
}
// RegisterEventHandler records handlers for namespace and secret events, which this controller
// raises itself. For all other types, do nothing as c.cache has been registered already.
func (c *Controller) RegisterEventHandler(typ config.GroupVersionKind, handler model.EventHandler) {
	if typ == gvk.Namespace {
		c.namespaceHandler = handler
		return
	}
	if typ == gvk.Secret {
		c.secretHandler = handler
	}
}
// Run starts the controller's background work. When the GatewayClass controller feature is
// enabled, a goroutine waits for the GatewayClass CRD to become available (or for stop to close)
// before starting the class controller.
func (c *Controller) Run(stop <-chan struct{}) {
	if features.EnableGatewayAPIGatewayClassController {
		go func() {
			if c.waitForCRD(gvr.GatewayClass, stop) {
				gcc := NewClassController(c.client)
				// NOTE(review): RunAndWait is called after NewClassController, presumably so any
				// informers the class controller registered are started before it runs — confirm.
				c.client.RunAndWait(stop)
				gcc.Run(stop)
			}
		}()
	}
}
// HasSynced reports whether both the config cache and the namespace informer have completed
// their initial sync.
func (c *Controller) HasSynced() bool {
	if !c.cache.HasSynced() {
		return false
	}
	return c.namespaces.HasSynced()
}
// SecretAllowed reports whether the computed ReferenceGrants permit the given namespace to
// reference the named secret.
func (c *Controller) SecretAllowed(resourceName string, namespace string) bool {
	c.stateMu.RLock()
	defer c.stateMu.RUnlock()
	refs := c.state.AllowedReferences
	return refs.SecretAllowed(resourceName, namespace)
}
// namespaceEvent handles a namespace add/update. Gateways can select routes by label, so we need
// to handle the case where the labels change.
// Note: we don't handle delete as a delete would also clean up any relevant gateway-api types which
// will trigger their own events.
func (c *Controller) namespaceEvent(oldNs, newNs *corev1.Namespace) {
	// Gather every label key present on either version of the namespace. NamespaceNameLabel is
	// always included since we have special logic to always allow selecting on it.
	touched := sets.New(NamespaceNameLabel)
	touched.InsertAll(getLabelKeys(oldNs)...)
	touched.InsertAll(getLabelKeys(newNs)...)
	// Compare against the label keys our Gateways actually reference.
	c.stateMu.RLock()
	relevant := touched.Intersection(c.state.ReferencedNamespaceKeys)
	c.stateMu.RUnlock()
	// Any overlap means a referenced namespace label may have changed, so trigger a push. A more
	// exact check could determine whether the label selection result actually changed, but this
	// simpler approach is likely to scale well enough for now.
	if relevant.IsEmpty() || c.namespaceHandler == nil {
		return
	}
	log.Debugf("namespace labels changed, triggering namespace handler: %v", relevant.UnsortedList())
	c.namespaceHandler(config.Config{}, config.Config{}, model.EventUpdate)
}
// getLabelKeys extracts all label keys from a namespace object; nil namespaces yield nil.
func getLabelKeys(ns *corev1.Namespace) []string {
	if ns != nil {
		return maps.Keys(ns.Labels)
	}
	return nil
}
// secretEvent triggers the registered secret handler for each Gateway that references the
// changed secret, so the affected gateway configuration can be recomputed.
func (c *Controller) secretEvent(name, namespace string) {
	secretKey := model.ConfigKey{
		Kind:      kind.Secret,
		Namespace: namespace,
		Name:      name,
	}
	c.stateMu.RLock()
	impacted := c.state.ResourceReferences[secretKey]
	c.stateMu.RUnlock()
	if len(impacted) == 0 {
		return
	}
	log.Debugf("secret %s/%s changed, triggering secret handler", namespace, name)
	for _, ref := range impacted {
		gw := config.Config{
			Meta: config.Meta{
				GroupVersionKind: gvk.KubernetesGateway,
				Namespace:        ref.Namespace,
				Name:             ref.Name,
			},
		}
		c.secretHandler(gw, gw, model.EventUpdate)
	}
}
// deepCopyStatus creates a copy of all configs, with a copy of the status field that we can mutate.
// This allows our functions to call Status.Mutate, and then we can later persist all changes into
// the API server.
func deepCopyStatus(configs []config.Config) []config.Config {
	out := make([]config.Config, 0, len(configs))
	for _, cfg := range configs {
		out = append(out, config.Config{
			Meta:   cfg.Meta,
			Spec:   cfg.Spec,
			Status: kstatus.Wrap(cfg.Status),
		})
	}
	return out
}
// filterNamespace returns only the configs belonging to the given namespace, implementing the
// namespace argument of List. NamespaceAll returns the input unchanged.
func filterNamespace(cfgs []config.Config, namespace string) []config.Config {
	if namespace == metav1.NamespaceAll {
		return cfgs
	}
	matched := make([]config.Config, 0, len(cfgs))
	for _, cfg := range cfgs {
		if cfg.Namespace == namespace {
			matched = append(matched, cfg)
		}
	}
	return matched
}
// hasResources determines if there are any gateway-api resources created at all.
// If not, we can short circuit all processing to avoid excessive work.
func (kr GatewayResources) hasResources() bool {
	counts := [...]int{
		len(kr.GatewayClass),
		len(kr.Gateway),
		len(kr.HTTPRoute),
		len(kr.GRPCRoute),
		len(kr.TCPRoute),
		len(kr.TLSRoute),
		len(kr.ReferenceGrant),
	}
	for _, n := range counts {
		if n > 0 {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"crypto/tls"
"fmt"
"net"
"net/netip"
"sort"
"strings"
"time"
"google.golang.org/protobuf/types/known/durationpb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
k8sv1 "sigs.k8s.io/gateway-api/apis/v1"
k8s "sigs.k8s.io/gateway-api/apis/v1alpha2"
istio "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
creds "istio.io/istio/pilot/pkg/model/credentials"
"istio.io/istio/pilot/pkg/model/kstatus"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
kubeconfig "istio.io/istio/pkg/config/gateway/kube"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// sortConfigByCreationTime orders configs by creation timestamp, breaking ties by
// namespace/name so the ordering is fully deterministic.
func sortConfigByCreationTime(configs []config.Config) {
	sort.Slice(configs, func(i, j int) bool {
		ci, cj := configs[i], configs[j]
		if !ci.CreationTimestamp.Equal(cj.CreationTimestamp) {
			return ci.CreationTimestamp.Before(cj.CreationTimestamp)
		}
		return ci.Namespace+"/"+ci.Name < cj.Namespace+"/"+cj.Name
	})
}
// convertResources is the top level entrypoint to our conversion logic, computing the full state based
// on KubernetesResources inputs.
func convertResources(r GatewayResources) IstioResources {
	// sort HTTPRoutes by creation timestamp and namespace/name
	sortConfigByCreationTime(r.HTTPRoute)
	sortConfigByCreationTime(r.GRPCRoute)
	result := IstioResources{}
	ctx := configContext{
		GatewayResources:   r,
		AllowedReferences:  convertReferencePolicies(r),
		resourceReferences: make(map[model.ConfigKey][]model.ConfigKey),
	}
	// Gateways must be converted first; routes bind to them via gwMap.
	gw, gwMap, nsReferences := convertGateways(ctx)
	ctx.GatewayReferences = gwMap
	result.Gateway = gw
	result.VirtualService = convertVirtualService(ctx)
	// Once we have gone through all route computation, we will know how many routes bound to each gateway.
	// Report this in the status.
	for _, dm := range gwMap {
		for _, pri := range dm {
			if pri.ReportAttachedRoutes != nil {
				pri.ReportAttachedRoutes()
			}
		}
	}
	result.AllowedReferences = ctx.AllowedReferences
	result.ReferencedNamespaceKeys = nsReferences
	result.ResourceReferences = ctx.resourceReferences
	return result
}
// convertReferencePolicies extracts all ReferenceGrants into an easily accessible index,
// keyed by the (from, to) Reference pair. Each entry accumulates the allowed target names,
// or AllowAll when a grant omits the name. Unsupported from/to kinds are silently skipped,
// as they may be intended for another controller.
// Note: the previous implementation materialized an intermediate slice of (namespace, spec)
// pairs before iterating; the grants are now processed directly.
func convertReferencePolicies(r GatewayResources) AllowedReferences {
	res := map[Reference]map[Reference]*Grants{}
	for _, obj := range r.ReferenceGrant {
		rp := obj.Spec.(*k8s.ReferenceGrantSpec)
		for _, from := range rp.From {
			fromKey := Reference{
				Namespace: from.Namespace,
			}
			if string(from.Group) == gvk.KubernetesGateway.Group && string(from.Kind) == gvk.KubernetesGateway.Kind {
				fromKey.Kind = gvk.KubernetesGateway
			} else if string(from.Group) == gvk.HTTPRoute.Group && string(from.Kind) == gvk.HTTPRoute.Kind {
				fromKey.Kind = gvk.HTTPRoute
			} else if string(from.Group) == gvk.TLSRoute.Group && string(from.Kind) == gvk.TLSRoute.Kind {
				fromKey.Kind = gvk.TLSRoute
			} else if string(from.Group) == gvk.TCPRoute.Group && string(from.Kind) == gvk.TCPRoute.Kind {
				fromKey.Kind = gvk.TCPRoute
			} else {
				// Not supported type. Not an error; may be for another controller
				continue
			}
			for _, to := range rp.To {
				// The grant's own namespace is the target namespace.
				toKey := Reference{
					Namespace: k8s.Namespace(obj.Namespace),
				}
				if to.Group == "" && string(to.Kind) == gvk.Secret.Kind {
					toKey.Kind = gvk.Secret
				} else if to.Group == "" && string(to.Kind) == gvk.Service.Kind {
					toKey.Kind = gvk.Service
				} else {
					// Not supported type. Not an error; may be for another controller
					continue
				}
				if _, f := res[fromKey]; !f {
					res[fromKey] = map[Reference]*Grants{}
				}
				if _, f := res[fromKey][toKey]; !f {
					res[fromKey][toKey] = &Grants{
						AllowedNames: sets.New[string](),
					}
				}
				// A named grant allows just that name; an unnamed grant allows everything.
				if to.Name != nil {
					res[fromKey][toKey].AllowedNames.Insert(string(*to.Name))
				} else {
					res[fromKey][toKey].AllowAll = true
				}
			}
		}
	}
	return res
}
// convertVirtualService takes all xRoute types and generates corresponding VirtualServices.
func convertVirtualService(r configContext) []config.Config {
	out := []config.Config{}
	for _, tcp := range r.TCPRoute {
		out = append(out, buildTCPVirtualService(r, tcp)...)
	}
	for _, tls := range r.TLSRoute {
		out = append(out, buildTLSVirtualService(r, tls)...)
	}
	// HTTP and GRPC routes are merged into shared VirtualServices:
	// one per gateway+host for gateway-attached routes, and one per
	// namespace+host for mesh-attached routes.
	byGatewayHost := make(map[string]map[string]*config.Config)
	byMeshHost := make(map[string]map[string]*config.Config)
	for _, http := range r.HTTPRoute {
		buildHTTPVirtualServices(r, http, byGatewayHost, byMeshHost)
	}
	for _, grpc := range r.GRPCRoute {
		buildGRPCVirtualServices(r, grpc, byGatewayHost, byMeshHost)
	}
	collect := func(m map[string]map[string]*config.Config) {
		for _, byHost := range m {
			for _, cfg := range byHost {
				out = append(out, *cfg)
			}
		}
	}
	collect(byGatewayHost)
	collect(byMeshHost)
	return out
}
// convertHTTPRoute converts a single HTTPRouteRule (rule number `pos` of `obj`) into an
// Istio HTTPRoute. enforceRefGrant controls whether cross-namespace backend/mirror
// references must be permitted by a ReferenceGrant. On a hard error it returns (nil, err);
// otherwise it returns the route plus an optional backend-level error the caller reports
// in status while still using the route.
func convertHTTPRoute(r k8s.HTTPRouteRule, ctx configContext,
	obj config.Config, pos int, enforceRefGrant bool,
) (*istio.HTTPRoute, *ConfigError) {
	// TODO: implement corspolicy, retries (rewrite and timeouts are handled below)
	vs := &istio.HTTPRoute{}
	// Auto-name the route. If upstream defines an explicit name, will use it instead
	// The position within the route is unique
	vs.Name = fmt.Sprintf("%s.%s.%d", obj.Namespace, obj.Name, pos)
	// Each HTTPRouteMatch becomes one Istio match clause; any invalid match aborts the rule.
	for _, match := range r.Matches {
		uri, err := createURIMatch(match)
		if err != nil {
			return nil, err
		}
		headers, err := createHeadersMatch(match)
		if err != nil {
			return nil, err
		}
		qp, err := createQueryParamsMatch(match)
		if err != nil {
			return nil, err
		}
		method, err := createMethodMatch(match)
		if err != nil {
			return nil, err
		}
		vs.Match = append(vs.Match, &istio.HTTPMatchRequest{
			Uri:         uri,
			Headers:     headers,
			QueryParams: qp,
			Method:      method,
		})
	}
	for _, filter := range r.Filters {
		switch filter.Type {
		case k8sv1.HTTPRouteFilterRequestHeaderModifier:
			h := createHeadersFilter(filter.RequestHeaderModifier)
			if h == nil {
				continue
			}
			if vs.Headers == nil {
				vs.Headers = &istio.Headers{}
			}
			vs.Headers.Request = h
		case k8sv1.HTTPRouteFilterResponseHeaderModifier:
			h := createHeadersFilter(filter.ResponseHeaderModifier)
			if h == nil {
				continue
			}
			if vs.Headers == nil {
				vs.Headers = &istio.Headers{}
			}
			vs.Headers.Response = h
		case k8sv1.HTTPRouteFilterRequestRedirect:
			vs.Redirect = createRedirectFilter(filter.RequestRedirect)
		case k8sv1.HTTPRouteFilterRequestMirror:
			// Mirror targets are backend references, so they are subject to ReferenceGrant checks.
			mirror, err := createMirrorFilter(ctx, filter.RequestMirror, obj.Namespace, enforceRefGrant, gvk.HTTPRoute)
			if err != nil {
				return nil, err
			}
			vs.Mirrors = append(vs.Mirrors, mirror)
		case k8sv1.HTTPRouteFilterURLRewrite:
			vs.Rewrite = createRewriteFilter(filter.URLRewrite)
		default:
			// Unrecognized filters must be rejected rather than silently dropped.
			return nil, &ConfigError{
				Reason:  InvalidFilter,
				Message: fmt.Sprintf("unsupported filter type %q", filter.Type),
			}
		}
	}
	if r.Timeouts != nil {
		if r.Timeouts.Request != nil {
			// A parse failure yields a zero duration, which is treated the same as "unset".
			request, _ := time.ParseDuration(string(*r.Timeouts.Request))
			if request != 0 {
				vs.Timeout = durationpb.New(request)
			}
		}
		if r.Timeouts.BackendRequest != nil {
			backendRequest, _ := time.ParseDuration(string(*r.Timeouts.BackendRequest))
			if backendRequest != 0 {
				timeout := durationpb.New(backendRequest)
				// With retries configured, the backend timeout maps to the per-try timeout;
				// otherwise it becomes the overall route timeout (overriding Request above).
				if vs.Retries != nil {
					vs.Retries.PerTryTimeout = timeout
				} else {
					vs.Timeout = timeout
				}
			}
		}
	}
	if weightSum(r.BackendRefs) == 0 && vs.Redirect == nil {
		// The spec requires us to return 500 when there are no >0 weight backends
		vs.DirectResponse = &istio.HTTPDirectResponse{
			Status: 500,
		}
	} else {
		route, backendErr, err := buildHTTPDestination(ctx, r.BackendRefs, obj.Namespace, enforceRefGrant)
		if err != nil {
			return nil, err
		}
		vs.Route = route
		return vs, backendErr
	}
	return vs, nil
}
// convertGRPCRoute converts a single GRPCRouteRule (rule number `pos` of `obj`) into an
// Istio HTTPRoute (gRPC runs over HTTP/2, so it shares the HTTP route model).
// enforceRefGrant controls ReferenceGrant enforcement for cross-namespace backends.
// On a hard error it returns (nil, err); otherwise the route plus an optional
// backend-level error to report in status.
func convertGRPCRoute(r k8s.GRPCRouteRule, ctx configContext,
	obj config.Config, pos int, enforceRefGrant bool,
) (*istio.HTTPRoute, *ConfigError) {
	// TODO: implement rewrite, timeout, corspolicy, retries (mirror is handled below)
	vs := &istio.HTTPRoute{}
	// Auto-name the route. If upstream defines an explicit name, will use it instead
	// The position within the route is unique
	vs.Name = fmt.Sprintf("%s.%s.%d", obj.Namespace, obj.Name, pos)
	// gRPC matches only support method (mapped to a URI match) and headers.
	for _, match := range r.Matches {
		uri, err := createGRPCURIMatch(match)
		if err != nil {
			return nil, err
		}
		headers, err := createGRPCHeadersMatch(match)
		if err != nil {
			return nil, err
		}
		vs.Match = append(vs.Match, &istio.HTTPMatchRequest{
			Uri:     uri,
			Headers: headers,
		})
	}
	for _, filter := range r.Filters {
		switch filter.Type {
		case k8s.GRPCRouteFilterRequestHeaderModifier:
			h := createHeadersFilter(filter.RequestHeaderModifier)
			if h == nil {
				continue
			}
			if vs.Headers == nil {
				vs.Headers = &istio.Headers{}
			}
			vs.Headers.Request = h
		case k8s.GRPCRouteFilterResponseHeaderModifier:
			h := createHeadersFilter(filter.ResponseHeaderModifier)
			if h == nil {
				continue
			}
			if vs.Headers == nil {
				vs.Headers = &istio.Headers{}
			}
			vs.Headers.Response = h
		case k8s.GRPCRouteFilterRequestMirror:
			mirror, err := createMirrorFilter(ctx, filter.RequestMirror, obj.Namespace, enforceRefGrant, gvk.GRPCRoute)
			if err != nil {
				return nil, err
			}
			vs.Mirrors = append(vs.Mirrors, mirror)
		default:
			// Unrecognized filters must be rejected rather than silently dropped.
			return nil, &ConfigError{
				Reason:  InvalidFilter,
				Message: fmt.Sprintf("unsupported filter type %q", filter.Type),
			}
		}
	}
	if grpcWeightSum(r.BackendRefs) == 0 && vs.Redirect == nil {
		// The spec requires us to return 500 when there are no >0 weight backends
		vs.DirectResponse = &istio.HTTPDirectResponse{
			Status: 500,
		}
	} else {
		route, backendErr, err := buildGRPCDestination(ctx, r.BackendRefs, obj.Namespace, enforceRefGrant)
		if err != nil {
			return nil, err
		}
		vs.Route = route
		return vs, backendErr
	}
	return vs, nil
}
// parentTypes reports whether any of the given parent references attach to the
// mesh (Service/ServiceEntry parents) and whether any attach to a gateway.
func parentTypes(rpi []routeParentReference) (mesh, gateway bool) {
	for _, ref := range rpi {
		if ref.IsMesh() {
			mesh = true
		} else {
			gateway = true
		}
		if mesh && gateway {
			// Both flags set; no later reference can change the answer.
			break
		}
	}
	return mesh, gateway
}
// buildHTTPVirtualServices converts a single HTTPRoute into Istio VirtualService routes,
// merging the result into the shared accumulators: gatewayRoutes (keyed gateway->host) and
// meshRoutes (keyed namespace[/port]->host). It also writes per-parent status back onto obj.
func buildHTTPVirtualServices(
	ctx configContext,
	obj config.Config,
	gatewayRoutes map[string]map[string]*config.Config,
	meshRoutes map[string]map[string]*config.Config,
) {
	route := obj.Spec.(*k8s.HTTPRouteSpec)
	parentRefs := extractParentReferenceInfo(ctx.GatewayReferences, route.ParentRefs, route.Hostnames, gvk.HTTPRoute, obj.Namespace)
	// reportStatus records the per-parent conversion result into the route's status.
	reportStatus := func(results []RouteParentResult) {
		obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
			rs := s.(*k8s.HTTPRouteStatus)
			rs.Parents = createRouteStatus(results, obj, rs.Parents)
			return rs
		})
	}
	type conversionResult struct {
		error  *ConfigError
		routes []*istio.HTTPRoute
	}
	// convertRules translates every rule into Istio HTTPRoutes. ReferenceGrant
	// enforcement is applied only for gateway parents (enforceRefGrant = !mesh).
	convertRules := func(mesh bool) conversionResult {
		res := conversionResult{}
		for n, r := range route.Rules {
			// split the rule to make sure each rule has up to one match
			matches := slices.Reference(r.Matches)
			if len(matches) == 0 {
				matches = append(matches, nil)
			}
			for _, m := range matches {
				if m != nil {
					r.Matches = []k8s.HTTPRouteMatch{*m}
				}
				vs, err := convertHTTPRoute(r, ctx, obj, n, !mesh)
				// This was a hard error
				if vs == nil {
					res.error = err
					return conversionResult{error: err}
				}
				// Got an error but also routes
				if err != nil {
					res.error = err
				}
				res.routes = append(res.routes, vs)
			}
		}
		return res
	}
	meshResult, gwResult := buildMeshAndGatewayRoutes(parentRefs, convertRules)
	reportStatus(slices.Map(parentRefs, func(r routeParentReference) RouteParentResult {
		res := RouteParentResult{
			OriginalReference: r.OriginalReference,
			DeniedReason:      r.DeniedReason,
			RouteError:        gwResult.error,
		}
		if r.IsMesh() {
			res.RouteError = meshResult.error
		}
		return res
	}))
	count := 0
	for _, parent := range filteredReferences(parentRefs) {
		// for gateway routes, build one VS per gateway+host
		routeMap := gatewayRoutes
		routeKey := parent.InternalName
		vsHosts := hostnameToStringList(route.Hostnames)
		routes := gwResult.routes
		if parent.IsMesh() {
			routes = meshResult.routes
			// for mesh routes, build one VS per namespace/port->host
			routeMap = meshRoutes
			routeKey = obj.Namespace
			if parent.OriginalReference.Port != nil {
				routes = augmentPortMatch(routes, *parent.OriginalReference.Port)
				routeKey += fmt.Sprintf("/%d", *parent.OriginalReference.Port)
			}
			// Mesh hosts come from the parent service itself, not the route's hostnames.
			if parent.InternalKind == gvk.ServiceEntry {
				vsHosts = serviceEntryHosts(ctx.ServiceEntry,
					string(parent.OriginalReference.Name),
					string(ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace))))
			} else {
				vsHosts = []string{fmt.Sprintf("%s.%s.svc.%s",
					parent.OriginalReference.Name, ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace)), ctx.Domain)}
			}
		}
		if len(routes) == 0 {
			continue
		}
		if _, f := routeMap[routeKey]; !f {
			routeMap[routeKey] = make(map[string]*config.Config)
		}
		// Create one VS per hostname with a single hostname.
		// This ensures we can treat each hostname independently, as the spec requires
		for _, h := range vsHosts {
			if cfg := routeMap[routeKey][h]; cfg != nil {
				// merge http routes
				vs := cfg.Spec.(*istio.VirtualService)
				vs.Http = append(vs.Http, routes...)
				// append parents
				cfg.Annotations[constants.InternalParentNames] = fmt.Sprintf("%s,%s/%s.%s",
					cfg.Annotations[constants.InternalParentNames], obj.GroupVersionKind.Kind, obj.Name, obj.Namespace)
			} else {
				// count keeps generated names unique across hosts/parents of this route.
				name := fmt.Sprintf("%s-%d-%s", obj.Name, count, constants.KubernetesGatewayName)
				routeMap[routeKey][h] = &config.Config{
					Meta: config.Meta{
						CreationTimestamp: obj.CreationTimestamp,
						GroupVersionKind:  gvk.VirtualService,
						Name:              name,
						Annotations:       routeMeta(obj),
						Namespace:         obj.Namespace,
						Domain:            ctx.Domain,
					},
					Spec: &istio.VirtualService{
						Hosts:    []string{h},
						Gateways: []string{parent.InternalName},
						Http:     routes,
					},
				}
				count++
			}
		}
	}
	// Re-sort merged routes so precedence rules hold across all contributing HTTPRoutes.
	for _, vsByHost := range gatewayRoutes {
		for _, cfg := range vsByHost {
			vs := cfg.Spec.(*istio.VirtualService)
			sortHTTPRoutes(vs.Http)
		}
	}
	for _, vsByHost := range meshRoutes {
		for _, cfg := range vsByHost {
			vs := cfg.Spec.(*istio.VirtualService)
			sortHTTPRoutes(vs.Http)
		}
	}
}
// serviceEntryHosts returns the hosts of the ServiceEntry matching name/namespace,
// or an empty list when none matches. A ServiceEntry with an empty namespace is
// treated as living in the "default" namespace.
func serviceEntryHosts(ses []config.Config, name, namespace string) []string {
	for _, candidate := range ses {
		if candidate.Meta.Name != name {
			continue
		}
		ns := candidate.Meta.Namespace
		if ns == "" {
			ns = metav1.NamespaceDefault
		}
		if ns != namespace {
			continue
		}
		return candidate.Spec.(*istio.ServiceEntry).Hosts
	}
	return []string{}
}
// buildMeshAndGatewayRoutes runs the conversion callback at most twice: once with
// mesh=true when any parent is a mesh (Service/ServiceEntry) parent, and once with
// mesh=false when any parent is a gateway. A context with no matching parents
// returns the zero value of T.
func buildMeshAndGatewayRoutes[T any](parentRefs []routeParentReference, convertRules func(mesh bool) T) (T, T) {
	var forMesh, forGateway T
	wantMesh, wantGateway := parentTypes(parentRefs)
	if wantMesh {
		forMesh = convertRules(true)
	}
	if wantGateway {
		forGateway = convertRules(false)
	}
	return forMesh, forGateway
}
// augmentPortMatch returns deep copies of the routes with the given port stamped
// onto every match clause; routes with no matches gain a port-only match.
func augmentPortMatch(routes []*istio.HTTPRoute, port k8sv1.PortNumber) []*istio.HTTPRoute {
	out := make([]*istio.HTTPRoute, 0, len(routes))
	for _, route := range routes {
		cp := route.DeepCopy()
		if len(cp.Match) == 0 {
			cp.Match = []*istio.HTTPMatchRequest{{
				Port: uint32(port),
			}}
		} else {
			for _, m := range cp.Match {
				m.Port = uint32(port)
			}
		}
		out = append(out, cp)
	}
	return out
}
// augmentTCPPortMatch returns deep copies of the TCP routes with the given port
// stamped onto every match clause; routes with no matches gain a port-only match.
func augmentTCPPortMatch(routes []*istio.TCPRoute, port k8sv1.PortNumber) []*istio.TCPRoute {
	out := make([]*istio.TCPRoute, 0, len(routes))
	for _, route := range routes {
		cp := route.DeepCopy()
		if len(cp.Match) == 0 {
			cp.Match = []*istio.L4MatchAttributes{{
				Port: uint32(port),
			}}
		} else {
			for _, m := range cp.Match {
				m.Port = uint32(port)
			}
		}
		out = append(out, cp)
	}
	return out
}
// augmentTLSPortMatch returns deep copies of the TLS routes with the given port
// (when set) stamped onto every match. A lone wildcard-SNI match is replaced with
// the parent's hosts, since for mesh parents the SNI should reflect the service.
func augmentTLSPortMatch(routes []*istio.TLSRoute, port *k8sv1.PortNumber, parentHosts []string) []*istio.TLSRoute {
	out := make([]*istio.TLSRoute, 0, len(routes))
	for _, route := range routes {
		cp := route.DeepCopy()
		if len(cp.Match) == 1 && slices.Equal(cp.Match[0].SniHosts, []string{"*"}) {
			// For mesh, we use parent hosts for SNI if TLSRoute.hostnames were not specified.
			cp.Match[0].SniHosts = parentHosts
		}
		if port != nil {
			for _, m := range cp.Match {
				m.Port = uint32(*port)
			}
		}
		out = append(out, cp)
	}
	return out
}
// compatibleRoutesForHost narrows each route's SNI hosts to those compatible with
// parentHost. Only routes with exactly one match and multiple SNI hosts are
// filtered (on a deep copy); everything else passes through unchanged.
func compatibleRoutesForHost(routes []*istio.TLSRoute, parentHost string) []*istio.TLSRoute {
	out := make([]*istio.TLSRoute, 0, len(routes))
	parent := host.Name(parentHost)
	for _, route := range routes {
		if len(route.Match) == 1 && len(route.Match[0].SniHosts) > 1 {
			route = route.DeepCopy()
			kept := []string{}
			for _, sni := range route.Match[0].SniHosts {
				if parent.Matches(host.Name(sni)) {
					kept = append(kept, sni)
				}
			}
			route.Match[0].SniHosts = kept
		}
		out = append(out, route)
	}
	return out
}
// buildGRPCVirtualServices converts a single GRPCRoute into Istio VirtualService routes,
// merging the result into the shared accumulators: gatewayRoutes (keyed gateway->host) and
// meshRoutes (keyed namespace[/port]->host). It also writes per-parent status back onto obj.
// The flow mirrors buildHTTPVirtualServices, differing only in the route types involved.
func buildGRPCVirtualServices(
	ctx configContext,
	obj config.Config,
	gatewayRoutes map[string]map[string]*config.Config,
	meshRoutes map[string]map[string]*config.Config,
) {
	route := obj.Spec.(*k8s.GRPCRouteSpec)
	parentRefs := extractParentReferenceInfo(ctx.GatewayReferences, route.ParentRefs, route.Hostnames, gvk.GRPCRoute, obj.Namespace)
	// reportStatus records the per-parent conversion result into the route's status.
	reportStatus := func(results []RouteParentResult) {
		obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
			rs := s.(*k8s.GRPCRouteStatus)
			rs.Parents = createRouteStatus(results, obj, rs.Parents)
			return rs
		})
	}
	type conversionResult struct {
		error  *ConfigError
		routes []*istio.HTTPRoute
	}
	// convertRules translates every rule into Istio HTTPRoutes. ReferenceGrant
	// enforcement is applied only for gateway parents (enforceRefGrant = !mesh).
	convertRules := func(mesh bool) conversionResult {
		res := conversionResult{}
		for n, r := range route.Rules {
			// split the rule to make sure each rule has up to one match
			matches := slices.Reference(r.Matches)
			if len(matches) == 0 {
				matches = append(matches, nil)
			}
			for _, m := range matches {
				if m != nil {
					r.Matches = []k8s.GRPCRouteMatch{*m}
				}
				vs, err := convertGRPCRoute(r, ctx, obj, n, !mesh)
				// This was a hard error
				if vs == nil {
					res.error = err
					return conversionResult{error: err}
				}
				// Got an error but also routes
				if err != nil {
					res.error = err
				}
				res.routes = append(res.routes, vs)
			}
		}
		return res
	}
	meshResult, gwResult := buildMeshAndGatewayRoutes(parentRefs, convertRules)
	reportStatus(slices.Map(parentRefs, func(r routeParentReference) RouteParentResult {
		res := RouteParentResult{
			OriginalReference: r.OriginalReference,
			DeniedReason:      r.DeniedReason,
			RouteError:        gwResult.error,
		}
		if r.IsMesh() {
			res.RouteError = meshResult.error
		}
		return res
	}))
	count := 0
	for _, parent := range filteredReferences(parentRefs) {
		// for gateway routes, build one VS per gateway+host
		routeMap := gatewayRoutes
		routeKey := parent.InternalName
		vsHosts := hostnameToStringList(route.Hostnames)
		routes := gwResult.routes
		if parent.IsMesh() {
			routes = meshResult.routes
			// for mesh routes, build one VS per namespace/port->host
			routeMap = meshRoutes
			routeKey = obj.Namespace
			if parent.OriginalReference.Port != nil {
				routes = augmentPortMatch(routes, *parent.OriginalReference.Port)
				routeKey += fmt.Sprintf("/%d", *parent.OriginalReference.Port)
			}
			// Mesh hosts come from the parent service itself, not the route's hostnames.
			if parent.InternalKind == gvk.ServiceEntry {
				vsHosts = serviceEntryHosts(ctx.ServiceEntry,
					string(parent.OriginalReference.Name),
					string(ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace))))
			} else {
				vsHosts = []string{fmt.Sprintf("%s.%s.svc.%s",
					parent.OriginalReference.Name, ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace)), ctx.Domain)}
			}
		}
		if len(routes) == 0 {
			continue
		}
		if _, f := routeMap[routeKey]; !f {
			routeMap[routeKey] = make(map[string]*config.Config)
		}
		// Create one VS per hostname with a single hostname.
		// This ensures we can treat each hostname independently, as the spec requires
		for _, h := range vsHosts {
			if cfg := routeMap[routeKey][h]; cfg != nil {
				// merge http routes
				vs := cfg.Spec.(*istio.VirtualService)
				vs.Http = append(vs.Http, routes...)
				// append parents
				cfg.Annotations[constants.InternalParentNames] = fmt.Sprintf("%s,%s/%s.%s",
					cfg.Annotations[constants.InternalParentNames], obj.GroupVersionKind.Kind, obj.Name, obj.Namespace)
			} else {
				// count keeps generated names unique across hosts/parents of this route.
				name := fmt.Sprintf("%s-%d-%s", obj.Name, count, constants.KubernetesGatewayName)
				routeMap[routeKey][h] = &config.Config{
					Meta: config.Meta{
						CreationTimestamp: obj.CreationTimestamp,
						GroupVersionKind:  gvk.VirtualService,
						Name:              name,
						Annotations:       routeMeta(obj),
						Namespace:         obj.Namespace,
						Domain:            ctx.Domain,
					},
					Spec: &istio.VirtualService{
						Hosts:    []string{h},
						Gateways: []string{parent.InternalName},
						Http:     routes,
					},
				}
				count++
			}
		}
	}
	// Re-sort merged routes so precedence rules hold across all contributing routes.
	for _, vsByHost := range gatewayRoutes {
		for _, cfg := range vsByHost {
			vs := cfg.Spec.(*istio.VirtualService)
			sortHTTPRoutes(vs.Http)
		}
	}
	for _, vsByHost := range meshRoutes {
		for _, cfg := range vsByHost {
			vs := cfg.Spec.(*istio.VirtualService)
			sortHTTPRoutes(vs.Http)
		}
	}
}
// routeMeta builds the annotations for a VirtualService generated from the given
// route object: the internal parent-name annotation plus a marker that the config
// uses gateway-api routing semantics.
func routeMeta(obj config.Config) map[string]string {
	annotations := parentMeta(obj, nil)
	annotations[constants.InternalRouteSemantics] = constants.RouteSemanticsGateway
	return annotations
}
// sortHTTPRoutes sorts generated vs routes to meet gateway-api requirements
// see https://gateway-api.sigs.k8s.io/v1alpha2/references/spec/#gateway.networking.k8s.io/v1alpha2.HTTPRouteRule
// The sort is stable, so equally-ranked routes keep their original (creation-time) order.
func sortHTTPRoutes(routes []*istio.HTTPRoute) {
	sort.SliceStable(routes, func(i, j int) bool {
		// Routes without any match are the least specific and sort last.
		if len(routes[i].Match) == 0 {
			return false
		} else if len(routes[j].Match) == 0 {
			return true
		}
		// Only look at match[0], we always generate only one match
		m1, m2 := routes[i].Match[0], routes[j].Match[0]
		r1, r2 := getURIRank(m1), getURIRank(m2)
		len1, len2 := getURILength(m1), getURILength(m2)
		switch {
		// 1: URI match type precedence: Exact > Prefix > Regex
		case r1 != r2:
			return r1 > r2
		// longer URI values are more specific and win
		case len1 != len2:
			return len1 > len2
		// 2: method match (a route with a method match beats one without)
		case (m1.Method == nil) != (m2.Method == nil):
			return m1.Method != nil
		// 3: number of header matches
		case len(m1.Headers) != len(m2.Headers):
			return len(m1.Headers) > len(m2.Headers)
		// 4: number of query matches
		default:
			return len(m1.QueryParams) > len(m2.QueryParams)
		}
	})
}
// getURIRank ranks a URI match type for sorting precedence: Exact (3) > Prefix (2)
// > Regex (1). A missing URI match, or an unrecognized match type, ranks lowest (-1).
func getURIRank(match *istio.HTTPMatchRequest) int {
	if match.Uri == nil {
		return -1
	}
	switch match.Uri.MatchType.(type) {
	case *istio.StringMatch_Regex:
		return 1
	case *istio.StringMatch_Prefix:
		return 2
	case *istio.StringMatch_Exact:
		return 3
	default:
		// Unrecognized match type; should not happen.
		return -1
	}
}
// getURILength returns the length of the URI match value, used as a specificity
// tie-breaker when two matches share the same match type. No URI match counts as
// length 0; an unrecognized match type returns -1 (should not happen).
func getURILength(match *istio.HTTPMatchRequest) int {
	if match.Uri == nil {
		return 0
	}
	switch m := match.Uri.MatchType.(type) {
	case *istio.StringMatch_Exact:
		return len(m.Exact)
	case *istio.StringMatch_Prefix:
		return len(m.Prefix)
	case *istio.StringMatch_Regex:
		return len(m.Regex)
	default:
		return -1
	}
}
// parentMeta builds the internal parent-name annotation identifying which
// gateway-api object (and optional listener section) produced a generated config.
// Format: "Kind/name.namespace" or "Kind/name/section.namespace".
func parentMeta(obj config.Config, sectionName *k8s.SectionName) map[string]string {
	ref := obj.GroupVersionKind.Kind + "/" + obj.Name
	if sectionName != nil {
		ref += "/" + string(*sectionName)
	}
	ref += "." + obj.Namespace
	return map[string]string{
		constants.InternalParentNames: ref,
	}
}
// hostnameToStringList converts route hostnames to plain strings.
// In the Istio API, empty hostname is not allowed, while in the Kubernetes API an
// empty list means "any" — so the empty list becomes a single wildcard.
func hostnameToStringList(h []k8s.Hostname) []string {
	if len(h) == 0 {
		return []string{"*"}
	}
	out := make([]string, 0, len(h))
	for _, name := range h {
		out = append(out, string(name))
	}
	return out
}
// toInternalParentReference resolves a Gateway API ParentReference into our internal
// parentKey form. Unset Kind/Group default to Gateway per the spec; an unset
// Namespace defaults to the route's own namespace. Kinds this controller does not
// handle return an error, which callers treat as "for another controller" and skip.
func toInternalParentReference(p k8s.ParentReference, localNamespace string) (parentKey, error) {
	empty := parentKey{}
	kind := ptr.OrDefault((*string)(p.Kind), gvk.KubernetesGateway.Kind)
	group := ptr.OrDefault((*string)(p.Group), gvk.KubernetesGateway.Group)
	var ik config.GroupVersionKind
	// Currently supported types are Gateway, Service, and ServiceEntry
	switch {
	case kind == gvk.KubernetesGateway.Kind && group == gvk.KubernetesGateway.Group:
		ik = gvk.KubernetesGateway
	case kind == gvk.Service.Kind && group == gvk.Service.Group:
		ik = gvk.Service
	case kind == gvk.ServiceEntry.Kind && group == gvk.ServiceEntry.Group:
		ik = gvk.ServiceEntry
	default:
		// Use the resolved group string, not p.Group: %v on the *k8s.Group pointer
		// would print a hex address (or "<nil>") rather than the group name.
		return empty, fmt.Errorf("unsupported parentKey: %v/%v", group, kind)
	}
	// Unset namespace means "same namespace"
	ns := ptr.OrDefault((*string)(p.Namespace), localNamespace)
	return parentKey{
		Kind:      ik,
		Name:      string(p.Name),
		Namespace: ns,
	}, nil
}
// referenceAllowed checks whether a route of the given kind in `namespace`, with the
// given hostnames, is allowed to bind to the parent. It returns a ParentError
// describing why the attachment is rejected, or nil when it is permitted.
func referenceAllowed(
	parent *parentInfo,
	routeKind config.GroupVersionKind,
	parentRef parentReference,
	hostnames []k8s.Hostname,
	namespace string,
) *ParentError {
	if parentRef.Kind == gvk.Service || parentRef.Kind == gvk.ServiceEntry {
		// TODO: check if the service reference is valid
		if false {
			return &ParentError{
				Reason:  ParentErrorParentRefConflict,
				Message: fmt.Sprintf("parent service: %q is invalid", parentRef.Name),
			}
		}
	} else {
		// First, check section and port apply. This must come first
		if parentRef.Port != 0 && parentRef.Port != parent.Port {
			return &ParentError{
				Reason:  ParentErrorNotAccepted,
				Message: fmt.Sprintf("port %v not found", parentRef.Port),
			}
		}
		if len(parentRef.SectionName) > 0 && parentRef.SectionName != parent.SectionName {
			return &ParentError{
				Reason:  ParentErrorNotAccepted,
				Message: fmt.Sprintf("sectionName %q not found", parentRef.SectionName),
			}
		}
		// Next check the hostnames are a match. This is a bi-directional wildcard match. Only one route
		// hostname must match for it to be allowed (but the others will be filtered at runtime)
		// If either is empty its treated as a wildcard which always matches
		if len(hostnames) == 0 {
			hostnames = []k8s.Hostname{"*"}
		}
		if len(parent.Hostnames) > 0 {
			// TODO: the spec actually has a label match, not a string match. That is, *.com does not match *.apple.com
			// We are doing a string match here
			matched := false
			hostMatched := false
		out:
			for _, routeHostname := range hostnames {
				for _, parentHostNamespace := range parent.Hostnames {
					// parent.Hostnames entries are "namespace/hostname" pairs
					// (assumed to always contain a "/" — produced by gateway
					// conversion; verify if that format ever changes).
					spl := strings.Split(parentHostNamespace, "/")
					parentNamespace, parentHostname := spl[0], spl[1]
					hostnameMatch := host.Name(parentHostname).Matches(host.Name(routeHostname))
					namespaceMatch := parentNamespace == "*" || parentNamespace == namespace
					hostMatched = hostMatched || hostnameMatch
					if hostnameMatch && namespaceMatch {
						matched = true
						break out
					}
				}
			}
			if !matched {
				// Distinguish "hostname matched but namespace denied" from
				// "no hostname matched at all" for clearer status messages.
				if hostMatched {
					return &ParentError{
						Reason: ParentErrorNotAllowed,
						Message: fmt.Sprintf(
							"hostnames matched parent hostname %q, but namespace %q is not allowed by the parent",
							parent.OriginalHostname, namespace,
						),
					}
				}
				return &ParentError{
					Reason: ParentErrorNoHostname,
					Message: fmt.Sprintf(
						"no hostnames matched parent hostname %q",
						parent.OriginalHostname,
					),
				}
			}
		}
	}
	// Also make sure this route kind is allowed
	matched := false
	for _, ak := range parent.AllowedKinds {
		if string(ak.Kind) == routeKind.Kind && ptr.OrDefault((*string)(ak.Group), gvk.GatewayClass.Group) == routeKind.Group {
			matched = true
			break
		}
	}
	if !matched {
		return &ParentError{
			Reason:  ParentErrorNotAllowed,
			Message: fmt.Sprintf("kind %v is not allowed", routeKind),
		}
	}
	return nil
}
// extractParentReferenceInfo resolves a route's parentRefs against the known gateways,
// producing one routeParentReference per (ref, matching parentInfo) pair. Each result
// carries a DeniedReason when the attachment is not allowed; allowed attachments bump
// the parent's AttachedRoutes counter. Unrecognized refs (other controllers) are skipped.
func extractParentReferenceInfo(gateways map[parentKey][]*parentInfo, routeRefs []k8s.ParentReference,
	hostnames []k8s.Hostname, kind config.GroupVersionKind, localNamespace string,
) []routeParentReference {
	parentRefs := []routeParentReference{}
	for _, ref := range routeRefs {
		ir, err := toInternalParentReference(ref, localNamespace)
		if err != nil {
			// Cannot handle the reference. Maybe it is for another controller, so we just ignore it
			continue
		}
		pk := parentReference{
			parentKey:   ir,
			SectionName: ptr.OrEmpty(ref.SectionName),
			Port:        ptr.OrEmpty(ref.Port),
		}
		// appendParent evaluates whether the ref can bind to the parent and records the result.
		appendParent := func(pr *parentInfo, pk parentReference) {
			rpi := routeParentReference{
				InternalName:      pr.InternalName,
				InternalKind:      ir.Kind,
				Hostname:          pr.OriginalHostname,
				DeniedReason:      referenceAllowed(pr, kind, pk, hostnames, localNamespace),
				OriginalReference: ref,
			}
			if rpi.DeniedReason == nil {
				// Record that we were able to bind to the parent
				pr.AttachedRoutes++
			}
			parentRefs = append(parentRefs, rpi)
		}
		// Service and ServiceEntry parents all collapse onto the single mesh parent key.
		gk := ir
		if ir.Kind == gvk.Service || ir.Kind == gvk.ServiceEntry {
			gk = meshParentKey
		}
		for _, gw := range gateways[gk] {
			// Append all matches. Note we may be adding mismatch section or ports; this is handled later
			appendParent(gw, pk)
		}
	}
	// Ensure stable order
	slices.SortBy(parentRefs, func(a routeParentReference) string {
		return parentRefString(a.OriginalReference)
	})
	return parentRefs
}
// buildTCPVirtualService converts a single TCPRoute into Istio VirtualService configs
// (one per resolved host per parent) and writes per-parent status back onto the object.
func buildTCPVirtualService(ctx configContext, obj config.Config) []config.Config {
	route := obj.Spec.(*k8s.TCPRouteSpec)
	parentRefs := extractParentReferenceInfo(ctx.GatewayReferences, route.ParentRefs, nil, gvk.TCPRoute, obj.Namespace)
	// reportStatus records the per-parent conversion result into the route's status.
	reportStatus := func(results []RouteParentResult) {
		obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
			rs := s.(*k8s.TCPRouteStatus)
			rs.Parents = createRouteStatus(results, obj, rs.Parents)
			return rs
		})
	}
	type conversionResult struct {
		error  *ConfigError
		routes []*istio.TCPRoute
	}
	// convertRules translates every rule; ReferenceGrant enforcement applies only for
	// gateway parents (enforceRefGrant = !mesh).
	convertRules := func(mesh bool) conversionResult {
		res := conversionResult{}
		for _, r := range route.Rules {
			vs, err := convertTCPRoute(ctx, r, obj, !mesh)
			// This was a hard error
			if vs == nil {
				res.error = err
				return conversionResult{error: err}
			}
			// Got an error but also routes
			if err != nil {
				res.error = err
			}
			res.routes = append(res.routes, vs)
		}
		return res
	}
	meshResult, gwResult := buildMeshAndGatewayRoutes(parentRefs, convertRules)
	reportStatus(slices.Map(parentRefs, func(r routeParentReference) RouteParentResult {
		res := RouteParentResult{
			OriginalReference: r.OriginalReference,
			DeniedReason:      r.DeniedReason,
			RouteError:        gwResult.error,
		}
		if r.IsMesh() {
			res.RouteError = meshResult.error
		}
		return res
	}))
	vs := []config.Config{}
	for _, parent := range filteredReferences(parentRefs) {
		routes := gwResult.routes
		vsHosts := []string{"*"}
		if parent.IsMesh() {
			// Mesh parents route by the service's hosts instead of the wildcard.
			routes = meshResult.routes
			if parent.OriginalReference.Port != nil {
				routes = augmentTCPPortMatch(routes, *parent.OriginalReference.Port)
			}
			if parent.InternalKind == gvk.ServiceEntry {
				vsHosts = serviceEntryHosts(ctx.ServiceEntry,
					string(parent.OriginalReference.Name),
					string(ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace))))
			} else {
				vsHosts = []string{fmt.Sprintf("%s.%s.svc.%s",
					parent.OriginalReference.Name, ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace)), ctx.Domain)}
			}
		}
		for i, host := range vsHosts {
			name := fmt.Sprintf("%s-tcp-%d-%s", obj.Name, i, constants.KubernetesGatewayName)
			// Create one VS per hostname with a single hostname.
			// This ensures we can treat each hostname independently, as the spec requires
			vs = append(vs, config.Config{
				Meta: config.Meta{
					CreationTimestamp: obj.CreationTimestamp,
					GroupVersionKind:  gvk.VirtualService,
					Name:              name,
					Annotations:       routeMeta(obj),
					Namespace:         obj.Namespace,
					Domain:            ctx.Domain,
				},
				Spec: &istio.VirtualService{
					// We can use wildcard here since each listener can have at most one route bound to it, so we have
					// a single VS per Gateway.
					Hosts:    []string{host},
					Gateways: []string{parent.InternalName},
					Tcp:      routes,
				},
			})
		}
	}
	return vs
}
// buildTLSVirtualService converts a single TLSRoute into Istio VirtualService configs
// (one per resolved host per parent) and writes per-parent status back onto the object.
func buildTLSVirtualService(ctx configContext, obj config.Config) []config.Config {
	route := obj.Spec.(*k8s.TLSRouteSpec)
	parentRefs := extractParentReferenceInfo(ctx.GatewayReferences, route.ParentRefs, nil, gvk.TLSRoute, obj.Namespace)
	// reportStatus records the per-parent conversion result into the route's status.
	reportStatus := func(results []RouteParentResult) {
		obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
			rs := s.(*k8s.TLSRouteStatus)
			rs.Parents = createRouteStatus(results, obj, rs.Parents)
			return rs
		})
	}
	type conversionResult struct {
		error  *ConfigError
		routes []*istio.TLSRoute
	}
	// convertRules translates every rule; ReferenceGrant enforcement applies only for
	// gateway parents (enforceRefGrant = !mesh).
	convertRules := func(mesh bool) conversionResult {
		res := conversionResult{}
		for _, r := range route.Rules {
			vs, err := convertTLSRoute(ctx, r, obj, !mesh)
			// This was a hard error
			if vs == nil {
				res.error = err
				return conversionResult{error: err}
			}
			// Got an error but also routes
			if err != nil {
				res.error = err
			}
			res.routes = append(res.routes, vs)
		}
		return res
	}
	meshResult, gwResult := buildMeshAndGatewayRoutes(parentRefs, convertRules)
	reportStatus(slices.Map(parentRefs, func(r routeParentReference) RouteParentResult {
		res := RouteParentResult{
			OriginalReference: r.OriginalReference,
			DeniedReason:      r.DeniedReason,
			RouteError:        gwResult.error,
		}
		if r.IsMesh() {
			res.RouteError = meshResult.error
		}
		return res
	}))
	vs := []config.Config{}
	for _, parent := range filteredReferences(parentRefs) {
		routes := gwResult.routes
		vsHosts := hostnameToStringList(route.Hostnames)
		if parent.IsMesh() {
			// Mesh parents route by the service's hosts instead of the route's hostnames,
			// and stamp the parent port/SNI onto the matches.
			routes = meshResult.routes
			if parent.InternalKind == gvk.ServiceEntry {
				vsHosts = serviceEntryHosts(ctx.ServiceEntry,
					string(parent.OriginalReference.Name),
					string(ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace))))
			} else {
				host := fmt.Sprintf("%s.%s.svc.%s",
					parent.OriginalReference.Name, ptr.OrDefault(parent.OriginalReference.Namespace, k8s.Namespace(obj.Namespace)), ctx.Domain)
				vsHosts = []string{host}
			}
			routes = augmentTLSPortMatch(routes, parent.OriginalReference.Port, vsHosts)
		}
		for i, host := range vsHosts {
			name := fmt.Sprintf("%s-tls-%d-%s", obj.Name, i, constants.KubernetesGatewayName)
			filteredRoutes := routes
			if parent.IsMesh() {
				// Keep only routes whose SNI hosts are compatible with this specific host.
				filteredRoutes = compatibleRoutesForHost(routes, host)
			}
			// Create one VS per hostname with a single hostname.
			// This ensures we can treat each hostname independently, as the spec requires
			vs = append(vs, config.Config{
				Meta: config.Meta{
					CreationTimestamp: obj.CreationTimestamp,
					GroupVersionKind:  gvk.VirtualService,
					Name:              name,
					Annotations:       routeMeta(obj),
					Namespace:         obj.Namespace,
					Domain:            ctx.Domain,
				},
				Spec: &istio.VirtualService{
					Hosts:    []string{host},
					Gateways: []string{parent.InternalName},
					Tls:      filteredRoutes,
				},
			})
		}
	}
	return vs
}
// convertTCPRoute converts a single TCPRouteRule into an Istio TCPRoute.
// On a hard error it returns (nil, err); otherwise the route plus an optional
// backend-level error to report in status.
func convertTCPRoute(ctx configContext, r k8s.TCPRouteRule, obj config.Config, enforceRefGrant bool) (*istio.TCPRoute, *ConfigError) {
	if tcpWeightSum(r.BackendRefs) == 0 {
		// The spec requires us to reject connections when there are no >0 weight backends
		// We don't have a great way to do it. TODO: add a fault injection API for TCP?
		// Route to a sentinel destination/subset that does not exist, so traffic is dropped.
		return &istio.TCPRoute{
			Route: []*istio.RouteDestination{{
				Destination: &istio.Destination{
					Host:   "internal.cluster.local",
					Subset: "zero-weight",
					Port:   &istio.PortSelector{Number: 65535},
				},
				Weight: 0,
			}},
		}, nil
	}
	dest, backendErr, err := buildTCPDestination(ctx, r.BackendRefs, obj.Namespace, enforceRefGrant, gvk.TCPRoute)
	if err != nil {
		return nil, err
	}
	return &istio.TCPRoute{
		Route: dest,
	}, backendErr
}
// convertTLSRoute converts a single TLSRouteRule into an Istio TLSRoute, matching on
// the route's SNI hostnames. On a hard error it returns (nil, err); otherwise the
// route plus an optional backend-level error to report in status.
func convertTLSRoute(ctx configContext, r k8s.TLSRouteRule, obj config.Config, enforceRefGrant bool) (*istio.TLSRoute, *ConfigError) {
	if tcpWeightSum(r.BackendRefs) == 0 {
		// The spec requires us to reject connections when there are no >0 weight backends
		// We don't have a great way to do it. TODO: add a fault injection API for TCP?
		// Route to a sentinel destination/subset that does not exist, so traffic is dropped.
		return &istio.TLSRoute{
			Route: []*istio.RouteDestination{{
				Destination: &istio.Destination{
					Host:   "internal.cluster.local",
					Subset: "zero-weight",
					Port:   &istio.PortSelector{Number: 65535},
				},
				Weight: 0,
			}},
		}, nil
	}
	dest, backendErr, err := buildTCPDestination(ctx, r.BackendRefs, obj.Namespace, enforceRefGrant, gvk.TLSRoute)
	if err != nil {
		return nil, err
	}
	return &istio.TLSRoute{
		Match: buildTLSMatch(obj.Spec.(*k8s.TLSRouteSpec).Hostnames),
		Route: dest,
	}, backendErr
}
// buildTCPDestination converts backend references into Istio route destinations.
// It returns (destinations, soft error for droppable invalid backends, hard error).
func buildTCPDestination(
	ctx configContext,
	forwardTo []k8s.BackendRef,
	ns string,
	enforceRefGrant bool,
	k config.GroupVersionKind,
) ([]*istio.RouteDestination, *ConfigError, *ConfigError) {
	if forwardTo == nil {
		return nil, nil, nil
	}
	// Zero-weight backends must receive no traffic; drop them up front.
	backends := make([]k8s.BackendRef, 0, len(forwardTo))
	backendWeights := make([]int, 0, len(forwardTo))
	for _, ref := range forwardTo {
		weight := int(ptr.OrDefault(ref.Weight, 1))
		if weight == 0 {
			continue
		}
		backends = append(backends, ref)
		backendWeights = append(backendWeights, weight)
	}
	// A lone destination gets weight 0 ("unset") rather than an explicit value.
	if len(backendWeights) == 1 {
		backendWeights[0] = 0
	}
	var invalidBackendErr *ConfigError
	dests := make([]*istio.RouteDestination, 0, len(backends))
	for idx, ref := range backends {
		dst, err := buildDestination(ctx, ref, ns, enforceRefGrant, k)
		if err != nil {
			if !isInvalidBackend(err) {
				return nil, nil, err
			}
			// keep going, we will gracefully drop invalid backends
			invalidBackendErr = err
		}
		dests = append(dests, &istio.RouteDestination{
			Destination: dst,
			Weight:      int32(backendWeights[idx]),
		})
	}
	return dests, invalidBackendErr, nil
}
// buildTLSMatch constructs the SNI match for a TLS route from its hostnames.
// Currently, the spec only supports extensions beyond hostname, which are not currently implemented by Istio.
func buildTLSMatch(hostnames []k8s.Hostname) []*istio.TLSMatchAttributes {
	match := &istio.TLSMatchAttributes{SniHosts: hostnamesToStringListWithWildcard(hostnames)}
	return []*istio.TLSMatchAttributes{match}
}
// hostnamesToStringListWithWildcard converts hostnames to strings, treating an
// empty list as a match-everything wildcard.
func hostnamesToStringListWithWildcard(h []k8s.Hostname) []string {
	if len(h) == 0 {
		return []string{"*"}
	}
	out := make([]string, len(h))
	for idx, name := range h {
		out[idx] = string(name)
	}
	return out
}
// weightSum returns the total weight across HTTP backend refs; unset weights default to 1.
func weightSum(forwardTo []k8s.HTTPBackendRef) int {
	var total int32
	for _, ref := range forwardTo {
		total += ptr.OrDefault(ref.Weight, 1)
	}
	return int(total)
}
// grpcWeightSum returns the total weight across gRPC backend refs; unset weights default to 1.
func grpcWeightSum(forwardTo []k8s.GRPCBackendRef) int {
	var total int32
	for _, ref := range forwardTo {
		total += ptr.OrDefault(ref.Weight, 1)
	}
	return int(total)
}
// tcpWeightSum returns the total weight across TCP/TLS backend refs; unset weights default to 1.
func tcpWeightSum(forwardTo []k8s.BackendRef) int {
	var total int32
	for _, ref := range forwardTo {
		total += ptr.OrDefault(ref.Weight, 1)
	}
	return int(total)
}
// buildHTTPDestination converts HTTP backend references into Istio route destinations.
// It returns (destinations, soft error for invalid-but-droppable backends, hard error).
// Zero-weight backends are skipped entirely; backends failing reference checks are
// retained (the soft error is surfaced for status) so the rest of the route still programs.
func buildHTTPDestination(
	ctx configContext,
	forwardTo []k8s.HTTPBackendRef,
	ns string,
	enforceRefGrant bool,
) ([]*istio.HTTPRouteDestination, *ConfigError, *ConfigError) {
	if forwardTo == nil {
		return nil, nil, nil
	}
	// Filter out zero-weight backends; they must receive no traffic.
	weights := []int{}
	action := []k8s.HTTPBackendRef{}
	for _, w := range forwardTo {
		wt := int(ptr.OrDefault(w.Weight, 1))
		if wt == 0 {
			continue
		}
		action = append(action, w)
		weights = append(weights, wt)
	}
	// With a single destination, leave the weight unset (0) rather than explicit.
	if len(weights) == 1 {
		weights = []int{0}
	}
	var invalidBackendErr *ConfigError
	res := []*istio.HTTPRouteDestination{}
	for i, fwd := range action {
		dst, err := buildDestination(ctx, fwd.BackendRef, ns, enforceRefGrant, gvk.HTTPRoute)
		if err != nil {
			if isInvalidBackend(err) {
				invalidBackendErr = err
				// keep going, we will gracefully drop invalid backends
			} else {
				return nil, nil, err
			}
		}
		rd := &istio.HTTPRouteDestination{
			Destination: dst,
			Weight:      int32(weights[i]),
		}
		// Apply per-backend filters; only header modifiers are supported at this level.
		for _, filter := range fwd.Filters {
			switch filter.Type {
			case k8sv1.HTTPRouteFilterRequestHeaderModifier:
				h := createHeadersFilter(filter.RequestHeaderModifier)
				if h == nil {
					continue
				}
				if rd.Headers == nil {
					rd.Headers = &istio.Headers{}
				}
				rd.Headers.Request = h
			case k8sv1.HTTPRouteFilterResponseHeaderModifier:
				h := createHeadersFilter(filter.ResponseHeaderModifier)
				if h == nil {
					continue
				}
				if rd.Headers == nil {
					rd.Headers = &istio.Headers{}
				}
				rd.Headers.Response = h
			default:
				return nil, nil, &ConfigError{Reason: InvalidFilter, Message: fmt.Sprintf("unsupported filter type %q", filter.Type)}
			}
		}
		res = append(res, rd)
	}
	return res, invalidBackendErr, nil
}
// buildGRPCDestination converts gRPC backend references into Istio route destinations.
// It mirrors buildHTTPDestination (gRPC routes reuse HTTPRouteDestination), returning
// (destinations, soft error for invalid-but-droppable backends, hard error).
func buildGRPCDestination(
	ctx configContext,
	forwardTo []k8s.GRPCBackendRef,
	ns string,
	enforceRefGrant bool,
) ([]*istio.HTTPRouteDestination, *ConfigError, *ConfigError) {
	if forwardTo == nil {
		return nil, nil, nil
	}
	// Filter out zero-weight backends; they must receive no traffic.
	weights := []int{}
	action := []k8s.GRPCBackendRef{}
	for _, w := range forwardTo {
		wt := int(ptr.OrDefault(w.Weight, 1))
		if wt == 0 {
			continue
		}
		action = append(action, w)
		weights = append(weights, wt)
	}
	// With a single destination, leave the weight unset (0) rather than explicit.
	if len(weights) == 1 {
		weights = []int{0}
	}
	var invalidBackendErr *ConfigError
	res := []*istio.HTTPRouteDestination{}
	for i, fwd := range action {
		dst, err := buildDestination(ctx, fwd.BackendRef, ns, enforceRefGrant, gvk.GRPCRoute)
		if err != nil {
			if isInvalidBackend(err) {
				invalidBackendErr = err
				// keep going, we will gracefully drop invalid backends
			} else {
				return nil, nil, err
			}
		}
		rd := &istio.HTTPRouteDestination{
			Destination: dst,
			Weight:      int32(weights[i]),
		}
		// Apply per-backend filters; only header modifiers are supported at this level.
		for _, filter := range fwd.Filters {
			switch filter.Type {
			case k8s.GRPCRouteFilterRequestHeaderModifier:
				h := createHeadersFilter(filter.RequestHeaderModifier)
				if h == nil {
					continue
				}
				if rd.Headers == nil {
					rd.Headers = &istio.Headers{}
				}
				rd.Headers.Request = h
			case k8s.GRPCRouteFilterResponseHeaderModifier:
				h := createHeadersFilter(filter.ResponseHeaderModifier)
				if h == nil {
					continue
				}
				if rd.Headers == nil {
					rd.Headers = &istio.Headers{}
				}
				rd.Headers.Response = h
			default:
				return nil, nil, &ConfigError{Reason: InvalidFilter, Message: fmt.Sprintf("unsupported filter type %q", filter.Type)}
			}
		}
		res = append(res, rd)
	}
	return res, invalidBackendErr, nil
}
// buildDestination resolves a Gateway API BackendRef into an Istio Destination.
// Supported referents: core Kubernetes Services, MCS ServiceImports, and the
// Istio-specific "Hostname" synthetic type. It returns a soft error (one accepted
// by isInvalidBackend) when the backend is well-formed but unusable, and a hard
// error for malformed references.
func buildDestination(ctx configContext, to k8s.BackendRef, ns string, enforceRefGrant bool, k config.GroupVersionKind) (*istio.Destination, *ConfigError) {
	// check if the reference is allowed
	if enforceRefGrant {
		refs := ctx.AllowedReferences
		if toNs := to.Namespace; toNs != nil && string(*toNs) != ns {
			// Cross-namespace references require a ReferenceGrant in the target namespace.
			if !refs.BackendAllowed(k, to.Name, *toNs, ns) {
				return &istio.Destination{}, &ConfigError{
					Reason:  InvalidDestinationPermit,
					Message: fmt.Sprintf("backendRef %v/%v not accessible to a %s in namespace %q (missing a ReferenceGrant?)", to.Name, *toNs, k.Kind, ns),
				}
			}
		}
	}
	// An unset backend namespace defaults to the route's namespace.
	namespace := ptr.OrDefault((*string)(to.Namespace), ns)
	var invalidBackendErr *ConfigError
	if nilOrEqual((*string)(to.Group), "") && nilOrEqual((*string)(to.Kind), gvk.Service.Kind) {
		// Service
		if to.Port == nil {
			// "Port is required when the referent is a Kubernetes Service."
			return nil, &ConfigError{Reason: InvalidDestination, Message: "port is required in backendRef"}
		}
		if strings.Contains(string(to.Name), ".") {
			return nil, &ConfigError{Reason: InvalidDestination, Message: "serviceName invalid; the name of the Service must be used, not the hostname."}
		}
		hostname := fmt.Sprintf("%s.%s.svc.%s", to.Name, namespace, ctx.Domain)
		if ctx.Context.GetService(hostname, namespace) == nil {
			// Missing Service is a soft error: the destination is still returned.
			invalidBackendErr = &ConfigError{Reason: InvalidDestinationNotFound, Message: fmt.Sprintf("backend(%s) not found", hostname)}
		}
		return &istio.Destination{
			// TODO: implement ReferencePolicy for cross namespace
			Host: hostname,
			Port: &istio.PortSelector{Number: uint32(*to.Port)},
		}, invalidBackendErr
	}
	if nilOrEqual((*string)(to.Group), features.MCSAPIGroup) && nilOrEqual((*string)(to.Kind), "ServiceImport") {
		// Service import
		hostname := fmt.Sprintf("%s.%s.svc.clusterset.local", to.Name, namespace)
		if !features.EnableMCSHost {
			// They asked for ServiceImport, but actually don't have full support enabled...
			// No problem, we can just treat it as Service, which is already cross-cluster in this mode anyways
			hostname = fmt.Sprintf("%s.%s.svc.%s", to.Name, namespace, ctx.Domain)
		}
		if to.Port == nil {
			// We don't know where to send without port
			return nil, &ConfigError{Reason: InvalidDestination, Message: "port is required in backendRef"}
		}
		if strings.Contains(string(to.Name), ".") {
			return nil, &ConfigError{Reason: InvalidDestination, Message: "serviceName invalid; the name of the Service must be used, not the hostname."}
		}
		if ctx.Context.GetService(hostname, namespace) == nil {
			// Missing ServiceImport host is a soft error: the destination is still returned.
			invalidBackendErr = &ConfigError{Reason: InvalidDestinationNotFound, Message: fmt.Sprintf("backend(%s) not found", hostname)}
		}
		return &istio.Destination{
			Host: hostname,
			Port: &istio.PortSelector{Number: uint32(*to.Port)},
		}, invalidBackendErr
	}
	if nilOrEqual((*string)(to.Group), gvk.ServiceEntry.Group) && nilOrEqual((*string)(to.Kind), "Hostname") {
		// Hostname synthetic type
		if to.Port == nil {
			// We don't know where to send without port
			return nil, &ConfigError{Reason: InvalidDestination, Message: "port is required in backendRef"}
		}
		if to.Namespace != nil {
			return nil, &ConfigError{Reason: InvalidDestination, Message: "namespace may not be set with Hostname type"}
		}
		hostname := string(to.Name)
		if ctx.Context.GetService(hostname, namespace) == nil {
			// Unknown hostname is a soft error: the destination is still returned.
			invalidBackendErr = &ConfigError{Reason: InvalidDestinationNotFound, Message: fmt.Sprintf("backend(%s) not found", hostname)}
		}
		return &istio.Destination{
			Host: string(to.Name),
			Port: &istio.PortSelector{Number: uint32(*to.Port)},
		}, invalidBackendErr
	}
	return &istio.Destination{}, &ConfigError{
		Reason:  InvalidDestinationKind,
		Message: fmt.Sprintf("referencing unsupported backendRef: group %q kind %q", ptr.OrEmpty(to.Group), ptr.OrEmpty(to.Kind)),
	}
}
// isInvalidBackend reports whether err represents a backend reference that should be
// gracefully dropped rather than rejecting the whole route.
// https://github.com/kubernetes-sigs/gateway-api/blob/cea484e38e078a2c1997d8c7a62f410a1540f519/apis/v1beta1/httproute_types.go#L207-L212
func isInvalidBackend(err *ConfigError) bool {
	switch err.Reason {
	case InvalidDestinationPermit, InvalidDestinationNotFound, InvalidDestinationKind:
		return true
	default:
		return false
	}
}
// headerListToMap converts a header list into a map keyed by lowercased header name.
// "Subsequent entries with an equivalent header name MUST be ignored"
func headerListToMap(hl []k8s.HTTPHeader) map[string]string {
	if len(hl) == 0 {
		return nil
	}
	res := make(map[string]string, len(hl))
	for _, hdr := range hl {
		key := strings.ToLower(string(hdr.Name))
		if _, seen := res[key]; !seen {
			res[key] = hdr.Value
		}
	}
	return res
}
// createMirrorFilter converts an HTTPRequestMirrorFilter into an Istio mirror policy.
func createMirrorFilter(ctx configContext, filter *k8s.HTTPRequestMirrorFilter, ns string,
	enforceRefGrant bool, k config.GroupVersionKind,
) (*istio.HTTPMirrorPolicy, *ConfigError) {
	if filter == nil {
		return nil, nil
	}
	ref := k8s.BackendRef{
		BackendObjectReference: filter.BackendRef,
		Weight:                 ptr.Of(int32(1)),
	}
	dst, err := buildDestination(ctx, ref, ns, enforceRefGrant, k)
	if err != nil {
		return nil, err
	}
	return &istio.HTTPMirrorPolicy{Destination: dst}, nil
}
// createRewriteFilter converts an HTTPURLRewriteFilter into an Istio HTTPRewrite.
// Returns nil when the filter is absent or results in no rewrite.
func createRewriteFilter(filter *k8s.HTTPURLRewriteFilter) *istio.HTTPRewrite {
	if filter == nil {
		return nil
	}
	rewrite := &istio.HTTPRewrite{}
	if p := filter.Path; p != nil {
		switch p.Type {
		case k8sv1.PrefixMatchHTTPPathModifier:
			uri := strings.TrimSuffix(*p.ReplacePrefixMatch, "/")
			if uri == "" {
				// `/` means removing the prefix
				uri = "/"
			}
			rewrite.Uri = uri
		case k8sv1.FullPathHTTPPathModifier:
			rewrite.UriRegexRewrite = &istio.RegexRewrite{
				Match:   "/.*",
				Rewrite: *p.ReplaceFullPath,
			}
		}
	}
	if filter.Hostname != nil {
		rewrite.Authority = string(*filter.Hostname)
	}
	// Nothing done
	if rewrite.Uri == "" && rewrite.UriRegexRewrite == nil && rewrite.Authority == "" {
		return nil
	}
	return rewrite
}
// createRedirectFilter converts an HTTPRequestRedirectFilter into an Istio HTTPRedirect.
func createRedirectFilter(filter *k8s.HTTPRequestRedirectFilter) *istio.HTTPRedirect {
	if filter == nil {
		return nil
	}
	redirect := &istio.HTTPRedirect{}
	if filter.StatusCode != nil {
		// Istio allows 301, 302, 303, 307, 308.
		// Gateway allows only 301 and 302.
		redirect.RedirectCode = uint32(*filter.StatusCode)
	}
	if filter.Hostname != nil {
		redirect.Authority = string(*filter.Hostname)
	}
	if filter.Scheme != nil {
		// Both allow http and https
		redirect.Scheme = *filter.Scheme
	}
	switch {
	case filter.Port != nil:
		redirect.RedirectPort = &istio.HTTPRedirect_Port{Port: uint32(*filter.Port)}
	case filter.Scheme != nil:
		// "When empty, port (if specified) of the request is used."
		// this differs from Istio default
		redirect.RedirectPort = &istio.HTTPRedirect_DerivePort{DerivePort: istio.HTTPRedirect_FROM_PROTOCOL_DEFAULT}
	default:
		redirect.RedirectPort = &istio.HTTPRedirect_DerivePort{DerivePort: istio.HTTPRedirect_FROM_REQUEST_PORT}
	}
	if p := filter.Path; p != nil {
		switch p.Type {
		case k8sv1.FullPathHTTPPathModifier:
			redirect.Uri = *p.ReplaceFullPath
		case k8sv1.PrefixMatchHTTPPathModifier:
			redirect.Uri = fmt.Sprintf("%%PREFIX()%%%s", *p.ReplacePrefixMatch)
		}
	}
	return redirect
}
// createHeadersFilter converts a header modification filter into Istio header operations.
func createHeadersFilter(filter *k8s.HTTPHeaderFilter) *istio.Headers_HeaderOperations {
	if filter == nil {
		return nil
	}
	ops := &istio.Headers_HeaderOperations{}
	ops.Add = headerListToMap(filter.Add)
	ops.Set = headerListToMap(filter.Set)
	ops.Remove = filter.Remove
	return ops
}
// createMethodMatch builds an exact match on the HTTP method, if one is specified.
// nolint: unparam
func createMethodMatch(match k8s.HTTPRouteMatch) (*istio.StringMatch, *ConfigError) {
	if match.Method == nil {
		return nil, nil
	}
	m := &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: string(*match.Method)}}
	return m, nil
}
// createQueryParamsMatch converts query parameter matches into Istio string matches,
// keyed by parameter name. Returns nil when no matches are configured.
func createQueryParamsMatch(match k8s.HTTPRouteMatch) (map[string]*istio.StringMatch, *ConfigError) {
	if len(match.QueryParams) == 0 {
		return nil, nil
	}
	res := make(map[string]*istio.StringMatch, len(match.QueryParams))
	for _, qp := range match.QueryParams {
		// An unset match type defaults to Exact.
		tp := k8sv1.QueryParamMatchExact
		if qp.Type != nil {
			tp = *qp.Type
		}
		switch tp {
		case k8sv1.QueryParamMatchExact:
			res[string(qp.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: qp.Value}}
		case k8sv1.QueryParamMatchRegularExpression:
			res[string(qp.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: qp.Value}}
		default:
			// Should never happen, unless a new field is added
			return nil, &ConfigError{Reason: InvalidConfiguration, Message: fmt.Sprintf("unknown type: %q is not supported QueryParams type", tp)}
		}
	}
	return res, nil
}
// createHeadersMatch converts HTTP header matches into Istio string matches,
// keyed by header name. Returns nil when no matches are configured.
func createHeadersMatch(match k8s.HTTPRouteMatch) (map[string]*istio.StringMatch, *ConfigError) {
	if len(match.Headers) == 0 {
		return nil, nil
	}
	res := make(map[string]*istio.StringMatch, len(match.Headers))
	for _, header := range match.Headers {
		// An unset match type defaults to Exact.
		tp := k8sv1.HeaderMatchExact
		if header.Type != nil {
			tp = *header.Type
		}
		switch tp {
		case k8sv1.HeaderMatchExact:
			res[string(header.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: header.Value}}
		case k8sv1.HeaderMatchRegularExpression:
			res[string(header.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: header.Value}}
		default:
			// Should never happen, unless a new field is added
			return nil, &ConfigError{Reason: InvalidConfiguration, Message: fmt.Sprintf("unknown type: %q is not supported HeaderMatch type", tp)}
		}
	}
	return res, nil
}
// createGRPCHeadersMatch converts gRPC header matches into Istio string matches,
// keyed by header name. Returns nil when no matches are configured.
func createGRPCHeadersMatch(match k8s.GRPCRouteMatch) (map[string]*istio.StringMatch, *ConfigError) {
	if len(match.Headers) == 0 {
		return nil, nil
	}
	res := make(map[string]*istio.StringMatch, len(match.Headers))
	for _, header := range match.Headers {
		// An unset match type defaults to Exact.
		tp := k8sv1.HeaderMatchExact
		if header.Type != nil {
			tp = *header.Type
		}
		switch tp {
		case k8sv1.HeaderMatchExact:
			res[string(header.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: header.Value}}
		case k8sv1.HeaderMatchRegularExpression:
			res[string(header.Name)] = &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: header.Value}}
		default:
			// Should never happen, unless a new field is added
			return nil, &ConfigError{Reason: InvalidConfiguration, Message: fmt.Sprintf("unknown type: %q is not supported HeaderMatch type", tp)}
		}
	}
	return res, nil
}
// createURIMatch converts an HTTP path match to an Istio URI match.
// An unspecified type defaults to prefix matching, and an unspecified value to "/".
func createURIMatch(match k8s.HTTPRouteMatch) (*istio.StringMatch, *ConfigError) {
	tp := k8sv1.PathMatchPathPrefix
	if match.Path.Type != nil {
		tp = *match.Path.Type
	}
	dest := ptr.OrDefault(match.Path.Value, "/")
	switch tp {
	case k8sv1.PathMatchPathPrefix:
		// "When specified, a trailing `/` is ignored."
		if dest != "/" {
			dest = strings.TrimSuffix(dest, "/")
		}
		return &istio.StringMatch{MatchType: &istio.StringMatch_Prefix{Prefix: dest}}, nil
	case k8sv1.PathMatchExact:
		return &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: dest}}, nil
	case k8sv1.PathMatchRegularExpression:
		return &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: dest}}, nil
	default:
		// Should never happen, unless a new field is added
		return nil, &ConfigError{Reason: InvalidConfiguration, Message: fmt.Sprintf("unknown type: %q is not supported Path match type", tp)}
	}
}
// createGRPCURIMatch converts a gRPC method match into an Istio URI match over
// the /<Service>/<Method> path format.
func createGRPCURIMatch(match k8s.GRPCRouteMatch) (*istio.StringMatch, *ConfigError) {
	m := match.Method
	if m == nil {
		return nil, nil
	}
	if m.Method == nil && m.Service == nil {
		// Should never happen, invalid per spec
		return nil, &ConfigError{Reason: InvalidConfiguration, Message: "gRPC match must have method or service defined"}
	}
	tp := k8s.GRPCMethodMatchExact
	if m.Type != nil {
		tp = *m.Type
	}
	// gRPC format is /<Service>/<Method>. Since we don't natively understand this, convert to various string matches
	switch tp {
	case k8s.GRPCMethodMatchExact:
		switch {
		case m.Method == nil:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Prefix{Prefix: fmt.Sprintf("/%s/", *m.Service)}}, nil
		case m.Service == nil:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: fmt.Sprintf("/[^/]+/%s", *m.Method)}}, nil
		default:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Exact{Exact: fmt.Sprintf("/%s/%s", *m.Service, *m.Method)}}, nil
		}
	case k8s.GRPCMethodMatchRegularExpression:
		switch {
		case m.Method == nil:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: fmt.Sprintf("/%s/.+", *m.Service)}}, nil
		case m.Service == nil:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: fmt.Sprintf("/[^/]+/%s", *m.Method)}}, nil
		default:
			return &istio.StringMatch{MatchType: &istio.StringMatch_Regex{Regex: fmt.Sprintf("/%s/%s", *m.Service, *m.Method)}}, nil
		}
	default:
		// Should never happen, unless a new field is added
		return nil, &ConfigError{Reason: InvalidConfiguration, Message: fmt.Sprintf("unknown type: %q is not supported Path match type", tp)}
	}
}
// getGatewayClasses finds all gateway classes that are owned by Istio.
// Response is ClassName -> Controller type
func getGatewayClasses(r GatewayResources) map[string]k8s.GatewayController {
	res := map[string]k8s.GatewayController{}
	// Setup builtin ones - these can be overridden possibly
	for name, controller := range builtinClasses {
		res[string(name)] = controller
	}
	for _, obj := range r.GatewayClass {
		gwc := obj.Spec.(*k8s.GatewayClassSpec)
		_, known := classInfos[gwc.ControllerName]
		if !known {
			// Not a controller we handle; skip it entirely.
			continue
		}
		res[obj.Name] = gwc.ControllerName
		// Set status. If we created it, it may already be there. If not, set it again
		obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
			gcs := s.(*k8s.GatewayClassStatus)
			*gcs = GetClassStatus(gcs, obj.Generation)
			return gcs
		})
	}
	return res
}
// parentKey holds info about a parentRef (eg route binding to a Gateway). This is a mirror of
// k8s.ParentReference in a form that can be stored in a map
type parentKey struct {
	Kind config.GroupVersionKind
	// Name is the original name of the resource (eg Kubernetes Gateway name)
	Name string
	// Namespace is the namespace of the resource
	Namespace string
}

// parentReference is a parentKey additionally qualified by the listener (section)
// and port the reference targets.
type parentReference struct {
	parentKey

	// SectionName is the specific listener the reference targets, if any.
	SectionName k8s.SectionName
	// Port is the specific port the reference targets, if any.
	Port k8sv1.PortNumber
}

// meshGVK identifies the synthetic "Mesh" parent kind used for mesh-bound routes.
var meshGVK = config.GroupVersionKind{
	Group:   gvk.KubernetesGateway.Group,
	Version: gvk.KubernetesGateway.Version,
	Kind:    "Mesh",
}

// meshParentKey is the parentKey under which the implicit mesh parent is indexed.
var meshParentKey = parentKey{
	Kind: meshGVK,
	Name: "istio",
}

// configContext bundles the input GatewayResources with indexes built during conversion.
type configContext struct {
	GatewayResources
	// AllowedReferences records which cross-namespace references are permitted by ReferenceGrants.
	AllowedReferences AllowedReferences
	// GatewayReferences indexes parentInfo by parentKey for route binding.
	GatewayReferences map[parentKey][]*parentInfo

	// key: referenced resources(e.g. secrets), value: gateway-api resources(e.g. gateways)
	resourceReferences map[model.ConfigKey][]model.ConfigKey
}

// parentInfo holds info about a "parent" - something that can be referenced as a ParentRef in the API.
// Today, this is just Gateway and Mesh.
type parentInfo struct {
	// InternalName refers to the internal name we can reference it by. For example, "mesh" or "my-ns/my-gateway"
	InternalName string
	// AllowedKinds indicates which kinds can be admitted by this parent
	AllowedKinds []k8s.RouteGroupKind
	// Hostnames is the hostnames that must be matched to reference the parent. For gateway this is listener hostname
	// Format is ns/hostname
	Hostnames []string
	// OriginalHostname is the unprocessed form of Hostnames; how it appeared in users' config
	OriginalHostname string

	// AttachedRoutes keeps track of how many routes are attached to this parent. This is tracked for status.
	// Because this is mutated during route generation, parentInfo must be passed as a pointer
	AttachedRoutes int32
	// ReportAttachedRoutes is a callback that should be triggered once all AttachedRoutes are computed, to
	// actually store the attached route count in the status
	ReportAttachedRoutes func()
	// SectionName is the listener name this parent corresponds to.
	SectionName k8s.SectionName
	// Port is the listener port this parent corresponds to.
	Port k8sv1.PortNumber
}

// routeParentReference holds information about a route's parent reference
type routeParentReference struct {
	// InternalName refers to the internal name of the parent we can reference it by. For example, "mesh" or "my-ns/my-gateway"
	InternalName string
	// InternalKind is the Group/Kind of the parent
	InternalKind config.GroupVersionKind
	// DeniedReason, if present, indicates why the reference was not valid
	DeniedReason *ParentError
	// OriginalReference contains the original reference
	OriginalReference k8s.ParentReference
	// Hostname is the hostname match of the parent, if any
	Hostname string
}
// IsMesh reports whether this parent reference points to the implicit "mesh" parent
// rather than a Gateway.
func (r routeParentReference) IsMesh() bool {
	return r.InternalName == "mesh"
}
// filteredReferences drops parent references that were denied, returning the
// remainder sorted by internal name for deterministic output.
func filteredReferences(parents []routeParentReference) []routeParentReference {
	accepted := make([]routeParentReference, 0, len(parents))
	for _, parent := range parents {
		if parent.DeniedReason == nil {
			accepted = append(accepted, parent)
		}
	}
	// To ensure deterministic order, sort them
	sort.Slice(accepted, func(a, b int) bool {
		return accepted[a].InternalName < accepted[b].InternalName
	})
	return accepted
}
// getDefaultName derives the managed gateway's default resource name from the
// Gateway name and its class name.
func getDefaultName(name string, kgw *k8s.GatewaySpec) string {
	return name + "-" + string(kgw.GatewayClassName)
}
// convertGateways converts Kubernetes Gateway resources into Istio Gateway configs.
// It returns the generated configs, an index of parentKey -> parentInfo used later
// when binding routes to their parents, and the set of namespace-selector label keys
// referenced by any Gateway (so namespace label changes can trigger re-processing).
func convertGateways(r configContext) ([]config.Config, map[parentKey][]*parentInfo, sets.String) {
	// result stores our generated Istio Gateways
	result := []config.Config{}
	// gwMap stores an index to access parentInfo (which corresponds to a Kubernetes Gateway)
	gwMap := map[parentKey][]*parentInfo{}
	// namespaceLabelReferences keeps track of all namespace label keys referenced by Gateways. This is
	// used to ensure we handle namespace updates for those keys.
	namespaceLabelReferences := sets.New[string]()
	classes := getGatewayClasses(r.GatewayResources)
	for _, obj := range r.Gateway {
		obj := obj
		kgw := obj.Spec.(*k8s.GatewaySpec)
		controllerName, f := classes[string(kgw.GatewayClassName)]
		if !f {
			// No gateway class found, this may be meant for another controller; should be skipped.
			continue
		}
		classInfo, f := classInfos[controllerName]
		if !f {
			continue
		}
		if classInfo.disableRouteGeneration {
			// We found it, but don't want to handle this class
			continue
		}
		servers := []*istio.Server{}

		// Extract the addresses. A gateway will bind to a specific Service
		gatewayServices, err := extractGatewayServices(r.GatewayResources, kgw, obj)
		if len(gatewayServices) == 0 && err != nil {
			// Short circuit if its a hard failure
			reportGatewayStatus(r, obj, classInfo, gatewayServices, servers, err)
			continue
		}
		for i, l := range kgw.Listeners {
			i := i
			namespaceLabelReferences.InsertAll(getNamespaceLabelReferences(l.AllowedRoutes)...)
			server, programmed := buildListener(r, obj, l, i, controllerName)
			servers = append(servers, server)
			if controllerName == constants.ManagedGatewayMeshController {
				// Waypoint doesn't actually convert the routes to VirtualServices
				continue
			}
			meta := parentMeta(obj, &l.Name)
			meta[constants.InternalGatewaySemantics] = constants.GatewaySemanticsGateway
			meta[model.InternalGatewayServiceAnnotation] = strings.Join(gatewayServices, ",")

			// Each listener generates an Istio Gateway with a single Server. This allows binding to a specific listener.
			gatewayConfig := config.Config{
				Meta: config.Meta{
					CreationTimestamp: obj.CreationTimestamp,
					GroupVersionKind:  gvk.Gateway,
					Name:              kubeconfig.InternalGatewayName(obj.Name, string(l.Name)),
					Annotations:       meta,
					Namespace:         obj.Namespace,
					Domain:            r.Domain,
				},
				Spec: &istio.Gateway{
					Servers: []*istio.Server{server},
				},
			}
			ref := parentKey{
				Kind:      gvk.KubernetesGateway,
				Name:      obj.Name,
				Namespace: obj.Namespace,
			}
			if _, f := gwMap[ref]; !f {
				gwMap[ref] = []*parentInfo{}
			}

			allowed, _ := generateSupportedKinds(l)
			pri := &parentInfo{
				InternalName:     obj.Namespace + "/" + gatewayConfig.Name,
				AllowedKinds:     allowed,
				Hostnames:        server.Hosts,
				OriginalHostname: string(ptr.OrEmpty(l.Hostname)),
				SectionName:      l.Name,
				Port:             l.Port,
			}
			// Deferred so AttachedRoutes can be incremented as routes bind, then
			// flushed to status once all routes are processed.
			pri.ReportAttachedRoutes = func() {
				reportListenerAttachedRoutes(i, obj, pri.AttachedRoutes)
			}
			gwMap[ref] = append(gwMap[ref], pri)
			if programmed {
				result = append(result, gatewayConfig)
			}
		}

		// If "gateway.istio.io/alias-for" annotation is present, any Route
		// that binds to the gateway will bind to its alias instead.
		// The typical usage is when the original gateway is not managed by the gateway controller
		// but the ( generated ) alias is. This allows people to build their own
		// gateway controllers on top of Istio Gateway Controller.
		if obj.Annotations != nil && obj.Annotations[gatewayAliasForAnnotationKey] != "" {
			ref := parentKey{
				Kind:      gvk.KubernetesGateway,
				Name:      obj.Annotations[gatewayAliasForAnnotationKey],
				Namespace: obj.Namespace,
			}
			alias := parentKey{
				Kind:      gvk.KubernetesGateway,
				Name:      obj.Name,
				Namespace: obj.Namespace,
			}
			gwMap[ref] = gwMap[alias]
		}

		reportGatewayStatus(r, obj, classInfo, gatewayServices, servers, err)
	}
	// Insert a parent for Mesh references.
	gwMap[meshParentKey] = []*parentInfo{
		{
			InternalName: "mesh",
			// Mesh has no configurable AllowedKinds, so allow all supported
			AllowedKinds: []k8s.RouteGroupKind{
				{Group: (*k8s.Group)(ptr.Of(gvk.HTTPRoute.Group)), Kind: k8s.Kind(gvk.HTTPRoute.Kind)},
				{Group: (*k8s.Group)(ptr.Of(gvk.GRPCRoute.Group)), Kind: k8s.Kind(gvk.GRPCRoute.Kind)},
				{Group: (*k8s.Group)(ptr.Of(gvk.TCPRoute.Group)), Kind: k8s.Kind(gvk.TCPRoute.Kind)},
				{Group: (*k8s.Group)(ptr.Of(gvk.TLSRoute.Group)), Kind: k8s.Kind(gvk.TLSRoute.Kind)},
			},
		},
	}
	return result, gwMap, namespaceLabelReferences
}
// Gateway currently requires a listener (https://github.com/kubernetes-sigs/gateway-api/pull/1596).
// We don't *really* care about the listener, but it may make sense to add a warning if users do not
// configure it in an expected way so that we have consistency and can make changes in the future as needed.
// We could completely reject but that seems more likely to cause pain.
func unexpectedWaypointListener(l k8s.Listener) bool {
	return l.Port != 15008 || l.Protocol != k8s.ProtocolType(protocol.HBONE)
}
// getListenerNames collects the set of listener section names declared on a Gateway.
func getListenerNames(obj config.Config) sets.Set[k8s.SectionName] {
	names := sets.New[k8s.SectionName]()
	for _, listener := range obj.Spec.(*k8s.GatewaySpec).Listeners {
		names.Insert(listener.Name)
	}
	return names
}
// reportGatewayStatus writes the Accepted and Programmed conditions, the assigned
// addresses, and pruned listener statuses onto the Gateway's status.
//
// TODO: we lose address if servers is empty due to an error
func reportGatewayStatus(
	r configContext,
	obj config.Config,
	classInfo classInfo,
	gatewayServices []string,
	servers []*istio.Server,
	gatewayErr *ConfigError,
) {
	internal, internalIP, external, pending, warnings, allUsable := r.Context.ResolveGatewayInstances(obj.Namespace, gatewayServices, servers)

	// Setup initial conditions to the success state. If we encounter errors, we will update this.
	// We have two status
	// Accepted: is the configuration valid. We only have errors in listeners, and the status is not supposed to
	// be tied to listeners, so this is always accepted
	// Programmed: is the data plane "ready" (note: eventually consistent)
	gatewayConditions := map[string]*condition{
		string(k8sv1.GatewayConditionAccepted): {
			reason:  string(k8sv1.GatewayReasonAccepted),
			message: "Resource accepted",
		},
		string(k8sv1.GatewayConditionProgrammed): {
			reason:  string(k8sv1.GatewayReasonProgrammed),
			message: "Resource programmed",
		},
	}
	if gatewayErr != nil {
		gatewayConditions[string(k8sv1.GatewayConditionAccepted)].error = gatewayErr
	}
	if len(internal) > 0 {
		msg := fmt.Sprintf("Resource programmed, assigned to service(s) %s", humanReadableJoin(internal))
		// NOTE: index by the condition type (the map key), not the reason. Previously
		// this used GatewayReasonProgrammed, which only worked because its string value
		// coincides with GatewayConditionProgrammed.
		gatewayConditions[string(k8sv1.GatewayConditionProgrammed)].message = msg
	}
	if len(gatewayServices) == 0 {
		gatewayConditions[string(k8sv1.GatewayConditionProgrammed)].error = &ConfigError{
			Reason:  InvalidAddress,
			Message: "Failed to assign to any requested addresses",
		}
	} else if len(warnings) > 0 {
		var msg string
		var reason string
		if len(internal) != 0 {
			msg = fmt.Sprintf("Assigned to service(s) %s, but failed to assign to all requested addresses: %s",
				humanReadableJoin(internal), strings.Join(warnings, "; "))
		} else {
			msg = fmt.Sprintf("Failed to assign to any requested addresses: %s", strings.Join(warnings, "; "))
		}
		if allUsable {
			reason = string(k8sv1.GatewayReasonAddressNotAssigned)
		} else {
			reason = string(k8sv1.GatewayReasonAddressNotUsable)
		}
		gatewayConditions[string(k8sv1.GatewayConditionProgrammed)].error = &ConfigError{
			// TODO: this only checks Service ready, we should also check Deployment ready?
			Reason:  reason,
			Message: msg,
		}
	}
	obj.Status.(*kstatus.WrappedStatus).Mutate(func(s config.Status) config.Status {
		gs := s.(*k8s.GatewayStatus)
		// Prefer reporting external addresses; fall back to internal ones when none exist.
		addressesToReport := external
		addrType := k8s.IPAddressType
		if len(addressesToReport) == 0 {
			// There are no external addresses, so report the internal ones
			// TODO: should we always report both?
			if classInfo.addressType == k8s.IPAddressType {
				addressesToReport = internalIP
			} else {
				addrType = k8s.HostnameAddressType
				for _, hostport := range internal {
					svchost, _, _ := net.SplitHostPort(hostport)
					if !slices.Contains(pending, svchost) && !slices.Contains(addressesToReport, svchost) {
						addressesToReport = append(addressesToReport, svchost)
					}
				}
			}
		}
		// Do not report an address until we are ready. But once we are ready, never remove the address.
		if len(addressesToReport) > 0 {
			gs.Addresses = make([]k8sv1.GatewayStatusAddress, 0, len(addressesToReport))
			for _, addr := range addressesToReport {
				// Classify each address individually: IPs parse, hostnames do not.
				if _, err := netip.ParseAddr(addr); err == nil {
					addrType = k8s.IPAddressType
				} else {
					addrType = k8s.HostnameAddressType
				}
				gs.Addresses = append(gs.Addresses, k8sv1.GatewayStatusAddress{
					Value: addr,
					Type:  &addrType,
				})
			}
		}
		// Prune listeners that have been removed
		haveListeners := getListenerNames(obj)
		listeners := make([]k8s.ListenerStatus, 0, len(gs.Listeners))
		for _, l := range gs.Listeners {
			if haveListeners.Contains(l.Name) {
				haveListeners.Delete(l.Name)
				listeners = append(listeners, l)
			}
		}
		gs.Listeners = listeners
		gs.Conditions = setConditions(obj.Generation, gs.Conditions, gatewayConditions)
		return gs
	})
}
// IsManaged checks if a Gateway is managed (ie we create the Deployment and Service) or unmanaged.
// This is based on the address field of the spec. If address is set with a Hostname type, it should point to an existing
// Service that handles the gateway traffic. If it is not set, or refers to only a single IP, we will consider it managed and provision the Service.
// If there is an IP, we will set the `loadBalancerIP` type.
// While there is no defined standard for this in the API yet, it is tracked in https://github.com/kubernetes-sigs/gateway-api/issues/892.
// So far, this mirrors how out of clusters work (address set means to use existing IP, unset means to provision one),
// and there has been growing consensus on this model for in cluster deployments.
//
// Currently, the supported options are:
// * 1 Hostname value. This can be short Service name ingress, or FQDN ingress.ns.svc.cluster.local, example.com. If its a non-k8s FQDN it is a ServiceEntry.
// * 1 IP address. This is managed, with IP explicit
// * Nothing. This is managed, with IP auto assigned
//
// Not supported:
// Multiple hostname/IP - It is feasible but preference is to create multiple Gateways. This would also break the 1:1 mapping of GW:Service
// Mixed hostname and IP - doesn't make sense; user should define the IP in service
// NamedAddress - Service has no concept of named address. For cloud's that have named addresses they can be configured by annotations,
//
// which users can add to the Gateway.
func IsManaged(gw *k8s.GatewaySpec) bool {
	switch len(gw.Addresses) {
	case 0:
		// No address: fully managed, IP auto-assigned.
		return true
	case 1:
		// A single unset-or-IP address is still managed.
		t := gw.Addresses[0].Type
		return t == nil || *t == k8s.IPAddressType
	default:
		return false
	}
}
// extractGatewayServices returns the hostnames of the Service(s) a Gateway binds to.
// For managed gateways this is the single Service we will provision; for unmanaged
// gateways it is derived from the spec's Hostname addresses. A non-nil error returned
// together with a non-empty service list is a soft failure: it is reported in status
// but processing continues.
func extractGatewayServices(r GatewayResources, kgw *k8s.GatewaySpec, obj config.Config) ([]string, *ConfigError) {
	if IsManaged(kgw) {
		// Managed gateway: we create the Service; its name can be overridden by annotation.
		name := model.GetOrDefault(obj.Annotations[gatewayNameOverride], getDefaultName(obj.Name, kgw))
		return []string{fmt.Sprintf("%s.%s.svc.%v", name, obj.Namespace, r.Domain)}, nil
	}
	gatewayServices := []string{}
	skippedAddresses := []string{}
	for _, addr := range kgw.Addresses {
		if addr.Type != nil && *addr.Type != k8s.HostnameAddressType {
			// We only support HostnameAddressType. Keep track of invalid ones so we can report in status.
			skippedAddresses = append(skippedAddresses, addr.Value)
			continue
		}
		// TODO: For now we are using Addresses. There has been some discussion of allowing inline
		// parameters on the class field like a URL, in which case we will probably just use that. See
		// https://github.com/kubernetes-sigs/gateway-api/pull/614
		fqdn := addr.Value
		if !strings.Contains(fqdn, ".") {
			// Short name, expand it
			fqdn = fmt.Sprintf("%s.%s.svc.%s", fqdn, obj.Namespace, r.Domain)
		}
		gatewayServices = append(gatewayServices, fqdn)
	}
	if len(skippedAddresses) > 0 {
		// Give error but return services, this is a soft failure
		return gatewayServices, &ConfigError{
			Reason:  InvalidAddress,
			Message: fmt.Sprintf("only Hostname is supported, ignoring %v", skippedAddresses),
		}
	}
	if _, f := obj.Annotations[serviceTypeOverride]; f {
		// Give error but return services, this is a soft failure
		// Remove entirely in 1.20
		return gatewayServices, &ConfigError{
			Reason:  DeprecateFieldUsage,
			Message: fmt.Sprintf("annotation %v is deprecated, use Spec.Infrastructure.Routeability", serviceTypeOverride),
		}
	}
	return gatewayServices, nil
}
// getNamespaceLabelReferences fetches all label keys used in namespace selectors. Return order may not be stable.
func getNamespaceLabelReferences(routes *k8s.AllowedRoutes) []string {
	if routes == nil || routes.Namespaces == nil || routes.Namespaces.Selector == nil {
		return nil
	}
	keys := make([]string, 0, len(routes.Namespaces.Selector.MatchLabels))
	for key := range routes.Namespaces.Selector.MatchLabels {
		keys = append(keys, key)
	}
	for _, expr := range routes.Namespaces.Selector.MatchExpressions {
		if expr.Operator == metav1.LabelSelectorOpNotIn || expr.Operator == metav1.LabelSelectorOpDoesNotExist {
			// Over-matching is fine because this only controls the set of namespace
			// label change events to watch and the actual binding enforcement happens
			// by checking the intersection of the generated VirtualService.spec.hosts
			// and Istio Gateway.spec.servers.hosts arrays - we just can't miss
			// potentially relevant namespace label events here.
			keys = append(keys, "*")
		}
		keys = append(keys, expr.Key)
	}
	return keys
}
// buildListener converts a gateway-api Listener into an Istio Server, and records the
// listener-level status conditions (Accepted/Programmed/Conflicted/ResolvedRefs) for the
// listener at listenerIndex on obj. The returned bool is false when the TLS configuration
// was rejected; the Server is still returned so status can be reported consistently.
func buildListener(r configContext, obj config.Config, l k8s.Listener, listenerIndex int, controllerName k8s.GatewayController) (*istio.Server, bool) {
	// Start from "all good" conditions; errors discovered below overwrite the relevant entry.
	listenerConditions := map[string]*condition{
		string(k8sv1.ListenerConditionAccepted): {
			reason:  string(k8sv1.ListenerReasonAccepted),
			message: "No errors found",
		},
		string(k8sv1.ListenerConditionProgrammed): {
			reason:  string(k8sv1.ListenerReasonProgrammed),
			message: "No errors found",
		},
		string(k8sv1.ListenerConditionConflicted): {
			reason:  string(k8sv1.ListenerReasonNoConflicts),
			message: "No errors found",
			status:  kstatus.StatusFalse,
		},
		string(k8sv1.ListenerConditionResolvedRefs): {
			reason:  string(k8sv1.ListenerReasonResolvedRefs),
			message: "No errors found",
		},
	}
	ok := true
	tls, err := buildTLS(r, l.TLS, obj, kube.IsAutoPassthrough(obj.Labels, l))
	if err != nil {
		listenerConditions[string(k8sv1.ListenerConditionResolvedRefs)].error = err
		// Fix: index the map with the Listener condition type. The previous use of
		// GatewayConditionProgrammed only worked because both constants happen to share
		// the string value "Programmed"; if they ever diverged this would nil-deref.
		listenerConditions[string(k8sv1.ListenerConditionProgrammed)].error = &ConfigError{
			Reason:  string(k8sv1.GatewayReasonInvalid),
			Message: "Bad TLS configuration",
		}
		ok = false
	}
	hostnames := buildHostnameMatch(obj.Namespace, r.GatewayResources, l)
	server := &istio.Server{
		Port: &istio.Port{
			// Name is required. We only have one server per Gateway, so we can just name them all the same
			Name:     "default",
			Number:   uint32(l.Port),
			Protocol: listenerProtocolToIstio(l.Protocol),
		},
		Hosts: hostnames,
		Tls:   tls,
	}
	if controllerName == constants.ManagedGatewayMeshController {
		// Waypoints only accept a single HBONE listener on the well-known port.
		if unexpectedWaypointListener(l) {
			listenerConditions[string(k8sv1.ListenerConditionAccepted)].error = &ConfigError{
				Reason:  string(k8sv1.ListenerReasonUnsupportedProtocol),
				Message: `Expected a single listener on port 15008 with protocol "HBONE"`,
			}
		}
	}
	reportListenerCondition(listenerIndex, l, obj, listenerConditions)
	return server, ok
}
// listenerProtocolToIstio converts a gateway-api ProtocolType into the Istio protocol
// string used on a Server port.
func listenerProtocolToIstio(protocol k8s.ProtocolType) string {
	// Currently, all gateway-api protocols are valid Istio protocols, so a direct
	// string conversion suffices.
	return string(protocol)
}
// buildTLS converts a gateway-api GatewayTLSConfig into Istio ServerTLSSettings.
// Returns (nil, nil) when the listener has no TLS config. On invalid config, the
// partially-built settings are returned together with a non-nil *ConfigError so
// the caller can still report status.
func buildTLS(ctx configContext, tls *k8s.GatewayTLSConfig, gw config.Config, isAutoPassthrough bool) (*istio.ServerTLSSettings, *ConfigError) {
	if tls == nil {
		return nil, nil
	}
	// Explicitly not supported: file mounted
	// Not yet implemented: TLS mode, https redirect, max protocol version, SANs, CipherSuites, VerifyCertificate
	out := &istio.ServerTLSSettings{
		HttpsRedirect: false,
	}
	// Terminate is the default when no mode is specified.
	mode := k8sv1.TLSModeTerminate
	if tls.Mode != nil {
		mode = *tls.Mode
	}
	namespace := gw.Namespace
	switch mode {
	case k8sv1.TLSModeTerminate:
		out.Mode = istio.ServerTLSSettings_SIMPLE
		// Istio-specific TLS option to request mutual TLS termination.
		if tls.Options != nil && tls.Options[gatewayTLSTerminateModeKey] == "MUTUAL" {
			out.Mode = istio.ServerTLSSettings_MUTUAL
		}
		if len(tls.CertificateRefs) != 1 {
			// This is required in the API, should be rejected in validation
			return out, &ConfigError{Reason: InvalidTLS, Message: "exactly 1 certificateRefs should be present for TLS termination"}
		}
		cred, err := buildSecretReference(ctx, tls.CertificateRefs[0], gw)
		if err != nil {
			return out, err
		}
		// A cross-namespace secret reference requires an explicit ReferenceGrant.
		credNs := ptr.OrDefault((*string)(tls.CertificateRefs[0].Namespace), namespace)
		sameNamespace := credNs == namespace
		if !sameNamespace && !ctx.AllowedReferences.SecretAllowed(creds.ToResourceName(cred), namespace) {
			return out, &ConfigError{
				Reason: InvalidListenerRefNotPermitted,
				Message: fmt.Sprintf(
					"certificateRef %v/%v not accessible to a Gateway in namespace %q (missing a ReferenceGrant?)",
					tls.CertificateRefs[0].Name, credNs, namespace,
				),
			}
		}
		out.CredentialName = cred
	case k8sv1.TLSModePassthrough:
		out.Mode = istio.ServerTLSSettings_PASSTHROUGH
		// isAutoPassthrough is derived by the caller from the gateway's labels
		// (see kube.IsAutoPassthrough at the call site).
		if isAutoPassthrough {
			out.Mode = istio.ServerTLSSettings_AUTO_PASSTHROUGH
		}
	}
	return out, nil
}
// buildSecretReference validates a certificateRef and converts it to an Istio
// credential name. It also records the Gateway -> Secret dependency in
// ctx.resourceReferences, and (when a credential controller is available) eagerly
// checks that the referenced secret contains a parseable cert/key pair so errors
// surface in status rather than at handshake time.
func buildSecretReference(ctx configContext, ref k8s.SecretObjectReference, gw config.Config) (string, *ConfigError) {
	// Only core-group Secret references are supported.
	if !nilOrEqual((*string)(ref.Group), gvk.Secret.Group) || !nilOrEqual((*string)(ref.Kind), gvk.Secret.Kind) {
		return "", &ConfigError{Reason: InvalidTLS, Message: fmt.Sprintf("invalid certificate reference %v, only secret is allowed", objectReferenceString(ref))}
	}
	// Namespace defaults to the Gateway's own namespace when unset on the ref.
	secret := model.ConfigKey{
		Kind:      kind.Secret,
		Name:      string(ref.Name),
		Namespace: ptr.OrDefault((*string)(ref.Namespace), gw.Namespace),
	}
	// Track the reference so updates to this Secret retrigger processing of the Gateway.
	ctx.resourceReferences[secret] = append(ctx.resourceReferences[secret], model.ConfigKey{
		Kind:      kind.KubernetesGateway,
		Namespace: gw.Namespace,
		Name:      gw.Name,
	})
	if ctx.Credentials != nil {
		if certInfo, err := ctx.Credentials.GetCertInfo(secret.Name, secret.Namespace); err != nil {
			return "", &ConfigError{
				Reason:  InvalidTLS,
				Message: fmt.Sprintf("invalid certificate reference %v, %v", objectReferenceString(ref), err),
			}
		} else if _, err = tls.X509KeyPair(certInfo.Cert, certInfo.Key); err != nil {
			// Parse the pair to catch malformed certificates early.
			return "", &ConfigError{
				Reason:  InvalidTLS,
				Message: fmt.Sprintf("invalid certificate reference %v, the certificate is malformed: %v", objectReferenceString(ref), err),
			}
		}
	}
	return creds.ToKubernetesGatewayResource(secret.Namespace, secret.Name), nil
}
// objectReferenceString renders a SecretObjectReference as group/kind/name.namespace
// for use in error messages; nil optional fields render as empty strings.
func objectReferenceString(ref k8s.SecretObjectReference) string {
	group := ptr.OrEmpty(ref.Group)
	kind := ptr.OrEmpty(ref.Kind)
	ns := ptr.OrEmpty(ref.Namespace)
	return fmt.Sprintf("%s/%s/%s.%s", group, kind, ref.Name, ns)
}
// parentRefString renders a ParentReference as group/kind/name/section/port.namespace
// for use in error messages; nil optional fields render as zero values.
func parentRefString(ref k8s.ParentReference) string {
	group := ptr.OrEmpty(ref.Group)
	kind := ptr.OrEmpty(ref.Kind)
	section := ptr.OrEmpty(ref.SectionName)
	port := ptr.OrEmpty(ref.Port)
	ns := ptr.OrEmpty(ref.Namespace)
	return fmt.Sprintf("%s/%s/%s/%s/%d.%s", group, kind, ref.Name, section, port, ns)
}
// buildHostnameMatch generates a Gateway.spec.servers.hosts section from a listener
func buildHostnameMatch(localNamespace string, r GatewayResources, l k8s.Listener) []string {
	// We may allow all hostnames or a specific one
	hostname := "*"
	if l.Hostname != nil {
		hostname = string(*l.Hostname)
	}
	hosts := []string{}
	for _, ns := range namespacesFromSelector(localNamespace, r, l.AllowedRoutes) {
		// Skip empty namespaces to avoid emitting an invalid host entry.
		if ns == "" {
			continue
		}
		hosts = append(hosts, ns+"/"+hostname)
	}
	// If nothing matched use ~ namespace (match nothing). We need this since its illegal to have an
	// empty hostname list, but we still need the Gateway provisioned to ensure status is properly set and
	// SNI matches are established; we just don't want to actually match any routing rules (yet).
	if len(hosts) == 0 {
		return []string{"~/" + hostname}
	}
	return hosts
}
// namespacesFromSelector determines a list of allowed namespaces for a given AllowedRoutes
func namespacesFromSelector(localNamespace string, r GatewayResources, lr *k8s.AllowedRoutes) []string {
	// Default is to allow only the same namespace
	if lr == nil || lr.Namespaces == nil || lr.Namespaces.From == nil || *lr.Namespaces.From == k8sv1.NamespacesFromSame {
		return []string{localNamespace}
	}
	if *lr.Namespaces.From == k8sv1.NamespacesFromAll {
		return []string{"*"}
	}
	if lr.Namespaces.Selector == nil {
		// Should never happen, invalid config
		return []string{"*"}
	}
	// gateway-api has selectors, but Istio Gateway just has a list of names. We will run the selector
	// against all namespaces and get a list of matching namespaces that can be converted into a list
	// Istio can handle.
	sel, err := metav1.LabelSelectorAsSelector(lr.Namespaces.Selector)
	if err != nil {
		return nil
	}
	matched := []string{}
	for _, ns := range r.Namespaces {
		if sel.Matches(toNamespaceSet(ns.Name, ns.Labels)) {
			matched = append(matched, ns.Name)
		}
	}
	// Ensure stable order
	sort.Strings(matched)
	return matched
}
// nilOrEqual reports whether have is unset (nil) or matches expected.
func nilOrEqual(have *string, expected string) bool {
	if have == nil {
		return true
	}
	return *have == expected
}
// humanReadableJoin joins strings in English list style:
// "", "a", "a and b", "a, b, and c".
func humanReadableJoin(ss []string) string {
	n := len(ss)
	if n == 0 {
		return ""
	}
	if n == 1 {
		return ss[0]
	}
	if n == 2 {
		return ss[0] + " and " + ss[1]
	}
	// Oxford comma before the final element.
	return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1]
}
// NamespaceNameLabel is the label added automatically to namespaces in newer Kubernetes clusters.
const NamespaceNameLabel = "kubernetes.io/metadata.name"
// toNamespaceSet converts a namespace's labels into a Set usable for selector
// matching, guaranteeing the NamespaceNameLabel is present (older Kubernetes
// versions do not add it automatically).
func toNamespaceSet(name string, labels map[string]string) klabels.Set {
	if labels[NamespaceNameLabel] == name {
		// The label is already correct; reuse the map to avoid a copy.
		return labels
	}
	// Copy before mutating so we never modify the informer-owned map.
	out := make(map[string]string, len(labels)+1)
	for key, value := range labels {
		out[key] = value
	}
	out[NamespaceNameLabel] = name
	return out
}
// FuzzValidate reports whether every resource in the collection has a non-nil Spec,
// rejecting inputs that fuzzers construct with missing specs.
func (kr GatewayResources) FuzzValidate() bool {
	for _, c := range kr.GatewayClass {
		if c.Spec == nil {
			return false
		}
	}
	for _, g := range kr.Gateway {
		if g.Spec == nil {
			return false
		}
	}
	for _, route := range kr.HTTPRoute {
		if route.Spec == nil {
			return false
		}
	}
	for _, route := range kr.GRPCRoute {
		if route.Spec == nil {
			return false
		}
	}
	for _, route := range kr.TCPRoute {
		if route.Spec == nil {
			return false
		}
	}
	for _, route := range kr.TLSRoute {
		if route.Spec == nil {
			return false
		}
	}
	for _, grant := range kr.ReferenceGrant {
		if grant.Spec == nil {
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
gateway "sigs.k8s.io/gateway-api/apis/v1beta1"
"sigs.k8s.io/yaml"
"istio.io/api/label"
meshapi "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/namespace"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/revisions"
"istio.io/istio/pkg/test/util/tmpl"
"istio.io/istio/pkg/test/util/yml"
"istio.io/istio/pkg/util/sets"
)
// DeploymentController implements a controller that materializes a Gateway into an in cluster gateway proxy
// to serve requests from. This is implemented with a Deployment and Service today.
// The implementation makes a few non-obvious choices - namely using Server Side Apply from go templates
// and not using controller-runtime.
//
// controller-runtime has a number of constraints that make it inappropriate for usage here, despite this
// seeming to be the bread and butter of the library:
// * It is not readily possible to bring existing Informers, which would require extra watches (#1668)
// * Goroutine leaks (#1655)
// * Excessive API-server calls at startup which have no benefit to us (#1603)
// * Hard to use with SSA (#1669)
// While these can be worked around, at some point it isn't worth the effort.
//
// Server Side Apply with go templates is an odd choice (no one likes YAML templating...) but is one of the few
// remaining options after all others are ruled out.
//   - Merge patch/Update cannot be used. If we always enforce that our object is *exactly* the same as
//     the in-cluster object we will get in endless loops due to other controllers that like to add annotations, etc.
//     If we chose to allow any unknown fields, then we would never be able to remove fields we added, as
//     we cannot tell if we created it or someone else did. SSA fixes these issues
//   - SSA using client-go Apply libraries is almost a good choice, but most third-party clients (Istio, MCS, and gateway-api)
//     do not provide these libraries.
//   - SSA using standard API types doesn't work well either: https://github.com/kubernetes-sigs/controller-runtime/issues/1669
//   - This leaves YAML templates, converted to unstructured types and Applied with the dynamic client.
type DeploymentController struct {
	client    kube.Client
	clusterID cluster.ID
	env       *model.Environment
	// queue drives reconciliation; items are Gateway names.
	queue controllers.Queue
	// patcher performs the server-side apply (injectable for tests).
	patcher patcher
	gateways       kclient.Client[*gateway.Gateway]
	gatewayClasses kclient.Client[*gateway.GatewayClass]
	// clients maps GVRs to read-only stores, used to check ownership before applying.
	clients map[schema.GroupVersionResource]getter
	// injectConfig supplies the current injection templates and values.
	injectConfig func() inject.WebhookConfig
	deployments     kclient.Client[*appsv1.Deployment]
	services        kclient.Client[*corev1.Service]
	serviceAccounts kclient.Client[*corev1.ServiceAccount]
	namespaces      kclient.Client[*corev1.Namespace]
	// tagWatcher tracks which revision tags this control plane owns.
	tagWatcher revisions.TagWatcher
	revision   string
}
// Patcher is a function that abstracts patching logic. This is largely because client-go fakes do not handle patching.
// Implementations server-side apply data to the named resource of the given GVR.
type patcher func(gvr schema.GroupVersionResource, name string, namespace string, data []byte, subresources ...string) error
// classInfo holds information about a gateway class
type classInfo struct {
	// controller name for this class
	controller string
	// description for this class
	description string
	// The key in the templates to use for this class
	templates string
	// defaultServiceType sets the default service type if one is not explicitly set
	defaultServiceType corev1.ServiceType
	// disableRouteGeneration, if set, will make it so the controller ignores this class.
	disableRouteGeneration bool
	// addressType is the default address type to report
	addressType gateway.AddressType
}
// classInfos maps a GatewayController name to metadata about how we handle that class.
var classInfos = getClassInfos()

// builtinClasses maps well-known GatewayClass names to their controller names.
var builtinClasses = getBuiltinClasses()
// getBuiltinClasses returns the GatewayClass names we recognize without requiring a
// GatewayClass resource, keyed to their controller names. Optional classes are added
// only when the corresponding feature flag is enabled.
func getBuiltinClasses() map[gateway.ObjectName]gateway.GatewayController {
	classes := map[gateway.ObjectName]gateway.GatewayController{
		defaultClassName: constants.ManagedGatewayController,
	}
	if features.MultiNetworkGatewayAPI {
		classes[constants.RemoteGatewayClassName] = constants.UnmanagedGatewayController
	}
	if features.EnableAmbientControllers {
		classes[constants.WaypointGatewayClassName] = constants.ManagedGatewayMeshController
	}
	return classes
}
// getClassInfos builds the classInfo metadata for every controller we support,
// gated on the same feature flags as getBuiltinClasses.
func getClassInfos() map[gateway.GatewayController]classInfo {
	infos := map[gateway.GatewayController]classInfo{
		constants.ManagedGatewayController: {
			controller:         constants.ManagedGatewayController,
			description:        "The default Istio GatewayClass",
			templates:          "kube-gateway",
			defaultServiceType: corev1.ServiceTypeLoadBalancer,
			addressType:        gateway.HostnameAddressType,
		},
	}
	if features.MultiNetworkGatewayAPI {
		infos[constants.UnmanagedGatewayController] = classInfo{
			// This represents a gateway that our control plane cannot discover directly via the API server.
			// We shouldn't generate Istio resources for it. We aren't programming this gateway.
			controller:             constants.UnmanagedGatewayController,
			description:            "Remote to this cluster. Does not deploy or affect configuration.",
			disableRouteGeneration: true,
			addressType:            gateway.HostnameAddressType,
		}
	}
	if features.EnableAmbientControllers {
		infos[constants.ManagedGatewayMeshController] = classInfo{
			controller:         constants.ManagedGatewayMeshController,
			description:        "The default Istio waypoint GatewayClass",
			templates:          "waypoint",
			defaultServiceType: corev1.ServiceTypeClusterIP,
			addressType:        gateway.IPAddressType,
		}
	}
	return infos
}
// NewDeploymentController constructs a DeploymentController and registers required informers.
// The controller will not start until Run() is called.
func NewDeploymentController(client kube.Client, clusterID cluster.ID, env *model.Environment,
	webhookConfig func() inject.WebhookConfig, injectionHandler func(fn func()), tw revisions.TagWatcher, revision string,
	nsFilter namespace.DiscoveryNamespacesFilter,
) *DeploymentController {
	var filter namespace.DiscoveryFilter
	if nsFilter != nil {
		filter = nsFilter.Filter
	}
	gateways := kclient.NewFiltered[*gateway.Gateway](client, kclient.Filter{ObjectFilter: filter})
	gatewayClasses := kclient.New[*gateway.GatewayClass](client)
	dc := &DeploymentController{
		client:    client,
		clusterID: clusterID,
		clients:   map[schema.GroupVersionResource]getter{},
		env:       env,
		// Default patcher: forced server-side apply via the dynamic client, with this
		// controller as the field manager (see the type comment for why SSA).
		patcher: func(gvr schema.GroupVersionResource, name string, namespace string, data []byte, subresources ...string) error {
			c := client.Dynamic().Resource(gvr).Namespace(namespace)
			t := true
			_, err := c.Patch(context.Background(), name, types.ApplyPatchType, data, metav1.PatchOptions{
				Force:        &t,
				FieldManager: constants.ManagedGatewayController,
			}, subresources...)
			return err
		},
		gateways:       gateways,
		gatewayClasses: gatewayClasses,
		injectConfig:   webhookConfig,
		tagWatcher:     tw,
		revision:       revision,
	}
	dc.queue = controllers.NewQueue("gateway deployment",
		controllers.WithReconciler(dc.Reconcile),
		controllers.WithMaxAttempts(5))
	// Set up a handler that will add the parent Gateway object onto the queue.
	// The queue will only handle Gateway objects; if child resources (Service, etc) are updated we re-add
	// the Gateway to the queue and reconcile the state of the world.
	parentHandler := controllers.ObjectHandler(controllers.EnqueueForParentHandler(dc.queue, gvk.KubernetesGateway))
	dc.services = kclient.NewFiltered[*corev1.Service](client, kclient.Filter{ObjectFilter: filter})
	dc.services.AddEventHandler(parentHandler)
	dc.clients[gvr.Service] = NewUntypedWrapper(dc.services)
	dc.deployments = kclient.NewFiltered[*appsv1.Deployment](client, kclient.Filter{ObjectFilter: filter})
	dc.deployments.AddEventHandler(parentHandler)
	dc.clients[gvr.Deployment] = NewUntypedWrapper(dc.deployments)
	dc.serviceAccounts = kclient.NewFiltered[*corev1.ServiceAccount](client, kclient.Filter{ObjectFilter: filter})
	dc.serviceAccounts.AddEventHandler(parentHandler)
	dc.clients[gvr.ServiceAccount] = NewUntypedWrapper(dc.serviceAccounts)
	dc.namespaces = kclient.NewFiltered[*corev1.Namespace](client, kclient.Filter{ObjectFilter: filter})
	dc.namespaces.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		// TODO: make this more intelligent, checking if something we care about has changed
		// requeue this namespace
		for _, gw := range dc.gateways.List(o.GetName(), klabels.Everything()) {
			dc.queue.AddObject(gw)
		}
	}))
	gateways.AddEventHandler(controllers.ObjectHandler(dc.queue.AddObject))
	// GatewayClass changes may alter which Gateways we manage; requeue every Gateway
	// referencing the changed class.
	gatewayClasses.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		for _, g := range dc.gateways.List(metav1.NamespaceAll, klabels.Everything()) {
			if string(g.Spec.GatewayClassName) == o.GetName() {
				dc.queue.AddObject(g)
			}
		}
	}))
	// On injection template change, requeue all gateways
	injectionHandler(func() {
		for _, gw := range dc.gateways.List(metav1.NamespaceAll, klabels.Everything()) {
			dc.queue.AddObject(gw)
		}
	})
	dc.tagWatcher.AddHandler(dc.HandleTagChange)
	return dc
}
// Run blocks processing the reconcile queue until stop is closed. It waits for all
// informer caches to sync first, and shuts the informers down on exit.
func (d *DeploymentController) Run(stop <-chan struct{}) {
	kube.WaitForCacheSync(
		"deployment controller",
		stop,
		d.namespaces.HasSynced,
		d.deployments.HasSynced,
		d.services.HasSynced,
		d.serviceAccounts.HasSynced,
		d.gateways.HasSynced,
		d.gatewayClasses.HasSynced,
		d.tagWatcher.HasSynced,
	)
	// Blocks until stop is closed.
	d.queue.Run(stop)
	controllers.ShutdownAll(d.namespaces, d.deployments, d.services, d.serviceAccounts, d.gateways, d.gatewayClasses)
}
// Reconcile takes in the name of a Gateway and ensures the cluster is in the desired state
func (d *DeploymentController) Reconcile(req types.NamespacedName) error {
	log := log.WithLabels("gateway", req)
	gw := d.gateways.Get(req.Name, req.Namespace)
	if gw == nil {
		log.Debugf("gateway no longer exists")
		// we'll ignore not-found errors, since they can't be fixed by an immediate
		// requeue (we'll need to wait for a new notification), and we can get them
		// on deleted requests.
		return nil
	}
	// Resolve the controller: prefer the GatewayClass resource, falling back to the
	// built-in class names when the resource does not exist.
	var controller gateway.GatewayController
	if gc := d.gatewayClasses.Get(string(gw.Spec.GatewayClassName), ""); gc != nil {
		controller = gc.Spec.ControllerName
	} else if builtin, found := builtinClasses[gw.Spec.GatewayClassName]; found {
		controller = builtin
	}
	info, known := classInfos[controller]
	if !known {
		log.Debugf("skipping unknown controller %q", controller)
		return nil
	}
	// find the tag or revision indicated by the object
	selectedTag, ok := gw.Labels[label.IoIstioRev.Name]
	if !ok {
		ns := d.namespaces.Get(gw.Namespace, "")
		if ns == nil {
			log.Debugf("gateway is not for this revision, skipping")
			return nil
		}
		selectedTag = ns.Labels[label.IoIstioRev.Name]
	}
	myTags := d.tagWatcher.GetMyTags()
	if !myTags.Contains(selectedTag) && !(selectedTag == "" && myTags.Contains("default")) {
		log.Debugf("gateway is not for this revision, skipping")
		return nil
	}
	// TODO: Here we could check if the tag is set and matches no known tags, and handle that if we are default.
	// Matched class, reconcile it
	return d.configureIstioGateway(log, *gw, info)
}
// configureIstioGateway renders the class's templates for this Gateway and
// server-side applies the resulting manifests (Deployment, Service, etc.).
// It is a no-op for classes without templates, for unmanaged Gateways (explicit
// addresses), and for Gateways owned by a newer controller version.
func (d *DeploymentController) configureIstioGateway(log *istiolog.Scope, gw gateway.Gateway, gi classInfo) error {
	// If user explicitly sets addresses, we are assuming they are pointing to an existing deployment.
	// We will not manage it in this case
	if gi.templates == "" {
		log.Debug("skip gateway class without template")
		return nil
	}
	if !IsManaged(&gw.Spec) {
		log.Debug("skip disabled gateway")
		return nil
	}
	existingControllerVersion, overwriteControllerVersion, shouldHandle := ManagedGatewayControllerVersion(gw)
	if !shouldHandle {
		log.Debugf("skipping gateway which is managed by controller version %v", existingControllerVersion)
		return nil
	}
	log.Info("reconciling")
	var ns *corev1.Namespace
	if d.namespaces != nil {
		ns = d.namespaces.Get(gw.Namespace, "")
	}
	// Proxy UID/GID can be customized per namespace.
	proxyUID, proxyGID := inject.GetProxyIDs(ns)
	defaultName := getDefaultName(gw.Name, &gw.Spec)
	serviceType := gi.defaultServiceType
	if o, f := gw.Annotations[serviceTypeOverride]; f {
		// Deprecated annotation override for the Service type.
		serviceType = corev1.ServiceType(o)
	}
	// TODO: Codify this API (i.e how to know if a specific gateway is an Istio waypoint gateway)
	isWaypointGateway := strings.Contains(string(gw.Spec.GatewayClassName), "waypoint")
	// Default the network label for waypoints if not explicitly set in gateway's labels
	network := d.injectConfig().Values.Struct().GetGlobal().GetNetwork()
	if _, ok := gw.GetLabels()[label.TopologyNetwork.Name]; !ok && network != "" && isWaypointGateway {
		if gw.Labels == nil {
			gw.Labels = make(map[string]string)
		}
		gw.Labels[label.TopologyNetwork.Name] = d.injectConfig().Values.Struct().GetGlobal().GetNetwork()
	}
	input := TemplateInput{
		Gateway:                   &gw,
		DeploymentName:            model.GetOrDefault(gw.Annotations[gatewayNameOverride], defaultName),
		ServiceAccount:            model.GetOrDefault(gw.Annotations[gatewaySAOverride], defaultName),
		Ports:                     extractServicePorts(gw),
		ClusterID:                 d.clusterID.String(),
		KubeVersion:               kube.GetVersionAsInt(d.client),
		Revision:                  d.revision,
		ServiceType:               serviceType,
		ProxyUID:                  proxyUID,
		ProxyGID:                  proxyGID,
		InfrastructureLabels:      gw.GetLabels(),
		InfrastructureAnnotations: gw.GetAnnotations(),
	}
	// Pick the new or deprecated gateway-name label based on any existing Deployment.
	d.setGatewayNameLabel(&input)
	// Default to the gateway labels/annotations and overwrite if infrastructure labels/annotations are set
	gwInfra := gw.Spec.Infrastructure
	if gwInfra != nil && gwInfra.Labels != nil {
		infraLabels := make(map[string]string, len(gwInfra.Labels))
		for k, v := range gw.Spec.Infrastructure.Labels {
			if strings.HasPrefix(string(k), "gateway.networking.k8s.io/") {
				continue // ignore this prefix to avoid conflicts
			}
			infraLabels[string(k)] = string(v)
		}
		// Default the network label for waypoints if not explicitly set in infra labels
		// We do this a second time here for correctness since if infra labels are set (according to the gwapi spec),
		// the gateway's labels are ignored.
		if _, ok := infraLabels[label.TopologyNetwork.Name]; !ok && network != "" && isWaypointGateway {
			infraLabels[label.TopologyNetwork.Name] = network
		}
		input.InfrastructureLabels = infraLabels
	}
	if gwInfra != nil && gwInfra.Annotations != nil {
		infraAnnotations := make(map[string]string, len(gwInfra.Annotations))
		for k, v := range gw.Spec.Infrastructure.Annotations {
			if strings.HasPrefix(string(k), "gateway.networking.k8s.io/") {
				continue // ignore this prefix to avoid conflicts
			}
			infraAnnotations[string(k)] = string(v)
		}
		input.InfrastructureAnnotations = infraAnnotations
	}
	if overwriteControllerVersion {
		log.Debugf("write controller version, existing=%v", existingControllerVersion)
		if err := d.setGatewayControllerVersion(gw); err != nil {
			return fmt.Errorf("update gateway annotation: %v", err)
		}
	} else {
		log.Debugf("controller version existing=%v, no action needed", existingControllerVersion)
	}
	rendered, err := d.render(gi.templates, input)
	if err != nil {
		return fmt.Errorf("failed to render template: %v", err)
	}
	for _, t := range rendered {
		// Server-side apply each rendered manifest.
		if err := d.apply(gi.controller, t); err != nil {
			return fmt.Errorf("apply failed: %v", err)
		}
	}
	log.Info("gateway updated")
	return nil
}
const (
	// ControllerVersionAnnotation is an annotation added to the Gateway by the controller specifying
	// the "controller version". The original intent of this was to work around
	// https://github.com/istio/istio/issues/44164, where we needed to transition from a global owner
	// to a per-revision owner. The newer version number allows forcing ownership, even if the other
	// version was otherwise expected to control the Gateway.
	// The version number has no meaning other than "larger numbers win".
	// Numbers are used to future-proof in case we need to do another migration in the future.
	ControllerVersionAnnotation = "gateway.istio.io/controller-version"
	// ControllerVersion is the current version of our controller logic. Known versions are:
	//
	// * 1.17 and older: version 1 OR no version at all, depending on patch release
	// * 1.18+: version 5
	//
	// 2, 3, and 4 were intentionally skipped to allow for the (unlikely) event we need to insert
	// another version between these
	ControllerVersion = 5
)
// ManagedGatewayControllerVersion determines the version of the controller managing this Gateway,
// and if we should manage this.
// See ControllerVersionAnnotation for motivations.
func ManagedGatewayControllerVersion(gw gateway.Gateway) (existing string, takeOver bool, manage bool) {
	cur, ok := gw.Annotations[ControllerVersionAnnotation]
	if !ok {
		// No current owner, we should take it over.
		return "", true, true
	}
	num, err := strconv.Atoi(cur)
	if err != nil {
		// We cannot parse it - must be some new schema we don't know about. We should assume we do not manage it.
		// In theory, this should never happen, unless we decide a number was a bad idea in the future.
		return cur, false, false
	}
	switch {
	case num > ControllerVersion:
		// A newer version owns this gateway, let them handle it
		return cur, false, false
	case num == ControllerVersion:
		// We already manage this at this version
		// We will manage it, but no need to attempt to apply the version annotation, which could race with newer versions
		return cur, false, true
	default:
		// We are newer than the last owner - we can take over. We need to actually
		// re-apply the annotation
		return cur, true, true
	}
}
// derivedInput is the full template input: the per-Gateway TemplateInput plus
// values derived from the injection (sidecar) configuration.
type derivedInput struct {
	TemplateInput
	// Inserted from injection config
	ProxyImage  string
	ProxyConfig *meshapi.ProxyConfig
	MeshConfig  *meshapi.MeshConfig
	Values      map[string]any
}
// render executes the named injection template against the derived input and
// returns the resulting YAML, split into individual manifest documents.
func (d *DeploymentController) render(templateName string, mi TemplateInput) ([]string, error) {
	cfg := d.injectConfig()
	template := cfg.Templates[templateName]
	if template == nil {
		return nil, fmt.Errorf("no %q template defined", templateName)
	}
	// Look up any per-gateway ProxyConfig by matching both the new and the
	// deprecated gateway name labels.
	labelToMatch := map[string]string{constants.GatewayNameLabel: mi.Name, constants.DeprecatedGatewayNameLabel: mi.Name}
	proxyConfig := d.env.GetProxyConfigOrDefault(mi.Namespace, labelToMatch, nil, cfg.MeshConfig)
	input := derivedInput{
		TemplateInput: mi,
		ProxyImage: inject.ProxyImage(
			cfg.Values.Struct(),
			proxyConfig.GetImage(),
			mi.Annotations,
		),
		ProxyConfig: proxyConfig,
		MeshConfig:  cfg.MeshConfig,
		Values:      cfg.Values.Map(),
	}
	results, err := tmpl.Execute(template, input)
	if err != nil {
		return nil, err
	}
	return yml.SplitString(results), nil
}
// setGatewayControllerVersion server-side applies the controller-version annotation to
// the Gateway, marking it as owned by this version of the controller.
func (d *DeploymentController) setGatewayControllerVersion(gws gateway.Gateway) error {
	patch := fmt.Sprintf(`{"apiVersion":"gateway.networking.k8s.io/v1beta1","kind":"Gateway","metadata":{"annotations":{"%s":"%d"}}}`,
		ControllerVersionAnnotation, ControllerVersion)
	log.Debugf("applying %v", patch)
	return d.patcher(gvr.KubernetesGateway, gws.GetName(), gws.GetNamespace(), []byte(patch))
}
// apply server-side applies a template to the cluster. The managed-by label is always
// set so canManage can later distinguish our resources from user-created ones.
func (d *DeploymentController) apply(controller string, yml string) error {
	data := map[string]any{}
	err := yaml.Unmarshal([]byte(yml), &data)
	if err != nil {
		return err
	}
	us := unstructured.Unstructured{Object: data}
	// set managed-by label ("/" is not a valid label value character)
	clabel := strings.ReplaceAll(controller, "/", "-")
	err = unstructured.SetNestedField(us.Object, clabel, "metadata", "labels", constants.ManagedGatewayLabel)
	if err != nil {
		return err
	}
	gvr, err := controllers.UnstructuredToGVR(us)
	if err != nil {
		return err
	}
	canManage, resourceVersion := d.canManage(gvr, us.GetName(), us.GetNamespace())
	if !canManage {
		log.Debugf("skipping %v/%v/%v, already managed", gvr, us.GetName(), us.GetNamespace())
		return nil
	}
	// Ensure our canManage assertion is not stale: send the observed resourceVersion with
	// the apply so a concurrent change fails the patch instead of being overwritten.
	us.SetResourceVersion(resourceVersion)
	// Fix: marshal AFTER setting the resourceVersion. Previously the object was marshaled
	// first, so the patch payload never contained the resourceVersion and the staleness
	// guard above had no effect.
	j, err := json.Marshal(us.Object)
	if err != nil {
		return err
	}
	log.Debugf("applying %v", string(j))
	if err := d.patcher(gvr, us.GetName(), us.GetNamespace(), j); err != nil {
		return fmt.Errorf("patch %v/%v/%v: %v", us.GroupVersionKind(), us.GetNamespace(), us.GetName(), err)
	}
	return nil
}
// HandleTagChange requeues every Gateway when the set of revision tags changes, since
// tag ownership determines which Gateways this controller reconciles (see Reconcile).
func (d *DeploymentController) HandleTagChange(newTags sets.String) {
	for _, gw := range d.gateways.List(metav1.NamespaceAll, klabels.Everything()) {
		d.queue.AddObject(gw)
	}
}
// canManage checks if a resource we are about to write should be managed by us. If the resource already exists
// but does not have the ManagedGatewayLabel, we won't overwrite it.
// This ensures we don't accidentally take over some resource we weren't supposed to, which could cause outages.
// Note K8s doesn't have a perfect way to "conditionally SSA", but its close enough (https://github.com/kubernetes/kubernetes/issues/116156).
// The second return value is the existing object's resourceVersion ("" when absent).
func (d *DeploymentController) canManage(gvr schema.GroupVersionResource, name, namespace string) (bool, string) {
	store, found := d.clients[gvr]
	if !found {
		log.Warnf("unknown GVR %v", gvr)
		// Even though we don't know what it is, allow users to put the resource. We won't be able to
		// protect against overwrites though.
		return true, ""
	}
	existing := store.Get(name, namespace)
	if existing == nil {
		// Nothing there yet, so it is ours to create.
		return true, ""
	}
	// An existing object is only manageable if it carries our managed-by label.
	_, managed := existing.GetLabels()[constants.ManagedGatewayLabel]
	return managed, existing.GetResourceVersion()
}
// setGatewayNameLabel sets either the new or deprecated gateway name label
// based on the template input.
// The existing Deployment's selector is consulted because selector matchLabels
// are immutable: if a previously-created Deployment already selects on the
// deprecated label, we must keep emitting it.
func (d *DeploymentController) setGatewayNameLabel(ti *TemplateInput) {
	ti.GatewayNameLabel = constants.GatewayNameLabel // default to the new gateway name label
	store, f := d.clients[gvr.Deployment]            // Use deployment since those matchlabels are immutable
	if !f {
		log.Warnf("deployment gvr not found in deployment controller clients; defaulting to the new gateway name label")
		return
	}
	dep := store.Get(ti.DeploymentName, ti.Namespace)
	if dep == nil {
		// Fixed log message grammar ("using to the new" -> "using the new").
		log.Debugf("deployment %s/%s not found in store; using the new gateway name label", ti.DeploymentName, ti.Namespace)
		return
	}
	// Base label choice on the deployment's selector
	_, exists := dep.(*appsv1.Deployment).Spec.Selector.MatchLabels[constants.DeprecatedGatewayNameLabel]
	if !exists {
		// The old label doesn't already exist on the deployment; use the new label
		return
	}
	// The old label exists on the deployment; use the old label
	ti.GatewayNameLabel = constants.DeprecatedGatewayNameLabel
}
// TemplateInput is the data handed to the gateway rendering templates,
// combining the Gateway resource with derived values such as ports and labels.
type TemplateInput struct {
	*gateway.Gateway
	// DeploymentName is the name of the Deployment rendered for this gateway;
	// also used by setGatewayNameLabel to look up the existing Deployment.
	DeploymentName string
	// ServiceAccount is the service account name used by the rendered workload.
	ServiceAccount string
	// Ports are the service ports, derived from the Gateway listeners (see extractServicePorts).
	Ports []corev1.ServicePort
	// ServiceType is the type of the rendered Service (e.g. LoadBalancer, ClusterIP).
	ServiceType corev1.ServiceType
	// ClusterID identifies the cluster — presumably for multi-cluster metadata; confirm at call sites.
	ClusterID string
	// KubeVersion is the Kubernetes version, for version-gated template logic.
	KubeVersion int
	// Revision is the Istio revision this gateway is associated with.
	Revision string
	// ProxyUID is the UID the proxy runs as.
	ProxyUID int64
	// ProxyGID is the GID the proxy runs as.
	ProxyGID int64
	// InfrastructureLabels are extra labels for the generated resources.
	InfrastructureLabels map[string]string
	// InfrastructureAnnotations are extra annotations for the generated resources.
	InfrastructureAnnotations map[string]string
	// GatewayNameLabel is the gateway-name label key to emit (new vs deprecated),
	// chosen by setGatewayNameLabel.
	GatewayNameLabel string
}
// extractServicePorts builds the Service ports for a Gateway: a fixed
// status-port (15021) followed by one port per listener, de-duplicated by
// port number.
func extractServicePorts(gw gateway.Gateway) []corev1.ServicePort {
	tcp := strings.ToLower(string(protocol.TCP))
	svcPorts := make([]corev1.ServicePort, 0, len(gw.Spec.Listeners)+1)
	svcPorts = append(svcPorts, corev1.ServicePort{
		Name:        "status-port",
		Port:        int32(15021),
		AppProtocol: &tcp,
	})
	seen := sets.New[int32]()
	for i, listener := range gw.Spec.Listeners {
		portNumber := int32(listener.Port)
		if seen.Contains(portNumber) {
			// Duplicate listener port: only expose it once on the Service.
			continue
		}
		seen.Insert(portNumber)
		portName := string(listener.Name)
		if portName == "" {
			// Should not happen since name is required, but in case an invalid resource gets in...
			portName = fmt.Sprintf("%s-%d", strings.ToLower(string(listener.Protocol)), i)
		}
		appProtocol := strings.ToLower(string(listener.Protocol))
		svcPorts = append(svcPorts, corev1.ServicePort{
			Name:        portName,
			Port:        portNumber,
			AppProtocol: &appProtocol,
		})
	}
	return svcPorts
}
// UntypedWrapper wraps a typed reader to an untyped one, since Go cannot do it automatically.
type UntypedWrapper[T controllers.ComparableObject] struct {
	// reader is the underlying typed client used for lookups.
	reader kclient.Reader[T]
}

// getter is the untyped read-only view exposed by UntypedWrapper.
type getter interface {
	Get(name, namespace string) controllers.Object
}
// NewUntypedWrapper constructs a getter backed by the given typed client.
func NewUntypedWrapper[T controllers.ComparableObject](c kclient.Client[T]) getter {
	return UntypedWrapper[T]{reader: c}
}
// Get fetches the object, converting a typed nil into an untyped nil.
func (u UntypedWrapper[T]) Get(name, namespace string) controllers.Object {
	// DO NOT return u.reader.Get directly, or we run into issues with https://go.dev/tour/methods/12
	obj := u.reader.Get(name, namespace)
	if controllers.IsNil(obj) {
		return nil
	}
	return obj
}
// Compile-time check that UntypedWrapper satisfies getter.
var _ getter = UntypedWrapper[*corev1.Service]{}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"github.com/hashicorp/go-multierror"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
k8sv1 "sigs.k8s.io/gateway-api/apis/v1"
k8s "sigs.k8s.io/gateway-api/apis/v1alpha2"
gateway "sigs.k8s.io/gateway-api/apis/v1beta1"
"istio.io/istio/pilot/pkg/model/kstatus"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/util/istiomultierror"
)
// ClassController is a controller that creates the default Istio GatewayClass(s). This will not
// continually reconcile the full state of the GatewayClass object, and instead only create the class
// if it doesn't exist. This allows users to manage it through other means or modify it as they wish.
// If it is deleted, however, it will be added back.
// This controller intentionally does not do leader election for simplicity. Because we only create
// and not update there is no need; the first controller to create the GatewayClass wins.
type ClassController struct {
	// queue drives reconciliation; each queued item results in a Reconcile call.
	queue controllers.Queue
	// classes is the client used to read and create GatewayClass objects.
	classes kclient.Client[*gateway.GatewayClass]
}
// NewClassController builds a ClassController watching GatewayClass objects
// that match the built-in Istio class names.
func NewClassController(kc kube.Client) *ClassController {
	gc := &ClassController{}
	gc.queue = controllers.NewQueue("gateway class",
		controllers.WithReconciler(gc.Reconcile),
		controllers.WithMaxAttempts(25))

	gc.classes = kclient.New[*gateway.GatewayClass](kc)
	// Only events for the built-in classes are interesting.
	isBuiltin := func(o controllers.Object) bool {
		_, found := builtinClasses[gateway.ObjectName(o.GetName())]
		return found
	}
	gc.classes.AddEventHandler(controllers.FilteredObjectHandler(gc.queue.AddObject, isBuiltin))
	return gc
}
// Run seeds the queue with a single synthetic item (so current state is
// reconciled immediately), then processes the queue until stop is closed.
func (c *ClassController) Run(stop <-chan struct{}) {
	// Ensure we initially reconcile the current state
	c.queue.Add(types.NamespacedName{})
	c.queue.Run(stop)
}
// Reconcile ensures every built-in GatewayClass exists, collecting errors
// from all classes rather than stopping at the first failure.
func (c *ClassController) Reconcile(types.NamespacedName) error {
	errs := istiomultierror.New()
	for class := range builtinClasses {
		errs = multierror.Append(errs, c.reconcileClass(class))
	}
	return errs.ErrorOrNil()
}
// reconcileClass creates the GatewayClass for the given class name if it does
// not already exist. Existing classes are never modified, so users may edit
// them freely; a creation conflict (lost race with another instance) is
// treated as success.
func (c *ClassController) reconcileClass(class gateway.ObjectName) error {
	if c.classes.Get(string(class), "") != nil {
		log.Debugf("GatewayClass/%v already exists, no action", class)
		return nil
	}
	controller := builtinClasses[class]
	classInfo, f := classInfos[controller]
	if !f {
		// Should only happen when ambient is disabled; otherwise builtinClasses and classInfos should be consistent
		return nil
	}
	gc := &gateway.GatewayClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(class),
		},
		Spec: gateway.GatewayClassSpec{
			ControllerName: gateway.GatewayController(classInfo.controller),
			Description:    &classInfo.description,
		},
	}
	_, err := c.classes.Create(gc)
	switch {
	case err == nil:
		return nil
	case kerrors.IsConflict(err):
		// This is not really an error, just a race condition: another instance
		// created the class first. Previously the conflict was logged as benign
		// but then still returned, causing a pointless retry — return nil instead.
		log.Infof("Attempted to create GatewayClass/%v, but it was already created", class)
		return nil
	default:
		return err
	}
}
// GetClassStatus returns the class status with the Accepted condition set,
// updating the provided status in place (a fresh one is used when nil).
func GetClassStatus(existing *k8s.GatewayClassStatus, gen int64) k8s.GatewayClassStatus {
	if existing == nil {
		existing = &k8s.GatewayClassStatus{}
	}
	accepted := metav1.Condition{
		Type:               string(k8sv1.GatewayClassConditionStatusAccepted),
		Status:             kstatus.StatusTrue,
		ObservedGeneration: gen,
		LastTransitionTime: metav1.Now(),
		Reason:             string(k8sv1.GatewayClassConditionStatusAccepted),
		Message:            "Handled by Istio controller",
	}
	existing.Conditions = kstatus.UpdateConditionIfChanged(existing.Conditions, accepted)
	return *existing
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gateway
// gatewayGeneration wraps an arbitrary status value; its SetObservedGeneration
// is a deliberate no-op (see below), while Unwrap exposes the wrapped value.
type gatewayGeneration struct {
	// inner is the wrapped status object, returned unchanged by Unwrap.
	inner any
}

// SetObservedGeneration implements the generation-setting hook as a no-op.
func (g *gatewayGeneration) SetObservedGeneration(i int64) {
	// Intentionally blank. The observed generation of a gateway
	// status type is contained in the individual conditions
	// not at the top level, and is the responsibility
	// of the condition functions to update.
}

// Unwrap returns the wrapped status object.
func (g *gatewayGeneration) Unwrap() any {
	return g.inner
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
corev1 "k8s.io/api/core/v1"
k8s "sigs.k8s.io/gateway-api/apis/v1alpha2"
"istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pilot/pkg/model"
creds "istio.io/istio/pilot/pkg/model/credentials"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/sets"
)
const (
	// defaultClassName is the default Istio GatewayClass name.
	defaultClassName = "istio"
	// gatewayAliasForAnnotationKey marks a Gateway as an alias for another gateway.
	gatewayAliasForAnnotationKey = "gateway.istio.io/alias-for"
	// gatewayTLSTerminateModeKey configures the TLS terminate mode for a gateway.
	gatewayTLSTerminateModeKey = "gateway.istio.io/tls-terminate-mode"
	// gatewayNameOverride overrides the name used for generated gateway resources.
	gatewayNameOverride = "gateway.istio.io/name-override"
	// gatewaySAOverride overrides the service account used for the gateway.
	gatewaySAOverride = "gateway.istio.io/service-account"
	// serviceTypeOverride overrides the generated Service's type.
	serviceTypeOverride = "networking.istio.io/service-type"
)
// GatewayResources stores all gateway resources used for our conversion.
type GatewayResources struct {
	// Gateway API resources collected from the cluster, one slice per kind.
	GatewayClass   []config.Config
	Gateway        []config.Config
	HTTPRoute      []config.Config
	GRPCRoute      []config.Config
	TCPRoute       []config.Config
	TLSRoute       []config.Config
	ReferenceGrant []config.Config
	ServiceEntry   []config.Config
	// Namespaces stores all namespace in the cluster, keyed by name
	Namespaces map[string]*corev1.Namespace
	// Credentials stores all credentials in the cluster
	Credentials credentials.Controller
	// Domain for the cluster. Typically, cluster.local
	Domain string
	// Context carries additional conversion state (see GatewayContext).
	Context GatewayContext
}
// Grants describes what a set of ReferenceGrants permits: either everything
// (AllowAll) or an explicit set of names.
type Grants struct {
	// AllowAll permits any name when true.
	AllowAll bool
	// AllowedNames is the explicit set of permitted names, consulted when AllowAll is false.
	AllowedNames sets.String
}

// AllowedReferences indexes Grants by (from, to) Reference pairs.
type AllowedReferences map[Reference]map[Reference]*Grants
// SecretAllowed returns whether a gateway in the given namespace may access
// the secret identified by resourceName, per the indexed reference grants.
func (refs AllowedReferences) SecretAllowed(resourceName string, namespace string) bool {
	parsed, err := creds.ParseResourceName(resourceName, "", "", "")
	if err != nil {
		log.Warnf("failed to parse resource name %q: %v", resourceName, err)
		return false
	}
	from := Reference{Kind: gvk.KubernetesGateway, Namespace: k8s.Namespace(namespace)}
	to := Reference{Kind: gvk.Secret, Namespace: k8s.Namespace(parsed.Namespace)}
	grant := refs[from][to]
	if grant == nil {
		return false
	}
	if grant.AllowAll {
		return true
	}
	return grant.AllowedNames.Contains(parsed.Name)
}
// BackendAllowed returns whether a route of kind k in routeNamespace may
// reference the backend Service backendName in backendNamespace.
func (refs AllowedReferences) BackendAllowed(
	k config.GroupVersionKind,
	backendName k8s.ObjectName,
	backendNamespace k8s.Namespace,
	routeNamespace string,
) bool {
	src := Reference{Kind: k, Namespace: k8s.Namespace(routeNamespace)}
	dst := Reference{Kind: gvk.Service, Namespace: backendNamespace}
	grant := refs[src][dst]
	if grant == nil {
		return false
	}
	if grant.AllowAll {
		return true
	}
	return grant.AllowedNames.Contains(string(backendName))
}
// IstioResources stores all outputs of our conversion
type IstioResources struct {
	// Gateway holds the converted Istio Gateway configs.
	Gateway []config.Config
	// VirtualService holds the converted Istio VirtualService configs.
	VirtualService []config.Config
	// AllowedReferences stores all allowed references, from Reference -> to Reference(s)
	AllowedReferences AllowedReferences
	// ReferencedNamespaceKeys stores the label key of all namespace selections. This allows us to quickly
	// determine if a namespace update could have impacted any Gateways. See namespaceEvent.
	ReferencedNamespaceKeys sets.String
	// ResourceReferences stores all resources referenced by gateway-api resources. This allows us to quickly
	// determine if a resource update could have impacted any Gateways.
	// key: referenced resources(e.g. secrets), value: gateway-api resources(e.g. gateways)
	ResourceReferences map[model.ConfigKey][]model.ConfigKey
}

// Reference stores a reference to a namespaced GVK, as used by ReferencePolicy
type Reference struct {
	// Kind is the referenced group/version/kind.
	Kind config.GroupVersionKind
	// Namespace is the namespace of the referenced object.
	Namespace k8s.Namespace
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ingress provides a read-only view of Kubernetes ingress resources
// as an ingress rule configuration type store
package ingress
import (
"errors"
"fmt"
"sort"
"sync"
corev1 "k8s.io/api/core/v1"
knetworking "k8s.io/api/networking/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
kubecontroller "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/util/sets"
)
// In 1.0, the Gateway is defined in the namespace where the actual controller runs, and needs to be managed by
// user.
// The gateway is named by appending "-istio-autogenerated-k8s-ingress" to the name of the ingress.
//
// Currently the gateway namespace is hardcoded to istio-system (model.IstioIngressNamespace)
//
// VirtualServices are also auto-generated in the model.IstioIngressNamespace.
//
// The sync of Ingress objects to IP is done by status.go
// the 'ingress service' name is used to get the IP of the Service
// If ingress service is empty, it falls back to NodeExternalIP list, selected using the labels.
// This is using 'namespace' of pilot - but seems to be broken (never worked), since it uses Pilot's pod labels
// instead of the ingress labels.
// Follows mesh.IngressControllerMode setting to enable - OFF|STRICT|DEFAULT.
// STRICT requires "kubernetes.io/ingress.class" == mesh.IngressClass
// DEFAULT allows Ingress without explicit class.
// In 1.1:
// - K8S_INGRESS_NS - namespace of the Gateway that will act as ingress.
// - labels of the gateway set to "app=ingressgateway" for node_port, service set to 'ingressgateway' (matching default install)
// If we need more flexibility - we can add it (but likely we'll deprecate ingress support first)
// -
// schemas lists the config kinds this read-only store serves: VirtualService
// and Gateway configs generated from Kubernetes Ingress resources.
var schemas = collection.SchemasFor(
	collections.VirtualService,
	collections.Gateway)
// controller is a read-only ConfigStoreController presenting Kubernetes
// Ingress resources as Istio Gateway/VirtualService configs.
// Control needs RBAC permissions to write to Pods.
type controller struct {
	// meshWatcher provides the current mesh config (ingress class/mode settings).
	meshWatcher mesh.Holder
	// domainSuffix is the cluster DNS domain used when constructing service hosts.
	domainSuffix string

	queue controllers.Queue
	// virtualServiceHandlers/gatewayHandlers are notified on change events (see onEvent).
	virtualServiceHandlers []model.EventHandler
	gatewayHandlers        []model.EventHandler

	// mutex guards the ingresses map.
	mutex sync.RWMutex
	// processed ingresses
	ingresses map[types.NamespacedName]*knetworking.Ingress

	classes  kclient.Client[*knetworking.IngressClass]
	ingress  kclient.Client[*knetworking.Ingress]
	services kclient.Client[*corev1.Service]
}
// IngressNamespace is the namespace where generated Gateway configs are placed,
// configurable via the K8S_INGRESS_NS environment variable.
var IngressNamespace = env.Register("K8S_INGRESS_NS", constants.IstioIngressNamespace, "").Get()

// errUnsupportedOp is returned by all mutating ConfigStore methods on this read-only store.
var errUnsupportedOp = errors.New("unsupported operation: the ingress config store is a read-only view")
// NewController creates a new Kubernetes controller exposing Ingress
// resources as a read-only Istio config store.
func NewController(client kube.Client, meshWatcher mesh.Holder,
	options kubecontroller.Options,
) model.ConfigStoreController {
	filter := kclient.Filter{ObjectFilter: options.GetFilter()}
	c := &controller{
		meshWatcher:  meshWatcher,
		domainSuffix: options.DomainSuffix,
		ingresses:    make(map[types.NamespacedName]*knetworking.Ingress),
		ingress:      kclient.NewFiltered[*knetworking.Ingress](client, filter),
		classes:      kclient.New[*knetworking.IngressClass](client),
		services:     kclient.NewFiltered[*corev1.Service](client, filter),
	}
	c.queue = controllers.NewQueue("ingress",
		controllers.WithReconciler(c.onEvent),
		controllers.WithMaxAttempts(5))
	c.ingress.AddEventHandler(controllers.ObjectHandler(c.queue.AddObject))
	// We watch service changes to detect service port number change to trigger
	// re-convert ingress to new-vs.
	c.services.AddEventHandler(controllers.FromEventHandler(func(o controllers.Event) {
		c.onServiceEvent(o)
	}))
	return c
}
// Run waits for informer caches to sync, then processes the work queue until
// stop is closed, shutting down the underlying clients before returning.
func (c *controller) Run(stop <-chan struct{}) {
	kube.WaitForCacheSync("ingress", stop, c.ingress.HasSynced, c.services.HasSynced, c.classes.HasSynced)
	c.queue.Run(stop)
	controllers.ShutdownAll(c.ingress, c.services, c.classes)
}
// shouldProcessIngress decides whether this Ingress belongs to us, resolving
// its referenced IngressClass (when set) before delegating to
// shouldProcessIngressWithClass.
func (c *controller) shouldProcessIngress(mesh *meshconfig.MeshConfig, i *knetworking.Ingress) bool {
	var class *knetworking.IngressClass
	if i.Spec.IngressClassName != nil {
		// Renamed local to avoid shadowing the receiver `c`.
		resolved := c.classes.Get(*i.Spec.IngressClassName, "")
		if resolved == nil {
			// The referenced class does not exist; do not process.
			return false
		}
		class = resolved
	}
	return shouldProcessIngressWithClass(mesh, i, class)
}
// shouldProcessIngressUpdate checks whether we should renotify registered handlers about an update event
func (c *controller) shouldProcessIngressUpdate(ing *knetworking.Ingress) bool {
	// ingress add/update
	shouldProcess := c.shouldProcessIngress(c.meshWatcher.Mesh(), ing)
	item := config.NamespacedName(ing)
	if shouldProcess {
		// record processed ingress
		c.mutex.Lock()
		c.ingresses[item] = ing
		c.mutex.Unlock()
		return true
	}
	c.mutex.Lock()
	_, preProcessed := c.ingresses[item]
	if preProcessed {
		// previously processed but should not be currently; drop the record
		delete(c.ingresses, item)
	} else {
		c.ingresses[item] = ing
	}
	c.mutex.Unlock()
	// Handlers are only notified if this ingress used to be processed.
	return preProcessed
}
// onEvent handles a queued Ingress change: it classifies the event as update
// or delete, then notifies the registered VirtualService and Gateway handlers
// so the generated configs are re-pushed.
func (c *controller) onEvent(item types.NamespacedName) error {
	event := model.EventUpdate
	ing := c.ingress.Get(item.Name, item.Namespace)
	if ing == nil {
		// Not in the informer cache: treat as a delete, recovering the last
		// processed copy (if any) so handlers still see the object.
		event = model.EventDelete
		c.mutex.Lock()
		ing = c.ingresses[item]
		delete(c.ingresses, item)
		c.mutex.Unlock()
		if ing == nil {
			// It was a delete and we didn't have an existing known ingress, no action
			return nil
		}
	}
	// we should check need process only when event is not delete,
	// if it is delete event, and previously processed, we need to process too.
	if event != model.EventDelete {
		shouldProcess := c.shouldProcessIngressUpdate(ing)
		if !shouldProcess {
			return nil
		}
	}
	// Synthetic metadata: handlers only need name/namespace/kind to trigger a push.
	vsmetadata := config.Meta{
		Name:             item.Name + "-" + "virtualservice",
		Namespace:        item.Namespace,
		GroupVersionKind: gvk.VirtualService,
		// Set this label so that we do not compare configs and just push.
		Labels: map[string]string{constants.AlwaysPushLabel: "true"},
	}
	gatewaymetadata := config.Meta{
		Name:             item.Name + "-" + "gateway",
		Namespace:        item.Namespace,
		GroupVersionKind: gvk.Gateway,
		// Set this label so that we do not compare configs and just push.
		Labels: map[string]string{constants.AlwaysPushLabel: "true"},
	}
	// Trigger updates for Gateway and VirtualService
	// TODO: we could be smarter here and only trigger when real changes were found
	for _, f := range c.virtualServiceHandlers {
		f(config.Config{Meta: vsmetadata}, config.Config{Meta: vsmetadata}, event)
	}
	for _, f := range c.gatewayHandlers {
		f(config.Config{Meta: gatewaymetadata}, config.Config{Meta: gatewaymetadata}, event)
	}
	return nil
}
// onServiceEvent requeues any ingress that references the changed Service by
// port name, since a port number change alters the generated routes.
func (c *controller) onServiceEvent(input any) {
	event := input.(controllers.Event)
	svc := event.Latest().(*corev1.Service)

	// This is shortcut. We only care about the port number change if we receive service update event.
	if event.Event == controllers.EventUpdate {
		prev := event.Old.(*corev1.Service)
		// If the ports don't change, we do nothing.
		if extractPorts(prev.Spec.Ports).Equals(extractPorts(svc.Spec.Ports)) {
			return
		}
	}

	// We care about add, delete and ports changed event of services that are referred
	// by ingress using port name.
	svcKey := config.NamespacedName(svc).String()
	for _, ing := range c.ingress.List(svc.GetNamespace(), klabels.Everything()) {
		if extractServicesByPortNameType(ing).Contains(svcKey) {
			c.queue.AddObject(ing)
		}
	}
}
// RegisterEventHandler registers f for change notifications of the given
// kind; only VirtualService and Gateway are supported, other kinds are ignored.
func (c *controller) RegisterEventHandler(kind config.GroupVersionKind, f model.EventHandler) {
	switch kind {
	case gvk.VirtualService:
		c.virtualServiceHandlers = append(c.virtualServiceHandlers, f)
	case gvk.Gateway:
		c.gatewayHandlers = append(c.gatewayHandlers, f)
	}
}
// HasSynced reports whether the initial events have been processed by the queue.
func (c *controller) HasSynced() bool {
	return c.queue.HasSynced()
}

// Schemas returns the config kinds this store serves.
func (c *controller) Schemas() collection.Schemas {
	// TODO: are these two config descriptors right?
	return schemas
}

// Get always returns nil: individual generated configs are only available via List.
func (c *controller) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	return nil
}
// sortIngressByCreationTime sorts the list of config objects in ascending order by their creation time (if available).
func sortIngressByCreationTime(ingr []*knetworking.Ingress) []*knetworking.Ingress {
	sort.Slice(ingr, func(i, j int) bool {
		a, b := ingr[i], ingr[j]
		// If creation time is the same, then behavior is nondeterministic. In this case, we can
		// pick an arbitrary but consistent ordering based on name and namespace, which is unique.
		// CreationTimestamp is stored in seconds, so this is not uncommon.
		if a.CreationTimestamp == b.CreationTimestamp {
			return a.Name+"."+a.Namespace < b.Name+"."+b.Namespace
		}
		return a.CreationTimestamp.Before(&b.CreationTimestamp)
	})
	return ingr
}
// List returns the Gateway or VirtualService configs generated from the
// Ingress resources in the given namespace; other kinds yield nil.
func (c *controller) List(typ config.GroupVersionKind, namespace string) []config.Config {
	if typ != gvk.Gateway && typ != gvk.VirtualService {
		return nil
	}

	out := make([]config.Config, 0)
	ingressByHost := map[string]*config.Config{}
	for _, ing := range sortIngressByCreationTime(c.ingress.List(namespace, klabels.Everything())) {
		if !c.shouldProcessIngress(c.meshWatcher.Mesh(), ing) {
			continue
		}
		switch typ {
		case gvk.VirtualService:
			// VirtualServices are merged per host into ingressByHost.
			ConvertIngressVirtualService(*ing, c.domainSuffix, ingressByHost, c.services)
		case gvk.Gateway:
			out = append(out, ConvertIngressV1alpha3(*ing, c.meshWatcher.Mesh(), c.domainSuffix))
		}
	}
	if typ == gvk.VirtualService {
		for _, cfg := range ingressByHost {
			out = append(out, *cfg)
		}
	}
	return out
}
// extractServicesByPortNameType extract services that are of port name type in the specified ingress resource.
func extractServicesByPortNameType(ingress *knetworking.Ingress) sets.String {
	result := sets.String{}
	for _, rule := range ingress.Spec.Rules {
		if rule.HTTP == nil {
			continue
		}
		for _, path := range rule.HTTP.Paths {
			svc := path.Backend.Service
			if svc == nil || svc.Port.Name == "" {
				// Only named-port backends are affected by Service port changes.
				continue
			}
			result.Insert(types.NamespacedName{
				Namespace: ingress.GetNamespace(),
				Name:      svc.Name,
			}.String())
		}
	}
	return result
}
// extractPorts renders each service port as "number|name" so two port lists
// can be compared as sets.
func extractPorts(ports []corev1.ServicePort) sets.String {
	out := sets.String{}
	for _, p := range ports {
		// the format is port number|port name.
		out.Insert(fmt.Sprintf("%d|%s", p.Port, p.Name))
	}
	return out
}
// Create is unsupported; the ingress config store is a read-only view.
func (c *controller) Create(_ config.Config) (string, error) {
	return "", errUnsupportedOp
}

// Update is unsupported; the ingress config store is a read-only view.
func (c *controller) Update(_ config.Config) (string, error) {
	return "", errUnsupportedOp
}

// UpdateStatus is unsupported; the ingress config store is a read-only view.
func (c *controller) UpdateStatus(config.Config) (string, error) {
	return "", errUnsupportedOp
}

// Patch is unsupported; the ingress config store is a read-only view.
func (c *controller) Patch(_ config.Config, _ config.PatchFunc) (string, error) {
	return "", errUnsupportedOp
}

// Delete is unsupported; the ingress config store is a read-only view.
func (c *controller) Delete(_ config.GroupVersionKind, _, _ string, _ *string) error {
	return errUnsupportedOp
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ingress
import (
"errors"
"fmt"
"sort"
"strconv"
"strings"
"github.com/hashicorp/go-multierror"
corev1 "k8s.io/api/core/v1"
knetworking "k8s.io/api/networking/v1"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
)
const (
	// IstioIngressController is the ingress controller name claimed by Istio
	// (presumably matched against IngressClass controllers — see
	// shouldProcessIngressWithClass for the actual check).
	IstioIngressController = "istio.io/ingress-controller"
)

// errNotFound is returned when a referenced service or named port cannot be resolved.
var errNotFound = errors.New("item not found")
// EncodeIngressRuleName encodes an ingress rule name for a given ingress resource name,
// as well as the position of the rule and path specified within it, counting from 1.
// ruleNum == pathNum == 0 indicates the default backend specified for an ingress.
func EncodeIngressRuleName(ingressName string, ruleNum, pathNum int) string {
	return ingressName + "-" + strconv.Itoa(ruleNum) + "-" + strconv.Itoa(pathNum)
}
// decodeIngressRuleName decodes an ingress rule name previously encoded with EncodeIngressRuleName.
func decodeIngressRuleName(name string) (ingressName string, ruleNum, pathNum int, err error) {
	parts := strings.Split(name, "-")
	if len(parts) < 3 {
		err = fmt.Errorf("could not decode string into ingress rule name: %s", name)
		return
	}
	// The final two segments are the rule and path numbers; everything before
	// them is the ingress name (re-joined, since it may itself contain '-').
	ingressName = strings.Join(parts[:len(parts)-2], "-")
	ruleNum, ruleErr := strconv.Atoi(parts[len(parts)-2])
	pathNum, pathErr := strconv.Atoi(parts[len(parts)-1])
	if ruleErr != nil || pathErr != nil {
		err = multierror.Append(
			fmt.Errorf("could not decode string into ingress rule name: %s", name),
			pathErr, ruleErr)
		return
	}
	return
}
// ConvertIngressV1alpha3 converts from ingress spec to Istio Gateway
func ConvertIngressV1alpha3(ingress knetworking.Ingress, mesh *meshconfig.MeshConfig, domainSuffix string) config.Config {
	gw := &networking.Gateway{
		Selector: getIngressGatewaySelector(mesh.IngressSelector, mesh.IngressService),
	}
	// One HTTPS server per TLS entry that names a secret.
	for i, tls := range ingress.Spec.TLS {
		if tls.SecretName == "" {
			log.Infof("invalid ingress rule %s:%s for hosts %q, no secretName defined", ingress.Namespace, ingress.Name, tls.Hosts)
			continue
		}
		// TODO validation when multiple wildcard tls secrets are given
		hosts := tls.Hosts
		if len(hosts) == 0 {
			hosts = []string{"*"}
		}
		gw.Servers = append(gw.Servers, &networking.Server{
			Port: &networking.Port{
				Number:   443,
				Protocol: string(protocol.HTTPS),
				Name:     fmt.Sprintf("https-443-ingress-%s-%s-%d", ingress.Name, ingress.Namespace, i),
			},
			Hosts: hosts,
			Tls: &networking.ServerTLSSettings{
				HttpsRedirect:  false,
				Mode:           networking.ServerTLSSettings_SIMPLE,
				CredentialName: tls.SecretName,
			},
		})
	}
	// Plain HTTP catch-all server on port 80.
	gw.Servers = append(gw.Servers, &networking.Server{
		Port: &networking.Port{
			Number:   80,
			Protocol: string(protocol.HTTP),
			Name:     fmt.Sprintf("http-80-ingress-%s-%s", ingress.Name, ingress.Namespace),
		},
		Hosts: []string{"*"},
	})
	return config.Config{
		Meta: config.Meta{
			GroupVersionKind: gvk.Gateway,
			Name:             ingress.Name + "-" + constants.IstioIngressGatewayName + "-" + ingress.Namespace,
			Namespace:        IngressNamespace,
			Domain:           domainSuffix,
		},
		Spec: gw,
	}
}
// ConvertIngressVirtualService converts from ingress spec to Istio VirtualServices.
// Rules are merged per host into ingressByHost; after each merge the host's
// routes are re-sorted to follow ingress precedence (longest path first,
// exact match before prefix).
func ConvertIngressVirtualService(ingress knetworking.Ingress, domainSuffix string,
	ingressByHost map[string]*config.Config, services kclient.Client[*corev1.Service],
) {
	// Ingress allows a single host - if missing '*' is assumed
	// We need to merge all rules with a particular host across
	// all ingresses, and return a separate VirtualService for each
	// host.
	for _, rule := range ingress.Spec.Rules {
		if rule.HTTP == nil {
			log.Infof("invalid ingress rule %s:%s for host %q, no paths defined", ingress.Namespace, ingress.Name, rule.Host)
			continue
		}
		host := rule.Host
		namePrefix := strings.Replace(host, ".", "-", -1)
		if host == "" {
			host = "*"
		}
		virtualService := &networking.VirtualService{
			Hosts:    []string{host},
			Gateways: []string{fmt.Sprintf("%s/%s-%s-%s", IngressNamespace, ingress.Name, constants.IstioIngressGatewayName, ingress.Namespace)},
		}
		httpRoutes := make([]*networking.HTTPRoute, 0, len(rule.HTTP.Paths))
		for _, httpPath := range rule.HTTP.Paths {
			httpMatch := &networking.HTTPMatchRequest{}
			if httpPath.PathType != nil {
				switch *httpPath.PathType {
				case knetworking.PathTypeExact:
					httpMatch.Uri = &networking.StringMatch{
						MatchType: &networking.StringMatch_Exact{Exact: httpPath.Path},
					}
				case knetworking.PathTypePrefix:
					// Optimize common case of / to not needed regex
					httpMatch.Uri = &networking.StringMatch{
						MatchType: &networking.StringMatch_Prefix{Prefix: httpPath.Path},
					}
				default:
					// Fallback to the legacy string matching
					// If the httpPath.Path is a wildcard path, Uri will be nil
					httpMatch.Uri = createFallbackStringMatch(httpPath.Path)
				}
			} else {
				httpMatch.Uri = createFallbackStringMatch(httpPath.Path)
			}
			httpRoute := ingressBackendToHTTPRoute(&httpPath.Backend, ingress.Namespace, domainSuffix, services)
			if httpRoute == nil {
				log.Infof("invalid ingress rule %s:%s for host %q, no backend defined for path", ingress.Namespace, ingress.Name, rule.Host)
				continue
			}
			// Only create a match if Uri is not nil. HttpMatchRequest cannot be empty
			if httpMatch.Uri != nil {
				httpRoute.Match = []*networking.HTTPMatchRequest{httpMatch}
			}
			httpRoutes = append(httpRoutes, httpRoute)
		}
		virtualService.Http = httpRoutes
		virtualServiceConfig := config.Config{
			Meta: config.Meta{
				GroupVersionKind: gvk.VirtualService,
				Name:             namePrefix + "-" + ingress.Name + "-" + constants.IstioIngressGatewayName,
				Namespace:        ingress.Namespace,
				Domain:           domainSuffix,
				Annotations:      map[string]string{constants.InternalRouteSemantics: constants.RouteSemanticsIngress},
			},
			Spec: virtualService,
		}
		old, f := ingressByHost[host]
		if f {
			vs := old.Spec.(*networking.VirtualService)
			vs.Http = append(vs.Http, httpRoutes...)
		} else {
			ingressByHost[host] = &virtualServiceConfig
		}
		// sort routes to meet ingress route precedence requirements
		// see https://kubernetes.io/docs/concepts/services-networking/ingress/#multiple-matches
		vs := ingressByHost[host].Spec.(*networking.VirtualService)
		sort.SliceStable(vs.Http, func(i, j int) bool {
			var r1Len, r2Len int
			var r1Ex, r2Ex bool
			// Only inspect Match[0] when the slice is non-empty. The previous
			// guard (`Match != nil || len(Match) != 0`) passed for a non-nil
			// but empty slice and would panic on the Match[0] index.
			if len(vs.Http[i].Match) > 0 {
				r1Len, r1Ex = getMatchURILength(vs.Http[i].Match[0])
			}
			if len(vs.Http[j].Match) > 0 {
				r2Len, r2Ex = getMatchURILength(vs.Http[j].Match[0])
			}
			// TODO: default at the end
			if r1Len == r2Len {
				return r1Ex && !r2Ex
			}
			return r1Len > r2Len
		})
	}
	// Matches * and "/". Currently not supported - would conflict
	// with any other explicit VirtualService.
	if ingress.Spec.DefaultBackend != nil {
		log.Infof("Ignore default wildcard ingress, use VirtualService %s:%s",
			ingress.Namespace, ingress.Name)
	}
}
// getMatchURILength returns the length of matching path, and whether the match type is EXACT
func getMatchURILength(match *networking.HTTPMatchRequest) (length int, exact bool) {
	switch m := match.GetUri().GetMatchType().(type) {
	case *networking.StringMatch_Exact:
		return len(m.Exact), true
	case *networking.StringMatch_Prefix:
		return len(m.Prefix), false
	}
	// should not happen
	return -1, false
}
// ingressBackendToHTTPRoute converts an Ingress backend into an HTTPRoute that
// sends 100% of traffic to the backing service, resolving a named service port
// when no numeric port is given. Returns nil when the backend or its service
// is missing, or when a named port cannot be resolved.
func ingressBackendToHTTPRoute(backend *knetworking.IngressBackend, namespace string,
	domainSuffix string, services kclient.Client[*corev1.Service],
) *networking.HTTPRoute {
	if backend == nil {
		return nil
	}
	if backend.Service == nil {
		log.Infof("backend service must be specified")
		return nil
	}
	port := &networking.PortSelector{}
	if backend.Service.Port.Number > 0 {
		port.Number = uint32(backend.Service.Port.Number)
	} else {
		// Named port: look it up on the Service object.
		resolvedPort, err := resolveNamedPort(backend, namespace, services)
		if err != nil {
			log.Infof("failed to resolve named port %s, error: %v", backend.Service.Port.Name, err)
			return nil
		}
		port.Number = uint32(resolvedPort)
	}
	destination := &networking.Destination{
		Host: fmt.Sprintf("%s.%s.svc.%s", backend.Service.Name, namespace, domainSuffix),
		Port: port,
	}
	return &networking.HTTPRoute{
		Route: []*networking.HTTPRouteDestination{{
			Destination: destination,
			Weight:      100,
		}},
	}
}
// resolveNamedPort resolves the backend's named service port to its numeric
// port by looking up the Service. Returns errNotFound if the service does not
// exist or has no port with that name.
func resolveNamedPort(backend *knetworking.IngressBackend, namespace string, services kclient.Client[*corev1.Service]) (int32, error) {
	svc := services.Get(backend.Service.Name, namespace)
	if svc == nil {
		return 0, errNotFound
	}
	wanted := backend.Service.Port.Name
	for _, svcPort := range svc.Spec.Ports {
		if svcPort.Name == wanted {
			return svcPort.Port, nil
		}
	}
	return 0, errNotFound
}
// shouldProcessIngressWithClass determines whether the given Ingress resource
// should be processed by the controller. Precedence: the legacy
// kubernetes.io/ingress.class annotation, then the resolved IngressClass
// (Kubernetes v1.18+), then the mesh-wide ingress controller mode.
// See https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class
func shouldProcessIngressWithClass(mesh *meshconfig.MeshConfig, ingress *knetworking.Ingress, ingressClass *knetworking.IngressClass) bool {
	if class, exists := ingress.Annotations[annotation.IoKubernetesIngressClass.Name]; exists {
		// Annotation present: in both STRICT and DEFAULT modes the class must
		// match the configured mesh ingress class; OFF never processes.
		switch mesh.IngressControllerMode {
		case meshconfig.MeshConfig_STRICT, meshconfig.MeshConfig_DEFAULT:
			return class == mesh.IngressClass
		case meshconfig.MeshConfig_OFF:
			return false
		default:
			log.Warnf("invalid ingress synchronization mode: %v", mesh.IngressControllerMode)
			return false
		}
	}
	if ingressClass != nil {
		// No annotation: defer to the IngressClass controller field.
		return ingressClass.Spec.Controller == IstioIngressController
	}
	// Neither annotation nor class: only DEFAULT mode picks up unclassed Ingresses.
	switch mesh.IngressControllerMode {
	case meshconfig.MeshConfig_DEFAULT:
		return true
	case meshconfig.MeshConfig_OFF, meshconfig.MeshConfig_STRICT:
		return false
	default:
		log.Warnf("invalid ingress synchronization mode: %v", mesh.IngressControllerMode)
		return false
	}
}
// createFallbackStringMatch converts a plain string into a StringMatch:
// wildcards yield nil, trailing ".*" or "/*" become prefix matches, and
// anything else becomes an exact match. Regexps are not converted.
func createFallbackStringMatch(s string) *networking.StringMatch {
	// Empty strings and pure wildcards produce no match at all.
	switch s {
	case "", "*", "/*", ".*":
		return nil
	}
	// e.g. "foo.*" or "foo/*" become a prefix match on "foo".
	for _, wildcardSuffix := range []string{".*", "/*"} {
		if strings.HasSuffix(s, wildcardSuffix) {
			return &networking.StringMatch{
				MatchType: &networking.StringMatch_Prefix{Prefix: strings.TrimSuffix(s, wildcardSuffix)},
			}
		}
	}
	// Everything else, e.g. "foo", becomes an exact match.
	return &networking.StringMatch{
		MatchType: &networking.StringMatch_Exact{Exact: s},
	}
}
// getIngressGatewaySelector builds the workload selector for the ingress
// gateway. An explicit ingressSelector wins; otherwise a non-default
// ingressService is reused as the selector (keeping one configuration for
// both); failing that, the default installation's gateway label is used.
// "istio-ingressgateway" as the service name keeps the legacy selector for
// backwards compatibility.
func getIngressGatewaySelector(ingressSelector, ingressService string) map[string]string {
	switch {
	case ingressSelector != "":
		return labels.Instance{constants.IstioLabel: ingressSelector}
	case ingressService != "" && ingressService != "istio-ingressgateway":
		return labels.Instance{constants.IstioLabel: ingressService}
	default:
		return labels.Instance{constants.IstioLabel: constants.IstioIngressLabelValue}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ingress
import (
"fmt"
"sort"
"strings"
corev1 "k8s.io/api/core/v1"
knetworking "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
kubecontroller "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
istiolabels "istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/mesh"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
netutil "istio.io/istio/pkg/util/net"
)
// statusLog is the scoped logger used for ingress status synchronization.
var statusLog = log.RegisterScope("ingress status", "")
// StatusSyncer keeps the status IP in each Ingress resource updated
type StatusSyncer struct {
	meshConfig mesh.Watcher
	// queue drives Reconcile for each Ingress needing a status update.
	queue controllers.Queue
	// Watched resources used to compute and write ingress status.
	ingresses      kclient.Client[*knetworking.Ingress]
	ingressClasses kclient.Client[*knetworking.IngressClass]
	pods           kclient.Client[*corev1.Pod]
	services       kclient.Client[*corev1.Service]
	nodes          kclient.Client[*corev1.Node]
}
// Run the syncer until stopCh is closed, then shut down all informer clients.
func (s *StatusSyncer) Run(stopCh <-chan struct{}) {
	s.queue.Run(stopCh)
	controllers.ShutdownAll(s.services, s.nodes, s.pods, s.ingressClasses, s.ingresses)
}
// NewStatusSyncer creates a new instance. It wires event handlers so that any
// change to an Ingress, IngressClass, the configured ingress Service, an
// ingress gateway Pod, or the mesh config re-enqueues the affected Ingresses.
func NewStatusSyncer(meshHolder mesh.Watcher, kc kubelib.Client, options kubecontroller.Options) *StatusSyncer {
	c := &StatusSyncer{
		meshConfig: meshHolder,
		ingresses:  kclient.NewFiltered[*knetworking.Ingress](kc, kclient.Filter{ObjectFilter: options.GetFilter()}),
		ingressClasses: kclient.New[*knetworking.IngressClass](kc),
		pods: kclient.NewFiltered[*corev1.Pod](kc, kclient.Filter{
			ObjectFilter:    options.GetFilter(),
			ObjectTransform: kubelib.StripPodUnusedFields,
		}),
		services: kclient.NewFiltered[*corev1.Service](kc, kclient.Filter{ObjectFilter: options.GetFilter()}),
		nodes: kclient.NewFiltered[*corev1.Node](kc, kclient.Filter{
			ObjectTransform: kubelib.StripNodeUnusedFields,
		}),
	}
	c.queue = controllers.NewQueue("ingress status",
		controllers.WithReconciler(c.Reconcile),
		controllers.WithMaxAttempts(5))
	// For any ingress change, enqueue it - we may need to update the status.
	c.ingresses.AddEventHandler(controllers.ObjectHandler(c.queue.AddObject))
	// For any class change, sync all ingress; the handler will filter non-matching ones already
	c.ingressClasses.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		// Just sync them all
		c.enqueueAll()
	}))
	// For services, we queue all Ingress if its the ingress service
	c.services.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		if o.GetName() == c.meshConfig.Mesh().IngressService && o.GetNamespace() == IngressNamespace {
			c.enqueueAll()
		}
	}))
	// For pods, we enqueue all Ingress if its part of the ingress service
	c.pods.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		if c.meshConfig.Mesh().IngressService != "" {
			// Ingress Service takes precedence
			return
		}
		ingressSelector := c.meshConfig.Mesh().IngressSelector
		// get all pods acting as ingress gateways
		igSelector := getIngressGatewaySelector(ingressSelector, "")
		if istiolabels.Instance(igSelector).SubsetOf(o.GetLabels()) {
			// Ingress selector matches this pod, enqueue everything
			c.enqueueAll()
		}
	}))
	// Mesh may have changed ingress fields, enqueue everything
	c.meshConfig.AddMeshHandler(c.enqueueAll)
	return c
}
// runningAddresses returns a list of IP addresses and/or FQDN in the namespace
// where the ingress controller is currently running.
// If IngressService is configured, the addresses come from that Service
// (ExternalName, LoadBalancer ingress IPs/hostnames, and external IPs);
// otherwise they are collected from the external IPs of the nodes hosting
// running ingress gateway pods.
func (s *StatusSyncer) runningAddresses() []string {
	addrs := make([]string, 0)
	ingressService := s.meshConfig.Mesh().IngressService
	ingressSelector := s.meshConfig.Mesh().IngressSelector
	if ingressService != "" {
		// Service-based resolution takes precedence over pod/node scanning.
		svc := s.services.Get(ingressService, IngressNamespace)
		if svc == nil {
			return nil
		}
		if svc.Spec.Type == corev1.ServiceTypeExternalName {
			addrs = append(addrs, svc.Spec.ExternalName)
			return addrs
		}
		// Prefer the LB-assigned IP; fall back to the hostname when the IP is unset.
		for _, ip := range svc.Status.LoadBalancer.Ingress {
			if ip.IP == "" {
				addrs = append(addrs, ip.Hostname)
			} else {
				addrs = append(addrs, ip.IP)
			}
		}
		addrs = append(addrs, svc.Spec.ExternalIPs...)
		return addrs
	}
	// get all pods acting as ingress gateways
	igSelector := getIngressGatewaySelector(ingressSelector, ingressService)
	igPods := s.pods.List(IngressNamespace, labels.SelectorFromSet(igSelector))
	for _, pod := range igPods {
		// only Running pods are valid
		if pod.Status.Phase != corev1.PodRunning {
			continue
		}
		// Find node external IP
		node := s.nodes.Get(pod.Spec.NodeName, "")
		if node == nil {
			continue
		}
		for _, address := range node.Status.Addresses {
			if address.Type == corev1.NodeExternalIP {
				// Deduplicate: several pods may run on the same node.
				if address.Address != "" && !addressInSlice(address.Address, addrs) {
					addrs = append(addrs, address.Address)
				}
			}
		}
	}
	return addrs
}
// addressInSlice reports whether addr is already present in list.
func addressInSlice(addr string, list []string) bool {
	for _, existing := range list {
		if existing == addr {
			return true
		}
	}
	return false
}
// sliceToStatus converts a slice of IPs and/or hostnames into a stably sorted
// list of IngressLoadBalancerIngress entries: valid IP addresses populate the
// IP field, everything else the Hostname field.
func sliceToStatus(endpoints []string) []knetworking.IngressLoadBalancerIngress {
	lbi := make([]knetworking.IngressLoadBalancerIngress, 0, len(endpoints))
	for _, ep := range endpoints {
		entry := knetworking.IngressLoadBalancerIngress{}
		if netutil.IsValidIPAddress(ep) {
			entry.IP = ep
		} else {
			entry.Hostname = ep
		}
		lbi = append(lbi, entry)
	}
	sort.SliceStable(lbi, lessLoadBalancerIngress(lbi))
	return lbi
}
// lessLoadBalancerIngress returns a sort.SliceStable comparator ordering
// entries by Hostname first, then by IP.
func lessLoadBalancerIngress(addrs []knetworking.IngressLoadBalancerIngress) func(int, int) bool {
	return func(a, b int) bool {
		if addrs[a].Hostname != addrs[b].Hostname {
			return addrs[a].Hostname < addrs[b].Hostname
		}
		return addrs[a].IP < addrs[b].IP
	}
}
// ingressSliceEqual reports whether two load balancer ingress lists are
// element-wise equal on IP and Hostname (order-sensitive).
func ingressSliceEqual(lhs, rhs []knetworking.IngressLoadBalancerIngress) bool {
	if len(lhs) != len(rhs) {
		return false
	}
	for i := range lhs {
		if lhs[i].IP != rhs[i].IP || lhs[i].Hostname != rhs[i].Hostname {
			return false
		}
	}
	return true
}
// shouldTargetIngress determines whether the status watcher should target a given ingress resource.
// It resolves the referenced IngressClass (if any) and defers to
// shouldProcessIngressWithClass with the current mesh config.
func (s *StatusSyncer) shouldTargetIngress(ingress *knetworking.Ingress) bool {
	var ingressClass *knetworking.IngressClass
	if ingress.Spec.IngressClassName != nil {
		ingressClass = s.ingressClasses.Get(*ingress.Spec.IngressClassName, "")
	}
	return shouldProcessIngressWithClass(s.meshConfig.Mesh(), ingress, ingressClass)
}
// enqueueAll re-queues every known Ingress, across all namespaces, for reconciliation.
func (s *StatusSyncer) enqueueAll() {
	for _, ing := range s.ingresses.List(metav1.NamespaceAll, labels.Everything()) {
		s.queue.AddObject(ing)
	}
}
// Reconcile brings a single Ingress's status.loadBalancer.ingress list in line
// with the currently running ingress gateway addresses. It is a no-op when the
// Ingress is gone, not selected by this controller, or already up to date.
func (s *StatusSyncer) Reconcile(key types.NamespacedName) error {
	log := statusLog.WithLabels("ingress", key)
	ing := s.ingresses.Get(key.Name, key.Namespace)
	if ing == nil {
		log.Debugf("ingress removed, no action")
		return nil
	}
	shouldTarget := s.shouldTargetIngress(ing)
	if !shouldTarget {
		log.Debugf("ingress not selected, no action")
		return nil
	}
	// Sort both sides the same way so equality is order-insensitive.
	curIPs := ing.Status.LoadBalancer.Ingress
	sort.SliceStable(curIPs, lessLoadBalancerIngress(curIPs))
	wantIPs := sliceToStatus(s.runningAddresses())
	if ingressSliceEqual(wantIPs, curIPs) {
		log.Debugf("ingress has no change, no action")
		return nil
	}
	log.Infof("updating IPs (%v)", wantIPs)
	// Copy before mutation: the informer cache object must not be modified.
	ing = ing.DeepCopy()
	ing.Status.LoadBalancer.Ingress = wantIPs
	_, err := s.ingresses.UpdateStatus(ing)
	if err != nil {
		return fmt.Errorf("error updating ingress status: %v", err)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/slices"
)
// Controller is an implementation of ConfigStoreController.
type Controller struct {
	// monitor dispatches change events to registered handlers.
	monitor Monitor
	// configStore is the backing store all reads/writes delegate to.
	configStore model.ConfigStore
	// hasSynced, when set via RegisterHasSyncedHandler, overrides the default
	// always-synced behavior of HasSynced.
	hasSynced func() bool
	// If meshConfig.DiscoverySelectors are specified, the namespacesFilter tracks the namespaces this controller watches.
	// Use `any` (matching the rest of the package) instead of interface{}.
	namespacesFilter func(obj any) bool
}
// NewController returns an implementation of ConfigStoreController.
// This is a client-side monitor that dispatches events asynchronously as
// changes are made on the client.
func NewController(cs model.ConfigStore) *Controller {
	return &Controller{
		monitor:     NewMonitor(cs),
		configStore: cs,
	}
}
// NewSyncController returns an implementation of model.ConfigStoreController
// which processes events synchronously.
func NewSyncController(cs model.ConfigStore) *Controller {
	return &Controller{
		monitor:     NewSyncMonitor(cs),
		configStore: cs,
	}
}
// RegisterHasSyncedHandler installs a callback that HasSynced will consult
// instead of its always-true default.
func (c *Controller) RegisterHasSyncedHandler(cb func() bool) {
	c.hasSynced = cb
}
// RegisterEventHandler adds a handler invoked for every change event of the given kind.
func (c *Controller) RegisterEventHandler(kind config.GroupVersionKind, f model.EventHandler) {
	c.monitor.AppendEventHandler(kind, f)
}
// HasSynced returns whether the store has synced.
// It can be controlled externally (such as by the data source) via
// RegisterHasSyncedHandler; otherwise it always reports synced.
func (c *Controller) HasSynced() bool {
	if c.hasSynced == nil {
		return true
	}
	return c.hasSynced()
}
// Run starts the event monitor and blocks until stop is closed.
func (c *Controller) Run(stop <-chan struct{}) {
	c.monitor.Run(stop)
}
// Schemas returns the schemas supported by the backing store.
func (c *Controller) Schemas() collection.Schemas {
	return c.configStore.Schemas()
}
// Get returns the config of the given kind/key/namespace, or nil when it does
// not exist or the namespace is rejected by the namespaces filter.
// NOTE(review): namespacesFilter is called with the namespace string here but
// with a config.Config in List — confirm installed filters accept both.
func (c *Controller) Get(kind config.GroupVersionKind, key, namespace string) *config.Config {
	if c.namespacesFilter != nil && !c.namespacesFilter(namespace) {
		return nil
	}
	return c.configStore.Get(kind, key, namespace)
}
// Create stores the config and, on success, schedules an EventAdd for handlers.
func (c *Controller) Create(config config.Config) (revision string, err error) {
	revision, err = c.configStore.Create(config)
	if err != nil {
		return
	}
	c.monitor.ScheduleProcessEvent(ConfigEvent{
		config: config,
		event:  model.EventAdd,
	})
	return
}
// Update replaces the stored config and, on success, schedules an EventUpdate
// carrying the previous version (when one could be read) for handlers.
func (c *Controller) Update(config config.Config) (newRevision string, err error) {
	oldconfig := c.configStore.Get(config.GroupVersionKind, config.Name, config.Namespace)
	if newRevision, err = c.configStore.Update(config); err == nil {
		ev := ConfigEvent{
			config: config,
			event:  model.EventUpdate,
		}
		// Guard against a nil Get result (e.g. the config was created by another
		// caller between the Get above and the Update succeeding); previously
		// *oldconfig would panic in that window.
		if oldconfig != nil {
			ev.old = *oldconfig
		}
		c.monitor.ScheduleProcessEvent(ev)
	}
	return
}
// UpdateStatus replaces the stored config's status and, on success, schedules
// an EventUpdate carrying the previous version (when one could be read).
func (c *Controller) UpdateStatus(config config.Config) (newRevision string, err error) {
	oldconfig := c.configStore.Get(config.GroupVersionKind, config.Name, config.Namespace)
	if newRevision, err = c.configStore.UpdateStatus(config); err == nil {
		ev := ConfigEvent{
			config: config,
			event:  model.EventUpdate,
		}
		// Guard against a nil Get result (config created between Get and the
		// successful UpdateStatus); previously *oldconfig would panic here.
		if oldconfig != nil {
			ev.old = *oldconfig
		}
		c.monitor.ScheduleProcessEvent(ev)
	}
	return
}
// Patch applies patchFn to a deep copy of orig, validates the patch type, and
// delegates to the underlying store, emitting an EventUpdate on success.
// Only merge-patch and JSON-patch types are accepted.
// NOTE(review): patchFn is applied once here and then the already-patched cfg
// plus patchFn are handed to configStore.Patch, which may apply it again —
// confirm the double application is intended.
func (c *Controller) Patch(orig config.Config, patchFn config.PatchFunc) (newRevision string, err error) {
	cfg, typ := patchFn(orig.DeepCopy())
	switch typ {
	case types.MergePatchType:
	case types.JSONPatchType:
	default:
		return "", fmt.Errorf("unsupported merge type: %s", typ)
	}
	if newRevision, err = c.configStore.Patch(cfg, patchFn); err == nil {
		c.monitor.ScheduleProcessEvent(ConfigEvent{
			old:    orig,
			config: cfg,
			event:  model.EventUpdate,
		})
	}
	return
}
// Delete removes the config and, on success, schedules an EventDelete carrying
// the removed config. Returns an error when the config does not exist (or is
// filtered out) or when the underlying delete fails.
func (c *Controller) Delete(kind config.GroupVersionKind, key, namespace string, resourceVersion *string) error {
	existing := c.Get(kind, key, namespace)
	if existing == nil {
		return fmt.Errorf("delete: config %v/%v/%v does not exist", kind, namespace, key)
	}
	if err := c.configStore.Delete(kind, key, namespace, resourceVersion); err != nil {
		return err
	}
	c.monitor.ScheduleProcessEvent(ConfigEvent{
		config: *existing,
		event:  model.EventDelete,
	})
	return nil
}
// List returns all configs of the given kind in the namespace, restricted to
// the watched namespaces when a namespaces filter is installed.
func (c *Controller) List(kind config.GroupVersionKind, namespace string) []config.Config {
	configs := c.configStore.List(kind, namespace)
	if c.namespacesFilter == nil {
		return configs
	}
	return slices.Filter(configs, func(cfg config.Config) bool {
		return c.namespacesFilter(cfg)
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"istio.io/istio/pilot/pkg/model"
config2 "istio.io/istio/pkg/config"
"istio.io/istio/pkg/log"
)
const (
	// BufferSize specifies the buffer size of the (non-sync) monitor's event channel.
	BufferSize = 100
)
// Handler specifies a function to apply on a Config for a given event type.
// Arguments are (old config, new config, event).
type Handler func(config2.Config, config2.Config, model.Event)
// Monitor provides methods of manipulating changes in the config store
type Monitor interface {
	// Run processes scheduled events until the channel is closed.
	Run(<-chan struct{})
	// AppendEventHandler registers a handler for the given config kind.
	AppendEventHandler(config2.GroupVersionKind, Handler)
	// ScheduleProcessEvent submits an event for handler dispatch.
	ScheduleProcessEvent(ConfigEvent)
}
// ConfigEvent defines the event to be processed
type ConfigEvent struct {
	// config is the new/current state; old is the previous state (zero value
	// for adds/deletes); event identifies the kind of change.
	config config2.Config
	old    config2.Config
	event  model.Event
}
// configStoreMonitor dispatches config change events to per-kind handlers,
// either inline (sync mode) or via a buffered channel drained by Run.
type configStoreMonitor struct {
	store model.ConfigStore
	// handlers maps each config kind to its ordered handler list.
	handlers map[config2.GroupVersionKind][]Handler
	eventCh  chan ConfigEvent
	// If enabled, events will be handled synchronously
	sync bool
}
// NewMonitor returns a new Monitor implementation with a default event buffer
// size, dispatching events asynchronously.
func NewMonitor(store model.ConfigStore) Monitor {
	return newBufferedMonitor(store, BufferSize, false)
}
// NewSyncMonitor returns a new Monitor implementation which will process events synchronously.
func NewSyncMonitor(store model.ConfigStore) Monitor {
	return newBufferedMonitor(store, BufferSize, true)
}
// newBufferedMonitor returns a new Monitor implementation with the specified
// event buffer size; when sync is true, events bypass the buffer and are
// handled inline by ScheduleProcessEvent.
func newBufferedMonitor(store model.ConfigStore, bufferSize int, sync bool) Monitor {
	// Pre-register an empty handler list for every kind the store supports.
	handlers := make(map[config2.GroupVersionKind][]Handler)
	for _, s := range store.Schemas().All() {
		handlers[s.GroupVersionKind()] = make([]Handler, 0)
	}
	return &configStoreMonitor{
		store:    store,
		handlers: handlers,
		eventCh:  make(chan ConfigEvent, bufferSize),
		sync:     sync,
	}
}
// ScheduleProcessEvent delivers a config event to the handlers: inline when
// sync mode is enabled, otherwise via the buffered channel drained by Run.
func (m *configStoreMonitor) ScheduleProcessEvent(configEvent ConfigEvent) {
	if m.sync {
		m.processConfigEvent(configEvent)
	} else {
		m.eventCh <- configEvent
	}
}
// Run drains buffered events until stop is closed. In sync mode there is
// nothing to drain (events are handled inline by ScheduleProcessEvent), so it
// only blocks until stop.
func (m *configStoreMonitor) Run(stop <-chan struct{}) {
	if m.sync {
		<-stop
		return
	}
	for {
		select {
		case <-stop:
			return
		case ce, ok := <-m.eventCh:
			if ok {
				m.processConfigEvent(ce)
			}
		}
	}
}
// processConfigEvent dispatches a single event to the handlers registered for
// its kind, warning (and dropping the event) for unknown kinds.
func (m *configStoreMonitor) processConfigEvent(ce ConfigEvent) {
	gvk := ce.config.GroupVersionKind
	if _, found := m.handlers[gvk]; !found {
		log.Warnf("Config GroupVersionKind %s does not exist in config store", gvk)
		return
	}
	m.applyHandlers(ce.old, ce.config, ce.event)
}
// AppendEventHandler registers a handler for events of the given kind.
// No locking is performed; register handlers before events start flowing.
func (m *configStoreMonitor) AppendEventHandler(typ config2.GroupVersionKind, h Handler) {
	m.handlers[typ] = append(m.handlers[typ], h)
}
// applyHandlers invokes, in registration order, every handler registered for
// the config's kind with (old, new, event).
func (m *configStoreMonitor) applyHandlers(old config2.Config, config config2.Config, e model.Event) {
	for _, handler := range m.handlers[config.GroupVersionKind] {
		handler(old, config, e)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package memory provides an in-memory volatile config store implementation
package memory
import (
"errors"
"fmt"
"sync"
"time"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
)
var (
	// errNotFound is returned when the requested kind/namespace/name is absent.
	errNotFound = errors.New("item not found")
	// errAlreadyExists is returned by Create when the name already exists.
	errAlreadyExists = errors.New("item already exists")
	// TODO: can we make this compatible with kerror.IsConflict without imports the library?
	errConflict = errors.New("conflicting resource version, try again")
)
// ResourceVersion is the annotation key used to carry an explicit resource
// version for Update to apply (the annotation is consumed and removed there).
const ResourceVersion string = "ResourceVersion"
// Make creates an in-memory config store from a config schemas.
// Configs are validated on Create/Update/Patch.
func Make(schemas collection.Schemas) model.ConfigStore {
	return newStore(schemas, false)
}
// MakeSkipValidation creates an in-memory config store from a config schemas.
// Configs are stored without validation.
func MakeSkipValidation(schemas collection.Schemas) model.ConfigStore {
	return newStore(schemas, true)
}
// newStore builds the in-memory store, pre-creating the kind -> namespace ->
// name map for every schema the store knows about.
func newStore(schemas collection.Schemas, skipValidation bool) model.ConfigStore {
	all := schemas.All()
	data := make(map[config.GroupVersionKind]map[string]map[string]any, len(all))
	for _, s := range all {
		data[s.GroupVersionKind()] = make(map[string]map[string]any)
	}
	return &store{
		schemas:        schemas,
		data:           data,
		skipValidation: skipValidation,
	}
}
// store is an in-memory, mutex-guarded config store keyed by
// kind -> namespace -> name; values are config.Config.
type store struct {
	schemas collection.Schemas
	// data holds kind -> namespace -> name -> config.Config (stored as any).
	data map[config.GroupVersionKind]map[string]map[string]any
	// skipValidation disables schema validation on Create/Update/Patch.
	skipValidation bool
	mutex          sync.RWMutex
}
// Schemas returns the schemas this store was created with.
func (cr *store) Schemas() collection.Schemas {
	return cr.schemas
}
// Get returns a copy (by pointer) of the config for kind/name/namespace, or
// nil when the kind, namespace, or name is unknown.
func (cr *store) Get(kind config.GroupVersionKind, name, namespace string) *config.Config {
	cr.mutex.RLock()
	defer cr.mutex.RUnlock()
	byNamespace, ok := cr.data[kind]
	if !ok {
		return nil
	}
	byName, ok := byNamespace[namespace]
	if !ok {
		return nil
	}
	raw, ok := byName[name]
	if !ok {
		return nil
	}
	cfg := raw.(config.Config)
	return &cfg
}
// List returns all configs of the given kind; with an empty namespace it spans
// every namespace. Returns nil (not an empty slice) when the kind is unknown
// or the specific namespace has no entries map.
func (cr *store) List(kind config.GroupVersionKind, namespace string) []config.Config {
	cr.mutex.RLock()
	defer cr.mutex.RUnlock()
	byNamespace, exists := cr.data[kind]
	if !exists {
		return nil
	}
	if namespace != "" {
		ns, ok := byNamespace[namespace]
		if !ok {
			return nil
		}
		out := make([]config.Config, 0, len(ns))
		for _, value := range ns {
			out = append(out, value.(config.Config))
		}
		return out
	}
	// All namespaces: size the result up front, then collect.
	var total int
	for _, ns := range byNamespace {
		total += len(ns)
	}
	out := make([]config.Config, 0, total)
	for _, ns := range byNamespace {
		for _, value := range ns {
			out = append(out, value.(config.Config))
		}
	}
	return out
}
// Delete removes the named config of the given kind.
// The resourceVersion argument is currently ignored: deletion is unconditional.
// Returns errNotFound when the namespace or name does not exist, and an error
// for an unknown kind.
func (cr *store) Delete(kind config.GroupVersionKind, name, namespace string, resourceVersion *string) error {
	cr.mutex.Lock()
	defer cr.mutex.Unlock()
	data, ok := cr.data[kind]
	if !ok {
		return fmt.Errorf("unknown type %v", kind)
	}
	ns, exists := data[namespace]
	if !exists {
		return errNotFound
	}
	_, exists = ns[name]
	if !exists {
		return errNotFound
	}
	delete(ns, name)
	return nil
}
// Create stores a new config, validating it unless skipValidation is set.
// A missing ResourceVersion is filled with the current time's string form, and
// a zero CreationTimestamp is set to now. Returns the resource version on
// success, errAlreadyExists when the name is taken, or an error for an unknown
// kind / failed validation.
func (cr *store) Create(cfg config.Config) (string, error) {
	cr.mutex.Lock()
	defer cr.mutex.Unlock()
	kind := cfg.GroupVersionKind
	s, ok := cr.schemas.FindByGroupVersionKind(kind)
	if !ok {
		return "", fmt.Errorf("unknown type %v", kind)
	}
	if !cr.skipValidation {
		if _, err := s.ValidateConfig(cfg); err != nil {
			return "", err
		}
	}
	// Lazily create the namespace map on first write.
	ns, exists := cr.data[kind][cfg.Namespace]
	if !exists {
		ns = map[string]any{}
		cr.data[kind][cfg.Namespace] = ns
	}
	_, exists = ns[cfg.Name]
	if !exists {
		tnow := time.Now()
		if cfg.ResourceVersion == "" {
			cfg.ResourceVersion = tnow.String()
		}
		// Set the creation timestamp, if not provided.
		if cfg.CreationTimestamp.IsZero() {
			cfg.CreationTimestamp = tnow
		}
		ns[cfg.Name] = cfg
		return cfg.ResourceVersion, nil
	}
	return "", errAlreadyExists
}
// Update replaces an existing config, validating it unless skipValidation is
// set. A non-empty incoming ResourceVersion must match the stored one
// (errConflict otherwise). The new resource version is taken from the
// ResourceVersion annotation when present (and the annotation removed), else
// from the current time. Returns errNotFound when namespace or name is absent.
func (cr *store) Update(cfg config.Config) (string, error) {
	cr.mutex.Lock()
	defer cr.mutex.Unlock()
	kind := cfg.GroupVersionKind
	s, ok := cr.schemas.FindByGroupVersionKind(kind)
	if !ok {
		return "", fmt.Errorf("unknown type %v", kind)
	}
	if !cr.skipValidation {
		if _, err := s.ValidateConfig(cfg); err != nil {
			return "", err
		}
	}
	ns, exists := cr.data[kind][cfg.Namespace]
	if !exists {
		return "", errNotFound
	}
	existing, exists := ns[cfg.Name]
	if !exists {
		return "", errNotFound
	}
	if hasConflict(existing.(config.Config), cfg) {
		return "", errConflict
	}
	if cfg.Annotations != nil && cfg.Annotations[ResourceVersion] != "" {
		// An explicit version carried via annotation wins; consume it.
		cfg.ResourceVersion = cfg.Annotations[ResourceVersion]
		delete(cfg.Annotations, ResourceVersion)
	} else {
		cfg.ResourceVersion = time.Now().String()
	}
	ns[cfg.Name] = cfg
	return cfg.ResourceVersion, nil
}
// UpdateStatus is identical to Update for the in-memory store: the whole
// config (including status) is replaced.
func (cr *store) UpdateStatus(cfg config.Config) (string, error) {
	return cr.Update(cfg)
}
// Patch applies patchFn to orig, validates the result unless skipValidation is
// set, and stores it under the patched config's name with a fresh time-based
// resource version. Note this is an upsert within an existing namespace map:
// the name is not required to pre-exist. Returns errNotFound when the kind or
// namespace map is absent.
func (cr *store) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	cr.mutex.Lock()
	defer cr.mutex.Unlock()
	gvk := orig.GroupVersionKind
	s, ok := cr.schemas.FindByGroupVersionKind(gvk)
	if !ok {
		return "", fmt.Errorf("unknown type %v", gvk)
	}
	// The patch type returned by patchFn is ignored here; callers validate it.
	cfg, _ := patchFn(orig)
	if !cr.skipValidation {
		if _, err := s.ValidateConfig(cfg); err != nil {
			return "", err
		}
	}
	_, ok = cr.data[gvk]
	if !ok {
		return "", errNotFound
	}
	ns, exists := cr.data[gvk][orig.Namespace]
	if !exists {
		return "", errNotFound
	}
	rev := time.Now().String()
	cfg.ResourceVersion = rev
	ns[cfg.Name] = cfg
	return rev, nil
}
// hasConflict checks if the two resources have a conflict, which will block
// Update calls: an empty replacement version means the caller does not care
// (never a conflict); otherwise the versions must match exactly.
func hasConflict(existing, replacement config.Config) bool {
	return replacement.ResourceVersion != "" &&
		replacement.ResourceVersion != existing.ResourceVersion
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitor
import (
"os"
"path/filepath"
"sort"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
)
// supportedExtensions lists the file extensions parsed as config (YAML only).
var supportedExtensions = map[string]bool{
	".yaml": true,
	".yml": true,
}
// FileSnapshot holds a reference to a file directory that contains crd
// config and filter criteria for which of those configs will be parsed.
type FileSnapshot struct {
	// root is the directory walked for YAML config files.
	root string
	// domainSuffix is stamped onto every parsed config's Domain.
	domainSuffix string
	// configTypeFilter whitelists the config kinds kept by ReadConfigFiles.
	configTypeFilter map[config.GroupVersionKind]bool
}
// NewFileSnapshot returns a snapshotter over root.
// If no types are provided in the schemas, all Pilot types are allowed; either
// way, only kinds known to collections.Pilot end up in the filter.
func NewFileSnapshot(root string, schemas collection.Schemas, domainSuffix string) *FileSnapshot {
	ss := schemas.All()
	if len(ss) == 0 {
		ss = collections.Pilot.All()
	}
	filter := make(map[config.GroupVersionKind]bool, len(ss))
	for _, s := range ss {
		gvk := s.GroupVersionKind()
		if _, known := collections.Pilot.FindByGroupVersionKind(gvk); known {
			filter[gvk] = true
		}
	}
	return &FileSnapshot{
		root:             root,
		domainSuffix:     domainSuffix,
		configTypeFilter: filter,
	}
}
// ReadConfigFiles parses files in the root directory and returns a sorted slice of
// eligible model.Config. This can be used as a configFunc when creating a Monitor.
// Only regular files with a supported (YAML) extension are parsed; configs not
// in configTypeFilter are dropped. Walk errors are logged and also returned
// alongside whatever was collected.
func (f *FileSnapshot) ReadConfigFiles() ([]*config.Config, error) {
	var result []*config.Config
	err := filepath.Walk(f.root, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		} else if !supportedExtensions[filepath.Ext(path)] || (info.Mode()&os.ModeType) != 0 {
			// Skip non-YAML files and anything that is not a regular file.
			return nil
		}
		data, err := os.ReadFile(path)
		if err != nil {
			log.Warnf("Failed to read %s: %v", path, err)
			return err
		}
		configs, err := parseInputs(data, f.domainSuffix)
		if err != nil {
			log.Warnf("Failed to parse %s: %v", path, err)
			return err
		}
		// Filter any unsupported types before appending to the result.
		for _, cfg := range configs {
			if !f.configTypeFilter[cfg.GroupVersionKind] {
				continue
			}
			result = append(result, cfg)
		}
		return nil
	})
	if err != nil {
		log.Warnf("failure during filepath.Walk: %v", err)
	}
	// Sort by the config IDs.
	sort.Sort(byKey(result))
	return result, err
}
// parseInputs is identical to crd.ParseInputs, except that it returns an array
// of config pointers, each stamped with the given domain suffix.
func parseInputs(data []byte, domainSuffix string) ([]*config.Config, error) {
	configs, _, err := crd.ParseInputs(string(data))
	refs := make([]*config.Config, len(configs))
	for i := range configs {
		cfg := &configs[i]
		cfg.Domain = domainSuffix
		refs[i] = cfg
	}
	return refs, err
}
// byKey is an array of config objects that is capable of sorting by Namespace, GroupVersionKind, and Name.
type byKey []*config.Config

// Len implements sort.Interface.
func (rs byKey) Len() int {
	return len(rs)
}

// Swap implements sort.Interface.
func (rs byKey) Swap(i, j int) {
	rs[i], rs[j] = rs[j], rs[i]
}

// Less implements sort.Interface, delegating the ordering to compareIds.
func (rs byKey) Less(i, j int) bool {
	return compareIds(rs[i], rs[j]) < 0
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitor
import (
"os"
"path/filepath"
"reflect"
"strings"
"time"
"github.com/fsnotify/fsnotify"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
istiolog "istio.io/istio/pkg/log"
)
// Monitor will poll a config function in order to update a ConfigStore as
// changes are found.
type Monitor struct {
	// name identifies this monitor in logs and store updates.
	name string
	// root is the directory watched for file changes.
	root string
	// store receives the parsed configuration.
	store model.ConfigStore
	// configs is the last snapshot applied to the store.
	configs []*config.Config
	// getSnapshotFunc produces the current desired config set.
	getSnapshotFunc func() ([]*config.Config, error)
	// channel to trigger updates on
	// generally set to a file watch, but used in tests as well
	updateCh chan struct{}
}
// log is the scoped logger for the file configuration monitor.
var log = istiolog.RegisterScope("monitor", "file configuration monitor")
// NewMonitor creates a Monitor and will delegate to a passed in controller.
// The controller holds a reference to the actual store.
// Any func that returns a []*model.Config can be used with the Monitor.
// Watching does not begin until Start is called.
func NewMonitor(name string, delegateStore model.ConfigStore, getSnapshotFunc func() ([]*config.Config, error), root string) *Monitor {
	monitor := &Monitor{
		name:            name,
		root:            root,
		store:           delegateStore,
		getSnapshotFunc: getSnapshotFunc,
	}
	return monitor
}
// watchDebounceDelay batches bursts of file events into a single reload trigger.
const watchDebounceDelay = 50 * time.Millisecond
// fileTrigger sends a notification on ch when a file under path is mutated,
// debounced by watchDebounceDelay. New subdirectories are added to the watch
// as they appear; removed paths are dropped. A no-op when path is empty.
// The goroutine exits on watcher error or when stop is closed.
func fileTrigger(path string, ch chan struct{}, stop <-chan struct{}) error {
	if path == "" {
		return nil
	}
	fs, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	watcher := recursiveWatcher{fs}
	if err = watcher.watchRecursive(path); err != nil {
		return err
	}
	go func() {
		defer watcher.Close()
		// debounceC fires once after a quiet period following the first event.
		var debounceC <-chan time.Time
		for {
			select {
			case <-debounceC:
				debounceC = nil
				ch <- struct{}{}
			case e := <-watcher.Events:
				s, err := os.Stat(e.Name)
				if err == nil && s != nil && s.IsDir() {
					// If it's a directory, add a watch for it so we see nested files.
					if e.Op&fsnotify.Create != 0 {
						log.Debugf("add watch for %v: %v", s.Name(), watcher.watchRecursive(e.Name))
					}
				}
				// Can't stat a deleted directory, so attempt to remove it. If it fails it is not a problem
				if e.Op&fsnotify.Remove != 0 {
					_ = watcher.Remove(e.Name)
				}
				if debounceC == nil {
					debounceC = time.After(watchDebounceDelay)
				}
			case err := <-watcher.Errors:
				log.Warnf("Error watching file trigger: %v %v", path, err)
				return
			case signal := <-stop:
				log.Infof("Shutting down file watcher: %v %v", path, signal)
				return
			}
		}
	}()
	return nil
}
// recursiveWatcher wraps a fsnotify wrapper to add a best-effort recursive directory watching in user
// space. See https://github.com/fsnotify/fsnotify/issues/18. The implementation is inherently racy,
// as files added to a directory immediately after creation may not trigger events; as such it is only useful
// when an event causes a full reconciliation, rather than acting on an individual event
type recursiveWatcher struct {
	// Embeds the underlying watcher so Events/Errors/Close/Add are promoted.
	*fsnotify.Watcher
}
// watchRecursive adds all directories under the given one to the watch list.
// Files are skipped; any walk or Add error aborts the traversal.
func (m recursiveWatcher) watchRecursive(path string) error {
	return filepath.Walk(path, func(dir string, info os.FileInfo, walkErr error) error {
		if walkErr != nil {
			return walkErr
		}
		if !info.IsDir() {
			return nil
		}
		return m.Watcher.Add(dir)
	})
}
// Start starts a new Monitor. Immediately checks the Monitor getSnapshotFunc
// and updates the controller. It then kicks off an asynchronous event loop that
// periodically polls the getSnapshotFunc for changes until a close event is sent.
func (m *Monitor) Start(stop <-chan struct{}) {
	// Apply the initial snapshot synchronously before returning.
	m.checkAndUpdate()
	// Buffer of one so the file trigger can signal without blocking.
	c := make(chan struct{}, 1)
	m.updateCh = c
	if err := fileTrigger(m.root, m.updateCh, stop); err != nil {
		// Watching failed; the monitor still serves the initial snapshot.
		log.Errorf("Unable to setup FileTrigger for %s: %v", m.root, err)
	}
	// Run the close loop asynchronously.
	go func() {
		for {
			select {
			case <-c:
				log.Infof("Triggering reload of file configuration")
				m.checkAndUpdate()
			case <-stop:
				return
			}
		}
	}()
}
// checkAndUpdate fetches a fresh snapshot, diffs it against the previously
// applied one, and pushes the resulting creates/updates/deletes to the store.
// The merge below assumes both lists are ordered consistently with compareIds
// (see compareIds: "Used for sorting config arrays").
func (m *Monitor) checkAndUpdate() {
	newConfigs, err := m.getSnapshotFunc()
	// If an error exists then log it and return to running the check and update
	// Do not edit the local []*model.config until the connection has been reestablished
	// The error will only come from a directory read error or a gRPC connection error
	if err != nil {
		log.Warnf("checkAndUpdate Error Caught %s: %v\n", m.name, err)
		return
	}
	// make a deep copy of newConfigs to prevent data race
	copyConfigs := make([]*config.Config, 0)
	for _, config := range newConfigs {
		cpy := config.DeepCopy()
		copyConfigs = append(copyConfigs, &cpy)
	}
	// Compare the new list to the previous one and detect changes.
	oldLen := len(m.configs)
	newLen := len(newConfigs)
	oldIndex, newIndex := 0, 0
	for oldIndex < oldLen && newIndex < newLen {
		oldConfig := m.configs[oldIndex]
		newConfig := newConfigs[newIndex]
		if v := compareIds(oldConfig, newConfig); v < 0 {
			// Key present before but not now: deleted.
			m.deleteConfig(oldConfig)
			oldIndex++
		} else if v > 0 {
			// Key present now but not before: created.
			m.createConfig(newConfig)
			newIndex++
		} else {
			// version may change without content changing
			oldConfig.Meta.ResourceVersion = newConfig.Meta.ResourceVersion
			if !reflect.DeepEqual(oldConfig, newConfig) {
				m.updateConfig(newConfig)
			}
			oldIndex++
			newIndex++
		}
	}
	// Detect remaining deletions
	for ; oldIndex < oldLen; oldIndex++ {
		m.deleteConfig(m.configs[oldIndex])
	}
	// Detect remaining additions
	for ; newIndex < newLen; newIndex++ {
		m.createConfig(newConfigs[newIndex])
	}
	// Save the updated list.
	m.configs = copyConfigs
}
// createConfig pushes a newly discovered config into the backing store,
// logging (but not propagating) any failure.
func (m *Monitor) createConfig(c *config.Config) {
	_, err := m.store.Create(*c)
	if err != nil {
		log.Warnf("Failed to create config %s %s/%s: %v (%+v)", c.GroupVersionKind, c.Namespace, c.Name, err, *c)
	}
}
// updateConfig writes a changed config to the backing store, carrying over
// metadata from the stored copy; failures are logged, not propagated.
func (m *Monitor) updateConfig(c *config.Config) {
	// Set the resource version and create timestamp based on the existing config.
	prev := m.store.Get(c.GroupVersionKind, c.Name, c.Namespace)
	if prev != nil {
		c.ResourceVersion = prev.ResourceVersion
		c.CreationTimestamp = prev.CreationTimestamp
	}
	_, err := m.store.Update(*c)
	if err != nil {
		log.Warnf("Failed to update config (%+v): %v ", *c, err)
	}
}
// deleteConfig removes a config from the backing store; failures are logged,
// not propagated.
func (m *Monitor) deleteConfig(c *config.Config) {
	err := m.store.Delete(c.GroupVersionKind, c.Name, c.Namespace, nil)
	if err != nil {
		log.Warnf("Failed to delete config (%+v): %v ", *c, err)
	}
}
// compareIds compares the IDs (i.e. Namespace, GroupVersionKind, and Name) of the two configs and returns
// 0 if a == b, -1 if a < b, and 1 if a > b. Used for sorting config arrays.
func compareIds(a, b *config.Config) int {
	aKey, bKey := a.Key(), b.Key()
	return strings.Compare(aKey, bKey)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
"sync"
"istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/kube/multicluster"
"istio.io/istio/pkg/log"
)
// Multicluster structure holds the remote kube Controllers and multicluster specific attributes.
type Multicluster struct {
	// remoteKubeControllers holds one credentials controller per known cluster.
	remoteKubeControllers map[cluster.ID]*CredentialsController
	m                     sync.Mutex // protects remoteKubeControllers and secretHandlers
	// configCluster is the cluster istiod reads config from; ForCluster always
	// appends its controller as the final lookup fallback.
	configCluster cluster.ID
	// secretHandlers are registered with each cluster's controller as clusters are added.
	secretHandlers []func(name string, namespace string)
}

// Compile-time assertion that Multicluster satisfies the multicluster controller interface.
var _ credentials.MulticlusterController = &Multicluster{}
// NewMulticluster builds an empty Multicluster rooted at the given config cluster.
func NewMulticluster(configCluster cluster.ID) *Multicluster {
	return &Multicluster{
		remoteKubeControllers: map[cluster.ID]*CredentialsController{},
		configCluster:         configCluster,
	}
}
// ClusterAdded sets up a credentials controller for a newly discovered cluster.
func (m *Multicluster) ClusterAdded(cluster *multicluster.Cluster, _ <-chan struct{}) {
	log.Infof("initializing Kubernetes credential reader for cluster %v", cluster.ID)
	controller := NewCredentialsController(cluster.Client)
	m.m.Lock()
	defer m.m.Unlock()
	m.addCluster(cluster, controller)
}
// ClusterUpdated swaps in a fresh credentials controller for an existing cluster.
func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, _ <-chan struct{}) {
	controller := NewCredentialsController(cluster.Client)
	m.m.Lock()
	defer m.m.Unlock()
	m.deleteCluster(cluster.ID)
	m.addCluster(cluster, controller)
}
// ClusterDeleted drops the credentials controller for a removed cluster.
func (m *Multicluster) ClusterDeleted(key cluster.ID) {
	m.m.Lock()
	defer m.m.Unlock()
	// Use the shared deleteCluster helper instead of an inline delete, for
	// consistency with ClusterUpdated and so removal logic has a single home.
	m.deleteCluster(key)
}
// addCluster records the controller for the cluster and wires up every
// previously registered secret handler to it. Callers must hold m.m.
func (m *Multicluster) addCluster(cluster *multicluster.Cluster, sc *CredentialsController) {
	m.remoteKubeControllers[cluster.ID] = sc
	for _, onCredential := range m.secretHandlers {
		sc.AddEventHandler(onCredential)
	}
}

// deleteCluster drops the controller for the given cluster. Callers must hold m.m.
func (m *Multicluster) deleteCluster(key cluster.ID) {
	delete(m.remoteKubeControllers, key)
}
// ForCluster returns an aggregate Controller for the given cluster: credential
// lookups try the proxy's cluster first (when it is not the config cluster)
// and then fall back to the config cluster, while authorization always uses
// the proxy's own cluster. Returns an error for unknown clusters.
func (m *Multicluster) ForCluster(clusterID cluster.ID) (credentials.Controller, error) {
	m.m.Lock()
	defer m.m.Unlock()
	// Single map lookup instead of a presence check followed by repeated indexing.
	sc, found := m.remoteKubeControllers[clusterID]
	if !found {
		return nil, fmt.Errorf("cluster %v is not configured", clusterID)
	}
	agg := &AggregateController{}
	agg.controllers = []*CredentialsController{}
	agg.authController = sc
	if clusterID != m.configCluster {
		// If the request cluster is not the config cluster, we will append it and use it for auth
		// This means we will prioritize the proxy cluster, then the config cluster for credential lookup
		// Authorization will always use the proxy cluster.
		agg.controllers = append(agg.controllers, sc)
	}
	agg.controllers = append(agg.controllers, m.remoteKubeControllers[m.configCluster])
	return agg, nil
}
// AddSecretHandler registers h for secret events on all current clusters and
// records it so clusters added later receive it as well.
func (m *Multicluster) AddSecretHandler(h func(name string, namespace string)) {
	m.m.Lock()
	defer m.m.Unlock()
	// The append must happen under the lock: addCluster iterates secretHandlers
	// while holding m.m, so an unguarded append here would race with concurrent
	// cluster additions.
	m.secretHandlers = append(m.secretHandlers, h)
	for _, c := range m.remoteKubeControllers {
		c.AddEventHandler(h)
	}
}
// AggregateController serves credential lookups across several clusters while
// delegating authorization to a single designated cluster.
type AggregateController struct {
	// controllers to use to look up certs. Generally this will consist of the primary (config) cluster
	// and a single remote cluster where the proxy resides
	controllers []*CredentialsController
	// authController handles Authorize calls; set by ForCluster to the proxy's own cluster.
	authController *CredentialsController
}

// Compile-time assertion that AggregateController satisfies the Controller interface.
var _ credentials.Controller = &AggregateController{}
// GetCertInfo asks each cluster's controller in order and returns the first
// successful result; if every lookup fails, the first error seen is returned.
func (a *AggregateController) GetCertInfo(name, namespace string) (certInfo *credentials.CertInfo, err error) {
	// Search through all clusters, find first non-empty result
	var firstError error
	for _, controller := range a.controllers {
		info, lookupErr := controller.GetCertInfo(name, namespace)
		if lookupErr == nil {
			return info, nil
		}
		if firstError == nil {
			firstError = lookupErr
		}
	}
	return nil, firstError
}
// GetCaCert asks each cluster's controller in order and returns the first
// successful result; if every lookup fails, the first error seen is returned.
func (a *AggregateController) GetCaCert(name, namespace string) (certInfo *credentials.CertInfo, err error) {
	// Search through all clusters, find first non-empty result
	var firstError error
	for _, controller := range a.controllers {
		info, lookupErr := controller.GetCaCert(name, namespace)
		if lookupErr == nil {
			return info, nil
		}
		if firstError == nil {
			firstError = lookupErr
		}
	}
	return nil, firstError
}
// Authorize delegates the access check to the designated auth cluster's controller.
func (a *AggregateController) Authorize(serviceAccount, namespace string) error {
	return a.authController.Authorize(serviceAccount, namespace)
}
// AddEventHandler is a no-op on the aggregate view; handlers are wired onto the
// per-cluster controllers by Multicluster.AddSecretHandler instead.
func (a *AggregateController) AddEventHandler(f func(name string, namespace string)) {
	// no ops
}
// GetDockerCredential asks each cluster's controller in order and returns the
// first successful result; if every lookup fails, the first error seen is returned.
func (a *AggregateController) GetDockerCredential(name, namespace string) ([]byte, error) {
	// Search through all clusters, find first non-empty result
	var firstError error
	for _, controller := range a.controllers {
		cred, lookupErr := controller.GetDockerCredential(name, namespace)
		if lookupErr == nil {
			return cred, nil
		}
		if firstError == nil {
			firstError = lookupErr
		}
	}
	return nil, firstError
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"time"
authorizationv1 "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
sa "k8s.io/apiserver/pkg/authentication/serviceaccount"
authorizationv1client "k8s.io/client-go/kubernetes/typed/authorization/v1"
"istio.io/istio/pilot/pkg/credentials"
securitymodel "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
)
// Well-known data keys under which certificate material is stored in
// kubernetes generic and kubernetes.io/tls secrets.
const (
	// The ID/name for the certificate chain in kubernetes generic secret.
	GenericScrtCert = "cert"
	// The ID/name for the private key in kubernetes generic secret.
	GenericScrtKey = "key"
	// The ID/name for the CA certificate in kubernetes generic secret.
	GenericScrtCaCert = "cacert"
	// The ID/name for the CRL in kubernetes generic secret.
	GenericScrtCRL = "crl"
	// The ID/name for the certificate chain in kubernetes tls secret.
	TLSSecretCert = "tls.crt"
	// The ID/name for the k8sKey in kubernetes tls secret.
	TLSSecretKey = "tls.key"
	// The ID/name for the certificate OCSP staple in kubernetes tls secret.
	TLSSecretOcspStaple = "tls.ocsp-staple"
	// The ID/name for the CA certificate in kubernetes tls secret.
	TLSSecretCaCert = "ca.crt"
	// The ID/name for the CRL in kubernetes tls secret.
	TLSSecretCrl = "ca.crl"
)
// CredentialsController reads certificate material and docker credentials out
// of a single cluster's secrets, and performs cached SubjectAccessReview
// authorization checks for callers.
type CredentialsController struct {
	// secrets is a filtered informer-backed client over the cluster's secrets.
	secrets kclient.Client[*v1.Secret]
	// sar creates SubjectAccessReviews for Authorize.
	sar authorizationv1client.SubjectAccessReviewInterface

	// mu guards authorizationCache.
	mu                 sync.RWMutex
	authorizationCache map[authorizationKey]authorizationResponse
}

// authorizationKey is the cache key for authorization decisions — the username
// produced by sa.MakeUsername (see Authorize).
type authorizationKey string

// authorizationResponse is a cached authorization decision with an expiry.
type authorizationResponse struct {
	expiration time.Time
	// authorized is nil when access was granted, otherwise the denial/error.
	authorized error
}

// Compile-time assertion that CredentialsController satisfies the Controller interface.
var _ credentials.Controller = &CredentialsController{}
// NewCredentialsController builds a CredentialsController for the given
// cluster, backed by a field-selector-filtered secret informer and the
// cluster's SubjectAccessReview API.
func NewCredentialsController(kc kube.Client) *CredentialsController {
	// We only care about TLS certificates and docker config for Wasm image pulling.
	// Unfortunately, it is not as simple as selecting type=kubernetes.io/tls and type=kubernetes.io/dockerconfigjson.
	// Because of legacy reasons and supporting an extra ca.crt, we also support generic types.
	// Its also likely users have started to use random types and expect them to continue working.
	// This makes the assumption we will never care about Helm secrets or SA token secrets - two common
	// large secrets in clusters.
	// This is a best effort optimization only; the code would behave correctly if we watched all secrets.
	fieldSelector := fields.AndSelectors(
		fields.OneTermNotEqualSelector("type", "helm.sh/release.v1"),
		fields.OneTermNotEqualSelector("type", string(v1.SecretTypeServiceAccountToken))).String()
	secrets := kclient.NewFiltered[*v1.Secret](kc, kclient.Filter{
		FieldSelector: fieldSelector,
	})
	return &CredentialsController{
		secrets:            secrets,
		sar:                kc.Kube().AuthorizationV1().SubjectAccessReviews(),
		authorizationCache: make(map[authorizationKey]authorizationResponse),
	}
}
// cacheTTL is the base lifetime of a cached authorization decision.
const cacheTTL = time.Minute

// clearExpiredCache iterates through the cache and removes all expired entries. Should be called with mutex held.
func (s *CredentialsController) clearExpiredCache() {
	// Capture the current time once; calling time.Now() per entry is
	// loop-invariant work and could theoretically skew mid-sweep.
	now := time.Now()
	for k, v := range s.authorizationCache {
		if v.expiration.Before(now) {
			delete(s.authorizationCache, k)
		}
	}
}
// cachedAuthorization checks the authorization cache
// It returns the cached decision for user (nil means authorized) and whether a
// live cache entry existed. Expired entries are evicted before the lookup.
// nolint
func (s *CredentialsController) cachedAuthorization(user string) (error, bool) {
	key := authorizationKey(user)
	s.mu.Lock()
	defer s.mu.Unlock()
	s.clearExpiredCache()
	// No need to check expiration, we will evict expired entries above
	got, f := s.authorizationCache[key]
	if !f {
		return nil, false
	}
	return got.authorized, true
}
// insertCache stores an authorization decision for user. (The previous comment
// here was a copy-paste of cachedAuthorization's and did not describe this
// function.) Successful (nil) responses are cached 5x longer than failures.
func (s *CredentialsController) insertCache(user string, response error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	key := authorizationKey(user)
	expDelta := cacheTTL
	if response == nil {
		// Cache success a bit longer, there is no need to quickly revoke access
		expDelta *= 5
	}
	log.Debugf("cached authorization for user %s: %v", user, response)
	s.authorizationCache[key] = authorizationResponse{
		expiration: time.Now().Add(expDelta),
		authorized: response,
	}
}
// Authorize checks (via SubjectAccessReview) whether the given service account
// may list secrets in namespace. Decisions — both grants and denials — are
// cached; see insertCache for the TTLs.
func (s *CredentialsController) Authorize(serviceAccount, namespace string) error {
	user := sa.MakeUsername(namespace, serviceAccount)
	if cached, f := s.cachedAuthorization(user); f {
		return cached
	}
	// Closure keeps a single exit path so the result is always cached below.
	err := func() error {
		resp, err := s.sar.Create(context.Background(), &authorizationv1.SubjectAccessReview{
			ObjectMeta: metav1.ObjectMeta{},
			Spec: authorizationv1.SubjectAccessReviewSpec{
				ResourceAttributes: &authorizationv1.ResourceAttributes{
					Namespace: namespace,
					Verb:      "list",
					Resource:  "secrets",
				},
				User: user,
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return err
		}
		if !resp.Status.Allowed {
			return fmt.Errorf("%s/%s is not authorized to read secrets: %v", serviceAccount, namespace, resp.Status.Reason)
		}
		return nil
	}()
	s.insertCache(user, err)
	return err
}
// GetCertInfo looks up the named secret and extracts its server certificate,
// key, and related material.
func (s *CredentialsController) GetCertInfo(name, namespace string) (certInfo *credentials.CertInfo, err error) {
	scrt := s.secrets.Get(name, namespace)
	if scrt == nil {
		return nil, fmt.Errorf("secret %v/%v not found", namespace, name)
	}
	return ExtractCertInfo(scrt)
}
// GetCaCert looks up the named secret and extracts the CA certificate from it.
// If the secret is missing, it retries with the SDS CA suffix trimmed off.
func (s *CredentialsController) GetCaCert(name, namespace string) (certInfo *credentials.CertInfo, err error) {
	if scrt := s.secrets.Get(name, namespace); scrt != nil {
		return extractRoot(scrt)
	}
	// Could not fetch cert, look for secret without -cacert suffix
	fallbackName := strings.TrimSuffix(name, securitymodel.SdsCaSuffix)
	scrt := s.secrets.Get(fallbackName, namespace)
	if scrt == nil {
		return nil, fmt.Errorf("secret %v/%v not found", namespace, fallbackName)
	}
	return extractRoot(scrt)
}
// GetDockerCredential returns the .dockerconfigjson payload of the named
// secret, which must exist and be of type kubernetes.io/dockerconfigjson.
func (s *CredentialsController) GetDockerCredential(name, namespace string) ([]byte, error) {
	scrt := s.secrets.Get(name, namespace)
	switch {
	case scrt == nil:
		return nil, fmt.Errorf("secret %v/%v not found", namespace, name)
	case scrt.Type != v1.SecretTypeDockerConfigJson:
		return nil, fmt.Errorf("type of secret %v/%v is not %v", namespace, name, v1.SecretTypeDockerConfigJson)
	}
	cred, found := scrt.Data[v1.DockerConfigJsonKey]
	if !found {
		return nil, fmt.Errorf("cannot find docker config at secret %v/%v", namespace, name)
	}
	return cred, nil
}
// hasKeys reports whether every key in keys is present in d, regardless of
// whether its value is empty.
func hasKeys(d map[string][]byte, keys ...string) bool {
	for _, key := range keys {
		if _, present := d[key]; !present {
			return false
		}
	}
	return true
}
// hasValue reports whether every key in keys maps to a non-empty value in d.
// A missing key counts as empty.
func hasValue(d map[string][]byte, keys ...string) bool {
	for _, key := range keys {
		if len(d[key]) == 0 {
			return false
		}
	}
	return true
}
// ExtractCertInfo extracts server key, certificate, and OCSP staple
// from a secret, checking the generic-secret key names first and the
// kubernetes.io/tls key names second.
func ExtractCertInfo(scrt *v1.Secret) (certInfo *credentials.CertInfo, err error) {
	data := scrt.Data
	if hasValue(data, GenericScrtCert, GenericScrtKey) {
		return &credentials.CertInfo{
			Cert: data[GenericScrtCert],
			Key:  data[GenericScrtKey],
			CRL:  data[GenericScrtCRL],
		}, nil
	}
	if hasValue(data, TLSSecretCert, TLSSecretKey) {
		return &credentials.CertInfo{
			Cert:   data[TLSSecretCert],
			Key:    data[TLSSecretKey],
			Staple: data[TLSSecretOcspStaple],
			CRL:    data[TLSSecretCrl],
		}, nil
	}
	// No cert found. Try to generate a helpful error message
	if hasKeys(data, GenericScrtCert, GenericScrtKey) {
		return nil, fmt.Errorf("found keys %q and %q, but they were empty", GenericScrtCert, GenericScrtKey)
	}
	if hasKeys(data, TLSSecretCert, TLSSecretKey) {
		return nil, fmt.Errorf("found keys %q and %q, but they were empty", TLSSecretCert, TLSSecretKey)
	}
	found := truncatedKeysMessage(data)
	return nil, fmt.Errorf("found secret, but didn't have expected keys (%s and %s) or (%s and %s); found: %s",
		GenericScrtCert, GenericScrtKey, TLSSecretCert, TLSSecretKey, found)
}
// truncatedKeysMessage renders the keys of data as a sorted, comma-separated
// list for error messages, truncating after the first three entries.
func truncatedKeysMessage(data map[string][]byte) string {
	keys := make([]string, 0, len(data))
	for key := range data {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	if len(keys) < 3 {
		return strings.Join(keys, ", ")
	}
	return fmt.Sprintf("%s, and %d more...", strings.Join(keys[:3], ", "), len(keys)-3)
}
// extractRoot extracts the root certificate
// from a secret, checking the generic-secret key name first and the
// kubernetes.io/tls key name second.
func extractRoot(scrt *v1.Secret) (certInfo *credentials.CertInfo, err error) {
	data := scrt.Data
	if hasValue(data, GenericScrtCaCert) {
		return &credentials.CertInfo{
			Cert: data[GenericScrtCaCert],
			CRL:  data[GenericScrtCRL],
		}, nil
	}
	if hasValue(data, TLSSecretCaCert) {
		return &credentials.CertInfo{
			Cert: data[TLSSecretCaCert],
			CRL:  data[TLSSecretCrl],
		}, nil
	}
	// No cert found. Try to generate a helpful error message
	if hasKeys(data, GenericScrtCaCert) {
		return nil, fmt.Errorf("found key %q, but it was empty", GenericScrtCaCert)
	}
	if hasKeys(data, TLSSecretCaCert) {
		return nil, fmt.Errorf("found key %q, but it was empty", TLSSecretCaCert)
	}
	found := truncatedKeysMessage(data)
	return nil, fmt.Errorf("found secret, but didn't have expected keys %s or %s; found: %s",
		GenericScrtCaCert, TLSSecretCaCert, found)
}
// AddEventHandler registers h to be invoked with the name and namespace of
// every secret event observed by the informer.
func (s *CredentialsController) AddEventHandler(h func(name string, namespace string)) {
	// register handler before informer starts
	s.secrets.AddEventHandler(controllers.ObjectHandler(func(o controllers.Object) {
		h(o.GetName(), o.GetNamespace())
	}))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package features
import (
"time"
"go.uber.org/atomic"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/log"
)
// Define experimental features here.
var (
	// FilterGatewayClusterConfig controls if a subset of clusters(only those required) should be pushed to gateways
	FilterGatewayClusterConfig = env.Register("PILOT_FILTER_GATEWAY_CLUSTER_CONFIG", false,
		"If enabled, Pilot will send only clusters that referenced in gateway virtual services attached to gateway").Get()

	// SendUnhealthyEndpoints wraps the flag in an atomic.Bool, so it can be read/toggled at runtime.
	SendUnhealthyEndpoints = atomic.NewBool(env.Register(
		"PILOT_SEND_UNHEALTHY_ENDPOINTS",
		false,
		"If enabled, Pilot will include unhealthy endpoints in EDS pushes and even if they are sent Envoy does not use them for load balancing."+
			" To avoid, sending traffic to non ready endpoints, enabling this flag, disables panic threshold in Envoy i.e. Envoy does not load balance requests"+
			" to unhealthy/non-ready hosts even if the percentage of healthy hosts fall below minimum health percentage(panic threshold).",
	).Get())

	EnablePersistentSessionFilter = env.Register(
		"PILOT_ENABLE_PERSISTENT_SESSION_FILTER",
		false,
		"If enabled, Istiod sets up persistent session filter for listeners, if services have 'PILOT_PERSISTENT_SESSION_LABEL' set.",
	).Get()

	PersistentSessionLabel = env.Register(
		"PILOT_PERSISTENT_SESSION_LABEL",
		"istio.io/persistent-session",
		"If not empty, services with this label will use cookie based persistent sessions",
	).Get()

	PersistentSessionHeaderLabel = env.Register(
		"PILOT_PERSISTENT_SESSION_HEADER_LABEL",
		"istio.io/persistent-session-header",
		"If not empty, services with this label will use header based persistent sessions",
	).Get()

	DrainingLabel = env.Register(
		"PILOT_DRAINING_LABEL",
		"istio.io/draining",
		"If not empty, endpoints with the label value present will be sent with status DRAINING.",
	).Get()

	EnableDistributionTracking = env.Register(
		"PILOT_ENABLE_CONFIG_DISTRIBUTION_TRACKING",
		false,
		"If enabled, Pilot will assign meaningful nonces to each Envoy configuration message, and allow "+
			"users to interrogate which envoy has which config from the debug interface.",
	).Get()

	DistributionHistoryRetention = env.Register(
		"PILOT_DISTRIBUTION_HISTORY_RETENTION",
		time.Minute*1,
		"If enabled, Pilot will keep track of old versions of distributed config for this duration.",
	).Get()

	MCSAPIGroup = env.Register("MCS_API_GROUP", "multicluster.x-k8s.io",
		"The group to be used for the Kubernetes Multi-Cluster Services (MCS) API.").Get()

	MCSAPIVersion = env.Register("MCS_API_VERSION", "v1alpha1",
		"The version to be used for the Kubernetes Multi-Cluster Services (MCS) API.").Get()

	EnableMCSAutoExport = env.Register(
		"ENABLE_MCS_AUTO_EXPORT",
		false,
		"If enabled, istiod will automatically generate Kubernetes "+
			"Multi-Cluster Services (MCS) ServiceExport resources for every "+
			"service in the mesh. Services defined to be cluster-local in "+
			"MeshConfig are excluded.",
	).Get()

	EnableMCSServiceDiscovery = env.Register(
		"ENABLE_MCS_SERVICE_DISCOVERY",
		false,
		"If enabled, istiod will enable Kubernetes Multi-Cluster "+
			"Services (MCS) service discovery mode. In this mode, service "+
			"endpoints in a cluster will only be discoverable within the "+
			"same cluster unless explicitly exported via ServiceExport.").Get()

	// EnableMCSHost is force-disabled unless EnableMCSServiceDiscovery is also set
	// (the trailing && below), matching the requirement stated in its description.
	EnableMCSHost = env.Register(
		"ENABLE_MCS_HOST",
		false,
		"If enabled, istiod will configure a Kubernetes Multi-Cluster "+
			"Services (MCS) host (<svc>.<namespace>.svc.clusterset.local) "+
			"for each service exported (via ServiceExport) in at least one "+
			"cluster. Clients must, however, be able to successfully lookup "+
			"these DNS hosts. That means that either Istio DNS interception "+
			"must be enabled or an MCS controller must be used. Requires "+
			"that ENABLE_MCS_SERVICE_DISCOVERY also be enabled.").Get() &&
		EnableMCSServiceDiscovery

	// EnableMCSClusterLocal is likewise gated on EnableMCSHost via the trailing &&.
	EnableMCSClusterLocal = env.Register(
		"ENABLE_MCS_CLUSTER_LOCAL",
		false,
		"If enabled, istiod will treat the host "+
			"`<svc>.<namespace>.svc.cluster.local` as defined by the "+
			"Kubernetes Multi-Cluster Services (MCS) spec. In this mode, "+
			"requests to `cluster.local` will be routed to only those "+
			"endpoints residing within the same cluster as the client. "+
			"Requires that both ENABLE_MCS_SERVICE_DISCOVERY and "+
			"ENABLE_MCS_HOST also be enabled.").Get() &&
		EnableMCSHost

	EnableAnalysis = env.Register(
		"PILOT_ENABLE_ANALYSIS",
		false,
		"If enabled, pilot will run istio analyzers and write analysis errors to the Status field of any "+
			"Istio Resources",
	).Get()

	// AnalysisInterval is clamped: values under one second fall back to the 10s default.
	AnalysisInterval = func() time.Duration {
		val, _ := env.Register(
			"PILOT_ANALYSIS_INTERVAL",
			10*time.Second,
			"If analysis is enabled, pilot will run istio analyzers using this value as interval in seconds "+
				"Istio Resources",
		).Lookup()
		if val < 1*time.Second {
			log.Warnf("PILOT_ANALYSIS_INTERVAL %s is too small, it will be set to default 10 seconds", val.String())
			return 10 * time.Second
		}
		return val
	}()

	EnableStatus = env.Register(
		"PILOT_ENABLE_STATUS",
		false,
		"If enabled, pilot will update the CRD Status field of all istio resources with reconciliation status.",
	).Get()

	EnableGatewayAPI = env.Register("PILOT_ENABLE_GATEWAY_API", true,
		"If this is set to true, support for Kubernetes gateway-api (github.com/kubernetes-sigs/gateway-api) will "+
			" be enabled. In addition to this being enabled, the gateway-api CRDs need to be installed.").Get()

	EnableAlphaGatewayAPI = env.Register("PILOT_ENABLE_ALPHA_GATEWAY_API", false,
		"If this is set to true, support for alpha APIs in the Kubernetes gateway-api (github.com/kubernetes-sigs/gateway-api) will "+
			" be enabled. In addition to this being enabled, the gateway-api CRDs need to be installed.").Get()

	EnableGatewayAPIStatus = env.Register("PILOT_ENABLE_GATEWAY_API_STATUS", true,
		"If this is set to true, gateway-api resources will have status written to them").Get()

	EnableGatewayAPIDeploymentController = env.Register("PILOT_ENABLE_GATEWAY_API_DEPLOYMENT_CONTROLLER", true,
		"If this is set to true, gateway-api resources will automatically provision in cluster deployment, services, etc").Get()

	EnableGatewayAPIGatewayClassController = env.Register("PILOT_ENABLE_GATEWAY_API_GATEWAYCLASS_CONTROLLER", true,
		"If this is set to true, istiod will create and manage its default GatewayClasses").Get()

	// EnableHBONE provides a global Pilot flag for enabling HBONE.
	// Generally, this could be a per-proxy setting (and is, via ENABLE_HBONE node metadata).
	// However, there are some code paths that impact all clients, hence the global flag.
	// Warning: do not enable by default until endpoint_builder.go caching is fixed (and possibly other locations).
	EnableHBONE = env.Register(
		"PILOT_ENABLE_HBONE",
		false,
		"If enabled, HBONE support can be configured for proxies. "+
			"Note: proxies must opt in on a per-proxy basis with ENABLE_HBONE to actually get HBONE config, in addition to this flag.").Get()

	EnableAmbientControllers = env.Register(
		"PILOT_ENABLE_AMBIENT_CONTROLLERS",
		false,
		"If enabled, controllers required for ambient will run. This is required to run ambient mesh.").Get()

	DeltaXds = env.Register("ISTIO_DELTA_XDS", false,
		"If enabled, pilot will only send the delta configs as opposed to the state of the world on a "+
			"Resource Request. This feature uses the delta xds api, but does not currently send the actual deltas.").Get()

	EnableQUICListeners = env.Register("PILOT_ENABLE_QUIC_LISTENERS", false,
		"If true, QUIC listeners will be generated wherever there are listeners terminating TLS on gateways "+
			"if the gateway service exposes a UDP port with the same number (for example 443/TCP and 443/UDP)").Get()

	EnableTLSOnSidecarIngress = env.Register("ENABLE_TLS_ON_SIDECAR_INGRESS", false,
		"If enabled, the TLS configuration on Sidecar.ingress will take effect").Get()

	VerifySDSCertificate = env.Register("VERIFY_SDS_CERTIFICATE", true,
		"If enabled, certificates fetched from SDS server will be verified before sending back to proxy.").Get()

	EnableHCMInternalNetworks = env.Register("ENABLE_HCM_INTERNAL_NETWORKS", false,
		"If enable, endpoints defined in mesh networks will be configured as internal addresses in Http Connection Manager").Get()

	EnableEnhancedResourceScoping = env.Register("ENABLE_ENHANCED_RESOURCE_SCOPING", false,
		"If enabled, meshConfig.discoverySelectors will limit the CustomResource configurations(like Gateway,VirtualService,DestinationRule,Ingress, etc)"+
			"that can be processed by pilot. This will also restrict the root-ca certificate distribution.").Get()

	EnableLeaderElection = env.Register("ENABLE_LEADER_ELECTION", true,
		"If enabled (default), starts a leader election client and gains leadership before executing controllers. "+
			"If false, it assumes that only one instance of istiod is running and skips leader election.").Get()

	EnableSidecarServiceInboundListenerMerge = env.Register(
		"PILOT_ALLOW_SIDECAR_SERVICE_INBOUND_LISTENER_MERGE",
		false,
		"If set, it allows creating inbound listeners for service ports and sidecar ingress listeners ",
	).Get()

	EnableDualStack = env.RegisterBoolVar("ISTIO_DUAL_STACK", false,
		"If true, Istio will enable the Dual Stack feature.").Get()

	EnableOptimizedServicePush = env.RegisterBoolVar("ISTIO_ENABLE_OPTIMIZED_SERVICE_PUSH", true,
		"If enabled, Istiod will not push changes on arbitrary annotation change.").Get()

	// This is used in injection templates, it is not unused.
	// Note: unlike most flags here this keeps the env.Variable itself (no .Get()),
	// presumably so the template machinery can evaluate it — confirm before changing.
	EnableNativeSidecars = env.Register("ENABLE_NATIVE_SIDECARS", false,
		"If set, used Kubernetes native Sidecar container support. Requires SidecarContainer feature flag.")

	OptimizedConfigRebuild = env.Register("ENABLE_OPTIMIZED_CONFIG_REBUILD", true,
		"If enabled, pilot will only rebuild config for resources that have changed").Get()

	PersistOldestWinsHeuristicForVirtualServiceHostMatching = env.Register("PERSIST_OLDEST_FIRST_HEURISTIC_FOR_VIRTUAL_SERVICE_HOST_MATCHING", false,
		"If enabled, istiod will persist the oldest first heuristic for subtly conflicting traffic policy selection"+
			"(such as with overlapping wildcard hosts)").Get()
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package features
import (
"strings"
"time"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/util/sets"
)
// Pilot feature flags. Each flag is registered from an environment variable and
// resolved once, at package initialization, via Get().
var (
	// HTTP10 will add "accept_http_10" to http outbound listeners. Can also be set only for specific sidecars via meta.
	HTTP10 = env.Register(
		"PILOT_HTTP10",
		false,
		"Enables the use of HTTP 1.0 in the outbound HTTP listeners, to support legacy applications.",
	).Get()

	ScopeGatewayToNamespace = env.Register(
		"PILOT_SCOPE_GATEWAY_TO_NAMESPACE",
		false,
		"If enabled, a gateway workload can only select gateway resources in the same namespace. "+
			"Gateways with same selectors in different namespaces will not be applicable.",
	).Get()

	// JwksFetchMode parses the raw PILOT_JWT_ENABLE_REMOTE_JWKS value into the jwt enum.
	JwksFetchMode = func() jwt.JwksFetchMode {
		v := env.Register(
			"PILOT_JWT_ENABLE_REMOTE_JWKS",
			"false",
			"Mode of fetching JWKs from JwksUri in RequestAuthentication. Supported value: "+
				"istiod, false, hybrid, true, envoy. The client fetching JWKs is as following: "+
				"istiod/false - Istiod; hybrid/true - Envoy and fallback to Istiod if JWKs server is external; "+
				"envoy - Envoy.",
		).Get()
		return jwt.ConvertToJwksFetchMode(v)
	}()

	// IstiodServiceCustomHost allow user to bring a custom address or multiple custom addresses for istiod server
	// for examples: 1. istiod.mycompany.com 2. istiod.mycompany.com,istiod-canary.mycompany.com
	IstiodServiceCustomHost = env.Register("ISTIOD_CUSTOM_HOST", "",
		"Custom host name of istiod that istiod signs the server cert. "+
			"Multiple custom host names are supported, and multiple values are separated by commas.").Get()

	PilotCertProvider = env.Register("PILOT_CERT_PROVIDER", constants.CertProviderIstiod,
		"The provider of Pilot DNS certificate.").Get()

	JwtPolicy = env.Register("JWT_POLICY", jwt.PolicyThirdParty,
		"The JWT validation policy.").Get()

	ClusterName = env.Register("CLUSTER_ID", "Kubernetes",
		"Defines the cluster and service registry that this Istiod instance belongs to").Get()

	ExternalIstiod = env.Register("EXTERNAL_ISTIOD", false,
		"If this is set to true, one Istiod will control remote clusters including CA.").Get()

	EnableCAServer = env.Register("ENABLE_CA_SERVER", true,
		"If this is set to false, will not create CA server in istiod.").Get()

	EnableDebugOnHTTP = env.Register("ENABLE_DEBUG_ON_HTTP", true,
		"If this is set to false, the debug interface will not be enabled, recommended for production").Get()

	EnableUnsafeAdminEndpoints = env.Register("UNSAFE_ENABLE_ADMIN_ENDPOINTS", false,
		"If this is set to true, dangerous admin endpoints will be exposed on the debug interface. Not recommended for production.").Get()

	EnableServiceEntrySelectPods = env.Register("PILOT_ENABLE_SERVICEENTRY_SELECT_PODS", true,
		"If enabled, service entries with selectors will select pods from the cluster. "+
			"It is safe to disable it if you are quite sure you don't need this feature").Get()

	// Switched from the legacy RegisterBoolVar alias to the generic Register for
	// consistency with every other bool flag in this block.
	EnableK8SServiceSelectWorkloadEntries = env.Register("PILOT_ENABLE_K8S_SELECT_WORKLOAD_ENTRIES", true,
		"If enabled, Kubernetes services with selectors will select workload entries with matching labels. "+
			"It is safe to disable it if you are quite sure you don't need this feature").Get()

	InjectionWebhookConfigName = env.Register("INJECTION_WEBHOOK_CONFIG_NAME", "istio-sidecar-injector",
		"Name of the mutatingwebhookconfiguration to patch, if istioctl is not used.").Get()

	ValidationWebhookConfigName = env.Register("VALIDATION_WEBHOOK_CONFIG_NAME", "istio-istio-system",
		"If not empty, the controller will automatically patch validatingwebhookconfiguration when the CA certificate changes. "+
			"Only works in kubernetes environment.").Get()

	RemoteClusterTimeout = env.Register(
		"PILOT_REMOTE_CLUSTER_TIMEOUT",
		30*time.Second,
		"After this timeout expires, pilot can become ready without syncing data from clusters added via remote-secrets. "+
			"Setting the timeout to 0 disables this behavior.",
	).Get()

	DisableMxALPN = env.Register("PILOT_DISABLE_MX_ALPN", false,
		"If true, pilot will not put istio-peer-exchange ALPN into TLS handshake configuration.",
	).Get()

	ALPNFilter = env.Register("PILOT_ENABLE_ALPN_FILTER", true,
		"If true, pilot will add Istio ALPN filters, required for proper protocol sniffing.",
	).Get()

	WorkloadEntryAutoRegistration = env.Register("PILOT_ENABLE_WORKLOAD_ENTRY_AUTOREGISTRATION", true,
		"Enables auto-registering WorkloadEntries based on associated WorkloadGroups upon XDS connection by the workload.").Get()

	WorkloadEntryCleanupGracePeriod = env.Register("PILOT_WORKLOAD_ENTRY_GRACE_PERIOD", 10*time.Second,
		"The amount of time an auto-registered workload can remain disconnected from all Pilot instances before the "+
			"associated WorkloadEntry is cleaned up.").Get()

	WorkloadEntryHealthChecks = env.Register("PILOT_ENABLE_WORKLOAD_ENTRY_HEALTHCHECKS", true,
		"Enables automatic health checks of WorkloadEntries based on the config provided in the associated WorkloadGroup").Get()

	WorkloadEntryCrossCluster = env.Register("PILOT_ENABLE_CROSS_CLUSTER_WORKLOAD_ENTRY", true,
		"If enabled, pilot will read WorkloadEntry from other clusters, selectable by Services in that cluster.").Get()

	WasmRemoteLoadConversion = env.Register("ISTIO_AGENT_ENABLE_WASM_REMOTE_LOAD_CONVERSION", true,
		"If enabled, Istio agent will intercept ECDS resource update, downloads Wasm module, "+
			"and replaces Wasm module remote load with downloaded local module file.").Get()

	PilotJwtPubKeyRefreshInterval = env.Register(
		"PILOT_JWT_PUB_KEY_REFRESH_INTERVAL",
		20*time.Minute,
		"The interval for istiod to fetch the jwks_uri for the jwks public key.",
	).Get()

	// EnableUnsafeAssertions enables runtime checks to test assertions in our code. This should never be enabled in
	// production; when assertions fail Istio will panic.
	EnableUnsafeAssertions = env.Register(
		"UNSAFE_PILOT_ENABLE_RUNTIME_ASSERTIONS",
		false,
		"If enabled, addition runtime asserts will be performed. "+
			"These checks are both expensive and panic on failure. As a result, this should be used only for testing.",
	).Get()

	// EnableUnsafeDeltaTest enables runtime checks to test Delta XDS efficiency. This should never be enabled in
	// production.
	EnableUnsafeDeltaTest = env.Register(
		"UNSAFE_PILOT_ENABLE_DELTA_TEST",
		false,
		"If enabled, addition runtime tests for Delta XDS efficiency are added. "+
			"These checks are extremely expensive, so this should be used only for testing, not production.",
	).Get()

	SharedMeshConfig = env.Register("SHARED_MESH_CONFIG", "",
		"Additional config map to load for shared MeshConfig settings. The standard mesh config will take precedence.").Get()

	MultiRootMesh = env.Register("ISTIO_MULTIROOT_MESH", false,
		"If enabled, mesh will support certificates signed by more than one trustAnchor for ISTIO_MUTUAL mTLS").Get()

	EnableEnvoyFilterMetrics = env.Register("PILOT_ENVOY_FILTER_STATS", false,
		"If true, Pilot will collect metrics for envoy filter operations.").Get()

	EnableRouteCollapse = env.Register("PILOT_ENABLE_ROUTE_COLLAPSE_OPTIMIZATION", true,
		"If true, Pilot will merge virtual hosts with the same routes into a single virtual host, as an optimization.").Get()

	MulticlusterHeadlessEnabled = env.Register("ENABLE_MULTICLUSTER_HEADLESS", true,
		"If true, the DNS name table for a headless service will resolve to same-network endpoints in any cluster.").Get()

	ResolveHostnameGateways = env.Register("RESOLVE_HOSTNAME_GATEWAYS", true,
		"If true, hostnames in the LoadBalancer addresses of a Service will be resolved at the control plane for use in cross-network gateways.").Get()

	MultiNetworkGatewayAPI = env.Register("PILOT_MULTI_NETWORK_DISCOVER_GATEWAY_API", false,
		"If true, Pilot will discover labeled Kubernetes gateway objects as multi-network gateways.").Get()

	// InsecureKubeConfigOptions is the parsed set of allowed kubeconfig auth options.
	InsecureKubeConfigOptions = func() sets.String {
		v := env.Register(
			"PILOT_INSECURE_MULTICLUSTER_KUBECONFIG_OPTIONS",
			"",
			"Comma separated list of potentially insecure kubeconfig authentication options that are allowed for multicluster authentication. "+
				"Support values: all authProviders (`gcp`, `azure`, `exec`, `openstack`), "+
				"`clientKey`, `clientCertificate`, `tokenFile`, and `exec`.").Get()
		return sets.New(strings.Split(v, ",")...)
	}()

	CanonicalServiceForMeshExternalServiceEntry = env.Register("LABEL_CANONICAL_SERVICES_FOR_MESH_EXTERNAL_SERVICE_ENTRIES", false,
		"If enabled, metadata representing canonical services for ServiceEntry resources with a location of mesh_external will be populated "+
			"in the cluster metadata for those endpoints.").Get()

	LocalClusterSecretWatcher = env.Register("LOCAL_CLUSTER_SECRET_WATCHER", false,
		"If enabled, the cluster secret watcher will watch the namespace of the external cluster instead of config cluster").Get()

	InformerWatchNamespace = env.Register("ISTIO_WATCH_NAMESPACE", "",
		"If set, limit Kubernetes watches to a single namespace. "+
			"Warning: only a single namespace can be set.").Get()

	// This is a feature flag, can be removed if protobuf proves universally better.
	KubernetesClientContentType = env.Register("ISTIO_KUBE_CLIENT_CONTENT_TYPE", "protobuf",
		"The content type to use for Kubernetes clients. Defaults to protobuf. Valid options: [protobuf, json]").Get()

	EnableExternalNameAlias = env.Register("ENABLE_EXTERNAL_NAME_ALIAS", true,
		"If enabled, ExternalName Services will be treated as simple aliases: anywhere where we would match the concrete service, "+
			"we also match the ExternalName. In general, this mirrors Kubernetes behavior more closely. However, it means that policies (routes and DestinationRule) "+
			"cannot be applied to the ExternalName service. "+
			"If disabled, ExternalName behaves in fairly unexpected manner. Port matters, while it does not in Kubernetes. If it is a TCP port, "+
			"all traffic on that port will be matched, which can have disastrous consequences. Additionally, the destination is seen as an opaque destination; "+
			"even if it is another service in the mesh, policies such as mTLS and load balancing will not be used when connecting to it.").Get()

	// This is an experimental feature flag; it can be removed once it becomes stable, and should be introduced to the Telemetry API.
	MetricRotationInterval = env.Register("METRIC_ROTATION_INTERVAL", 0*time.Second,
		"Metric scope rotation interval, set to 0 to disable the metric scope rotation").Get()

	MetricGracefulDeletionInterval = env.Register("METRIC_GRACEFUL_DELETION_INTERVAL", 5*time.Minute,
		"Metric expiry graceful deletion interval. No-op if METRIC_ROTATION_INTERVAL is disabled.").Get()

	EnableControllerQueueMetrics = env.Register("ISTIO_ENABLE_CONTROLLER_QUEUE_METRICS", false,
		"If enabled, publishes metrics for queue depth, latency and processing times.").Get()

	ValidateWorkloadEntryIdentity = env.Register("ISTIO_WORKLOAD_ENTRY_VALIDATE_IDENTITY", true,
		"If enabled, will validate the identity of a workload matches the identity of the "+
			"WorkloadEntry it is associating with for health checks and auto registration. "+
			"This flag is added for backwards compatibility only and will be removed in future releases").Get()

	JwksResolverInsecureSkipVerify = env.Register("JWKS_RESOLVER_INSECURE_SKIP_VERIFY", false,
		"If enabled, istiod will skip verifying the certificate of the JWKS server.").Get()

	// User should not rely on builtin resource labels, this flag will be removed in future releases(1.20).
	EnableOTELBuiltinResourceLabels = env.Register("ENABLE_OTEL_BUILTIN_RESOURCE_LABELS", false,
		"If enabled, envoy will send builtin labels(e.g. node_name) via OTel sink.").Get()

	EnableSelectorBasedK8sGatewayPolicy = env.Register("ENABLE_SELECTOR_BASED_K8S_GATEWAY_POLICY", true,
		"If disabled, Gateway API gateways will ignore workloadSelector policies, only "+
			"applying policies that select the gateway with a targetRef.").Get()

	// Useful for IPv6-only EKS clusters. See https://aws.github.io/aws-eks-best-practices/networking/ipv6/ why it assigns an additional IPv4 NAT address.
	// Also see https://github.com/istio/istio/issues/46719 why this flag is required
	EnableAdditionalIpv4OutboundListenerForIpv6Only = env.Register("ISTIO_ENABLE_IPV4_OUTBOUND_LISTENER_FOR_IPV6_CLUSTERS", false,
		"If true, pilot will configure an additional IPv4 listener for outbound traffic in IPv6 only clusters, e.g. AWS EKS IPv6 only clusters.").Get()

	EnableAutoSni = env.Register("ENABLE_AUTO_SNI", true,
		"If enabled, automatically set SNI when `DestinationRules` do not specify the same").Get()

	VerifyCertAtClient = env.Register("VERIFY_CERTIFICATE_AT_CLIENT", true,
		"If enabled, certificates received by the proxy will be verified against the OS CA certificate bundle.").Get()

	EnableVtprotobuf = env.Register("ENABLE_VTPROTOBUF", false,
		"If true, will use optimized vtprotobuf based marshaling").Get()
)
// UnsafeFeaturesEnabled reports whether any of the unsafe, test-only feature
// flags (admin debug endpoints or runtime assertions) have been turned on.
func UnsafeFeaturesEnabled() bool {
	if EnableUnsafeAdminEndpoints {
		return true
	}
	return EnableUnsafeAssertions
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package features
import (
"strings"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// Define security related features here.
var (
	// SkipValidateTrustDomain tells the server proxy to not to check the peer's trust domain when
	// mTLS is enabled in authentication policy.
	SkipValidateTrustDomain = env.Register(
		"PILOT_SKIP_VALIDATE_TRUST_DOMAIN",
		false,
		"Skip validating the peer is from the same trust domain when mTLS is enabled in authentication policy").Get()

	// XDSAuth controls whether XDS clients must authenticate before receiving config.
	XDSAuth = env.Register("XDS_AUTH", true,
		"If true, will authenticate XDS clients.").Get()

	// EnableXDSIdentityCheck additionally authorizes authenticated XDS clients per namespace.
	EnableXDSIdentityCheck = env.Register(
		"PILOT_ENABLE_XDS_IDENTITY_CHECK",
		true,
		"If enabled, pilot will authorize XDS clients, to ensure they are acting only as namespaces they have permissions for.",
	).Get()

	// TODO: Move this to proper API.
	// trustedGatewayCIDR holds the raw comma-separated value; it is parsed into a
	// slice by TrustedGatewayCIDR below. Note that Get() is deferred to the closure.
	trustedGatewayCIDR = env.Register(
		"TRUSTED_GATEWAY_CIDR",
		"",
		"If set, any connections from gateway to Istiod with this CIDR range are treated as trusted for using authentication mechanisms like XFCC."+
			" This can only be used when the network where Istiod and the authenticating gateways are running in a trusted/secure network",
	)

	// TrustedGatewayCIDR is trustedGatewayCIDR split into individual CIDR strings.
	TrustedGatewayCIDR = func() []string {
		cidr := trustedGatewayCIDR.Get()

		// strings.Split("") would yield [""], so return an empty slice explicitly.
		if cidr == "" {
			return []string{}
		}

		return strings.Split(cidr, ",")
	}()

	// CATrustedNodeAccounts is the parsed set of "namespace/serviceaccount" entries
	// allowed to use node authentication for CSRs; malformed entries (missing the
	// "/" separator) are logged and skipped.
	CATrustedNodeAccounts = func() sets.Set[types.NamespacedName] {
		accounts := env.Register(
			"CA_TRUSTED_NODE_ACCOUNTS",
			"",
			"If set, the list of service accounts that are allowed to use node authentication for CSRs. "+
				"Node authentication allows an identity to create CSRs on behalf of other identities, but only if there is a pod "+
				"running on the same node with that identity. "+
				"This is intended for use with node proxies.",
		).Get()
		res := sets.New[types.NamespacedName]()
		if accounts == "" {
			return res
		}
		for _, v := range strings.Split(accounts, ",") {
			ns, sa, valid := strings.Cut(v, "/")
			if !valid {
				log.Warnf("Invalid CA_TRUSTED_NODE_ACCOUNTS, ignoring: %v", v)
				continue
			}
			res.Insert(types.NamespacedName{
				Namespace: ns,
				Name:      sa,
			})
		}
		return res
	}()

	// CertSignerDomain configures the cert signer domain info.
	CertSignerDomain = env.Register("CERT_SIGNER_DOMAIN", "", "The cert signer domain info").Get()

	UseCacertsForSelfSignedCA = env.Register("USE_CACERTS_FOR_SELF_SIGNED_CA", false,
		"If enabled, istiod will use a secret named cacerts to store its self-signed istio-"+
			"generated root certificate.").Get()
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package features
import (
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/log"
)
// Define telemetry related features here.
var (
	// traceSamplingVar holds the raw sampling percentage; it is validated and
	// clamped by TraceSampling below, so Get() is deferred to that closure.
	traceSamplingVar = env.Register(
		"PILOT_TRACE_SAMPLING",
		1.0,
		"Sets the mesh-wide trace sampling percentage. Should be 0.0 - 100.0. Precision to 0.01. "+
			"Default is 1.0.",
	)

	// TraceSampling is the validated mesh-wide trace sampling percentage.
	// Out-of-range values are logged and replaced with the default of 1.0.
	TraceSampling = func() float64 {
		f := traceSamplingVar.Get()
		if f < 0.0 || f > 100.0 {
			log.Warnf("PILOT_TRACE_SAMPLING out of range: %v", f)
			return 1.0
		}
		return f
	}()

	EnableTelemetryLabel = env.Register("PILOT_ENABLE_TELEMETRY_LABEL", true,
		"If true, pilot will add telemetry related metadata to cluster and endpoint resources, which will be consumed by telemetry filter.",
	).Get()

	EndpointTelemetryLabel = env.Register("PILOT_ENDPOINT_TELEMETRY_LABEL", true,
		"If true, pilot will add telemetry related metadata to Endpoint resource, which will be consumed by telemetry filter.",
	).Get()

	MetadataExchange = env.Register("PILOT_ENABLE_METADATA_EXCHANGE", true,
		"If true, pilot will add metadata exchange filters, which will be consumed by telemetry filter.",
	).Get()

	StackdriverAuditLog = env.Register("STACKDRIVER_AUDIT_LOG", false, ""+
		"If enabled, StackDriver audit logging will be enabled.").Get()
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package features
import (
"runtime"
"time"
"istio.io/istio/pkg/env"
)
// Define performance tuning related features here.
var (
	// NOTE(review): "GPRC" in the two env var names below looks like a typo for "GRPC";
	// presumably kept because renaming the environment variable would break existing
	// deployments — confirm before changing.
	MaxConcurrentStreams = env.Register(
		"ISTIO_GPRC_MAXSTREAMS",
		100000,
		"Sets the maximum number of concurrent grpc streams.",
	).Get()

	// MaxRecvMsgSize The max receive buffer size of gRPC received channel of Pilot in bytes.
	MaxRecvMsgSize = env.Register(
		"ISTIO_GPRC_MAXRECVMSGSIZE",
		4*1024*1024,
		"Sets the max receive buffer size of gRPC stream in bytes.",
	).Get()

	// PushThrottle bounds the number of concurrent XDS pushes. When the env var is
	// 0 or unset, the limit is derived from the CPU count.
	PushThrottle = func() int {
		v := env.Register(
			"PILOT_PUSH_THROTTLE",
			0,
			"Limits the number of concurrent pushes allowed. On larger machines this can be increased for faster pushes. "+
				"If set to 0 or unset, the max will be automatically determined based on the machine size",
		).Get()
		if v > 0 {
			return v
		}
		procs := runtime.GOMAXPROCS(0)
		// Heuristic to scale with cores. We end up with...
		// 1: 20
		// 2: 25
		// 4: 35
		// 32: 100
		return min(15+5*procs, 100)
	}()

	// RequestLimit caps incoming XDS requests per second. When the env var is
	// 0 or unset, the limit is derived from the CPU count (same heuristic as PushThrottle).
	RequestLimit = func() float64 {
		v := env.Register(
			"PILOT_MAX_REQUESTS_PER_SECOND",
			0.0,
			"Limits the number of incoming XDS requests per second. On larger machines this can be increased to handle more proxies concurrently. "+
				"If set to 0 or unset, the max will be automatically determined based on the machine size",
		).Get()
		if v > 0 {
			return v
		}
		procs := runtime.GOMAXPROCS(0)
		// Heuristic to scale with cores. We end up with...
		// 1: 20
		// 2: 25
		// 4: 35
		// 32: 100
		return min(float64(15+5*procs), 100.0)
	}()

	DebounceAfter = env.Register(
		"PILOT_DEBOUNCE_AFTER",
		100*time.Millisecond,
		"The delay added to config/registry events for debouncing. This will delay the push by "+
			"at least this interval. If no change is detected within this period, the push will happen, "+
			" otherwise we'll keep delaying until things settle, up to a max of PILOT_DEBOUNCE_MAX.",
	).Get()

	DebounceMax = env.Register(
		"PILOT_DEBOUNCE_MAX",
		10*time.Second,
		"The maximum amount of time to wait for events while debouncing. If events keep showing up with no breaks "+
			"for this time, we'll trigger a push.",
	).Get()

	EnableEDSDebounce = env.Register(
		"PILOT_ENABLE_EDS_DEBOUNCE",
		true,
		"If enabled, Pilot will include EDS pushes in the push debouncing, configured by PILOT_DEBOUNCE_AFTER and PILOT_DEBOUNCE_MAX."+
			" EDS pushes may be delayed, but there will be fewer pushes. By default this is enabled",
	).Get()

	ConvertSidecarScopeConcurrency = env.Register(
		"PILOT_CONVERT_SIDECAR_SCOPE_CONCURRENCY",
		1,
		"Used to adjust the concurrency of SidecarScope conversions. "+
			"When istiod is deployed on a multi-core CPU server, increasing this value will help to use the CPU to "+
			"accelerate configuration push, but it also means that istiod will consume more CPU resources.",
	).Get()

	MutexProfileFraction = env.Register("MUTEX_PROFILE_FRACTION", 1000,
		"If set to a non-zero value, enables mutex profiling a rate of 1/MUTEX_PROFILE_FRACTION events."+
			" For example, '1000' will record 0.1% of events. "+
			"Set to 0 to disable entirely.").Get()

	StatusUpdateInterval = env.Register(
		"PILOT_STATUS_UPDATE_INTERVAL",
		500*time.Millisecond,
		"Interval to update the XDS distribution status.",
	).Get()

	StatusQPS = env.Register(
		"PILOT_STATUS_QPS",
		100,
		"If status is enabled, controls the QPS with which status will be updated. "+
			"See https://godoc.org/k8s.io/client-go/rest#Config QPS",
	).Get()

	StatusBurst = env.Register(
		"PILOT_STATUS_BURST",
		500,
		"If status is enabled, controls the Burst rate with which status will be updated. "+
			"See https://godoc.org/k8s.io/client-go/rest#Config Burst",
	).Get()

	StatusMaxWorkers = env.Register("PILOT_STATUS_MAX_WORKERS", 100, "The maximum number of workers"+
		" Pilot will use to keep configuration status up to date. Smaller numbers will result in higher status latency, "+
		"but larger numbers may impact CPU in high scale environments.").Get()

	XDSCacheMaxSize = env.Register("PILOT_XDS_CACHE_SIZE", 60000,
		"The maximum number of cache entries for the XDS cache.").Get()

	XDSCacheIndexClearInterval = env.Register("PILOT_XDS_CACHE_INDEX_CLEAR_INTERVAL", 5*time.Second,
		"The interval for xds cache index clearing.").Get()

	// XdsPushSendTimeout of 0 (the default) disables the send timeout.
	XdsPushSendTimeout = env.Register(
		"PILOT_XDS_SEND_TIMEOUT",
		0*time.Second,
		"The timeout to send the XDS configuration to proxies. After this timeout is reached, Pilot will discard that push.",
	).Get()
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpc
import (
	"context"
	"errors"
	"io"
	"math"
	"strings"

	middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
	"google.golang.org/grpc/status"

	"istio.io/istio/pilot/pkg/features"
	istiokeepalive "istio.io/istio/pkg/keepalive"
	"istio.io/istio/pkg/util/sets"
)
// SendHandler is a callback that performs a single send operation.
type SendHandler func() error

// Send runs the given send callback, enforcing features.XdsPushSendTimeout when
// it is configured to a positive value. With a zero timeout the callback is
// invoked synchronously with no deadline.
func Send(ctx context.Context, send SendHandler) error {
	timeout := features.XdsPushSendTimeout
	if timeout.Nanoseconds() <= 0 {
		// No timeout configured: run inline.
		return send()
	}
	// Buffered so the goroutine never blocks, even if we return on timeout first.
	result := make(chan error, 1)
	deadlineCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	go func() {
		result <- send()
		close(result)
	}()
	select {
	case err := <-result:
		return err
	case <-deadlineCtx.Done():
		return status.Errorf(codes.DeadlineExceeded, "timeout sending")
	}
}
// ServerOptions returns the standard grpc server options used by Istio servers:
// the chained unary interceptors, stream/message limits from features, and
// keepalive settings derived from the provided options.
func ServerOptions(options *istiokeepalive.Options, interceptors ...grpc.UnaryServerInterceptor) []grpc.ServerOption {
	return []grpc.ServerOption{
		grpc.UnaryInterceptor(middleware.ChainUnaryServer(interceptors...)),
		grpc.MaxConcurrentStreams(uint32(features.MaxConcurrentStreams)),
		grpc.MaxRecvMsgSize(features.MaxRecvMsgSize),
		// Ensure we allow clients sufficient ability to send keep alives. If this is higher than client
		// keep alive setting, it will prematurely get a GOAWAY sent.
		grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
			MinTime: options.Time / 2,
		}),
		grpc.KeepaliveParams(keepalive.ServerParameters{
			Time:                  options.Time,
			Timeout:               options.Timeout,
			MaxConnectionAge:      options.MaxServerConnectionAge,
			MaxConnectionAgeGrace: options.MaxServerConnectionAgeGrace,
		}),
	}
}
// Defaults applied to outbound gRPC client connections.
const (
	// defaultClientMaxReceiveMessageSize lifts the default 4MB cap on received messages.
	defaultClientMaxReceiveMessageSize = math.MaxInt32
	// NOTE: the two comments below were previously swapped relative to the constant names.
	defaultInitialConnWindowSize = 1024 * 1024 // default gRPC ConnWindowSize (connection-level flow control)
	defaultInitialWindowSize     = 1024 * 1024 // default gRPC InitialWindowSize (per-stream flow control)
)
// ClientOptions returns consistent grpc dial options with custom dial options.
// When options is nil the default keepalive settings are used; when tlsOpts is
// nil the connection uses insecure (plaintext) transport credentials.
func ClientOptions(options *istiokeepalive.Options, tlsOpts *TLSOptions) ([]grpc.DialOption, error) {
	if options == nil {
		options = istiokeepalive.DefaultOption()
	}

	var transport grpc.DialOption
	if tlsOpts == nil {
		transport = grpc.WithTransportCredentials(insecure.NewCredentials())
	} else {
		var err error
		if transport, err = getTLSDialOption(tlsOpts); err != nil {
			return nil, err
		}
	}

	return []grpc.DialOption{
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:    options.Time,
			Timeout: options.Timeout,
		}),
		grpc.WithInitialWindowSize(int32(defaultInitialWindowSize)),
		grpc.WithInitialConnWindowSize(int32(defaultInitialConnWindowSize)),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaultClientMaxReceiveMessageSize)),
		transport,
	}, nil
}
// expectedGrpcFailureMessages enumerates substrings of gRPC Unavailable errors
// that occur during normal client disconnects and should not be treated as failures.
var expectedGrpcFailureMessages = sets.New(
	"client disconnected",
	"error reading from server: EOF",
	"transport is closing",
)
// containsExpectedMessage reports whether msg contains any of the known benign
// disconnect substrings listed in expectedGrpcFailureMessages.
func containsExpectedMessage(msg string) bool {
	for candidate := range expectedGrpcFailureMessages {
		if strings.Contains(msg, candidate) {
			return true
		}
	}
	return false
}
// IsExpectedGRPCError checks a gRPC error code and determines whether it is an expected error when
// things are operating normally. This is basically capturing when the client disconnects.
func IsExpectedGRPCError(err error) bool {
	// Use errors.Is so a wrapped io.EOF (e.g. via fmt.Errorf with %w) is also recognized;
	// a plain == comparison would miss wrapped errors.
	if errors.Is(err, io.EOF) {
		return true
	}
	if s, ok := status.FromError(err); ok {
		// Client-driven cancellation or deadline expiry is normal operation.
		if s.Code() == codes.Canceled || s.Code() == codes.DeadlineExceeded {
			return true
		}
		// Unavailable is expected only for the known benign disconnect messages.
		if s.Code() == codes.Unavailable && containsExpectedMessage(s.Message()) {
			return true
		}
	}
	// If this is not a gRPC Status, fall back to inspecting the error message directly.
	if strings.Contains(err.Error(), "stream terminated by RST_STREAM with error code: NO_ERROR") {
		return true
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpc
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net"
"os"
"strings"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"istio.io/istio/pkg/log"
"istio.io/istio/security/pkg/pki/util"
)
// TLSOptions include TLS options that a grpc client uses to connect with server.
type TLSOptions struct {
	RootCert      string // path to the root (CA) cert file; empty means use the system cert pool
	Key           string // path to the client private key file (paired with Cert)
	Cert          string // path to the client certificate chain file
	ServerAddress string // host:port of the server; the host part seeds the TLS ServerName
	SAN           string // if set, overrides the expected server name used for verification
}
// getTLSDialOption builds a grpc.DialOption carrying TLS credentials based on opts:
// it loads the root CA pool, lazily supplies the client certificate (falling back to
// an empty certificate when the cert is missing, unparsable, or expired, so the
// connection can proceed with token-based auth instead), and derives the expected
// server name from ServerAddress/SAN.
func getTLSDialOption(opts *TLSOptions) (grpc.DialOption, error) {
	rootCert, err := getRootCertificate(opts.RootCert)
	if err != nil {
		return nil, err
	}
	config := tls.Config{
		// Evaluated at handshake time, so cert rotation on disk is picked up per-connection.
		GetClientCertificate: func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
			var certificate tls.Certificate
			key, cert := opts.Key, opts.Cert
			if key != "" && cert != "" {
				isExpired, err := util.IsCertExpired(opts.Cert)
				if err != nil {
					// Unparsable cert chain: return the empty certificate rather than failing.
					log.Warnf("cannot parse the cert chain, using token instead: %v", err)
					return &certificate, nil
				}
				if isExpired {
					log.Warnf("cert expired, using token instead")
					return &certificate, nil
				}
				// Load the certificate from disk
				certificate, err = tls.LoadX509KeyPair(cert, key)
				if err != nil {
					return nil, err
				}
			}
			// If key/cert were not configured, this is the zero-value (empty) certificate.
			return &certificate, nil
		},
		RootCAs:    rootCert,
		MinVersion: tls.VersionTLS12,
	}
	// Default the expected server name to the host portion of the address.
	if host, _, err := net.SplitHostPort(opts.ServerAddress); err == nil {
		config.ServerName = host
	}
	// For debugging on localhost (with port forward)
	if strings.Contains(config.ServerName, "localhost") {
		config.ServerName = "istiod.istio-system.svc"
	}
	// An explicit SAN always wins over the derived server name.
	if opts.SAN != "" {
		config.ServerName = opts.SAN
	}
	transportCreds := credentials.NewTLS(&config)
	return grpc.WithTransportCredentials(transportCreds), nil
}
// getRootCertificate returns the CA pool used to verify the server certificate.
// When rootCertFile is non-empty its PEM contents become the pool; otherwise the
// host's system certificate pool is returned.
func getRootCertificate(rootCertFile string) (*x509.CertPool, error) {
	if rootCertFile == "" {
		// No explicit root configured: trust the OS bundle.
		return x509.SystemCertPool()
	}
	pemBytes, err := os.ReadFile(rootCertFile)
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pemBytes) {
		return nil, fmt.Errorf("failed to create TLS dial option with root certificates")
	}
	return pool, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keycertbundle
import (
"os"
"sync"
)
// KeyCertBundle stores the cert, private key and root cert for istiod.
type KeyCertBundle struct {
	CertPem  []byte
	KeyPem   []byte
	CABundle []byte
}

// Watcher holds the current KeyCertBundle and fans out update notifications
// to registered watchers.
type Watcher struct {
	mutex     sync.RWMutex
	bundle    KeyCertBundle
	watcherID int32
	watchers  map[int32]chan struct{}
}

// NewWatcher returns a Watcher with no registered watchers and an empty bundle.
func NewWatcher() *Watcher {
	return &Watcher{
		watchers: make(map[int32]chan struct{}),
	}
}

// AddWatcher registers a new watcher, returning its id and the channel on which
// update notifications are delivered (buffered to hold one pending signal).
func (w *Watcher) AddWatcher() (int32, chan struct{}) {
	notify := make(chan struct{}, 1)
	w.mutex.Lock()
	defer w.mutex.Unlock()
	assigned := w.watcherID
	w.watcherID++
	w.watchers[assigned] = notify
	return assigned, notify
}

// RemoveWatcher deregisters the watcher with the given id, closing its channel.
func (w *Watcher) RemoveWatcher(id int32) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	if notify := w.watchers[id]; notify != nil {
		close(notify)
	}
	delete(w.watchers, id)
}

// SetAndNotify updates every non-empty part of the bundle and signals all
// registered watchers. The send is non-blocking: a watcher whose buffer is
// already full is skipped, since it has a pending signal anyway.
func (w *Watcher) SetAndNotify(key, cert, caBundle []byte) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	if len(key) > 0 {
		w.bundle.KeyPem = key
	}
	if len(cert) > 0 {
		w.bundle.CertPem = cert
	}
	if len(caBundle) > 0 {
		w.bundle.CABundle = caBundle
	}
	for _, notify := range w.watchers {
		select {
		case notify <- struct{}{}:
		default:
		}
	}
}

// SetFromFilesAndNotify loads the key, cert and root cert from disk, updates the
// bundle and notifies watchers. Returns the first read error encountered.
func (w *Watcher) SetFromFilesAndNotify(keyFile, certFile, rootCert string) error {
	certBytes, err := os.ReadFile(certFile)
	if err != nil {
		return err
	}
	keyBytes, err := os.ReadFile(keyFile)
	if err != nil {
		return err
	}
	caBytes, err := os.ReadFile(rootCert)
	if err != nil {
		return err
	}
	w.SetAndNotify(keyBytes, certBytes, caBytes)
	return nil
}

// GetCABundle returns the current root cert bundle.
func (w *Watcher) GetCABundle() []byte {
	w.mutex.RLock()
	defer w.mutex.RUnlock()
	return w.bundle.CABundle
}

// GetKeyCertBundle returns a copy of the current bundle.
func (w *Watcher) GetKeyCertBundle() KeyCertBundle {
	w.mutex.RLock()
	defer w.mutex.RUnlock()
	return w.bundle
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sleaderelection
import (
"net/http"
"sync"
"time"
)
// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code the health check code. It allows us to provide health
// status about the leader election. Most specifically about if the leader
// has failed to renew without exiting the process. In that case we should
// report not healthy and rely on the kubelet to take down the process.
type HealthzAdaptor struct {
	pointerLock sync.Mutex    // guards le, which may be set after the adaptor is created
	le          *LeaderElector // the elector being monitored; nil until SetLeaderElection is called
	timeout     time.Duration  // grace period past lease expiry during which Check still reports healthy
}
// Name returns the name of the health check we are implementing,
// as reported to the healthz endpoint.
func (l *HealthzAdaptor) Name() string {
	return "leaderElection"
}
// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but had not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
	l.pointerLock.Lock()
	defer l.pointerLock.Unlock()
	// No elector wired up yet: nothing to report on, so stay healthy.
	if le := l.le; le != nil {
		return le.Check(l.timeout)
	}
	return nil
}
// SetLeaderElection ties a leader election object to a HealthzAdaptor.
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
	l.pointerLock.Lock()
	l.le = le
	l.pointerLock.Unlock()
}
// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines the time beyond the lease expiry to be allowed for timeout.
// Checks within the timeout period after the lease expires will still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
	return &HealthzAdaptor{timeout: timeout}
}
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sresourcelock
import (
"context"
"encoding/json"
"errors"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
// TODO: This is almost a exact replica of Endpoints lock.
// going forwards as we self host more and more components
// and use ConfigMaps as the means to pass that configuration
// data we will likely move to deprecate the Endpoints lock.

// ConfigMapLock stores a LeaderElectionRecord as an annotation on a ConfigMap.
type ConfigMapLock struct {
	// ConfigMapMeta should contain a Name and a Namespace of a
	// ConfigMapMeta object that the LeaderElector will attempt to lead.
	ConfigMapMeta metav1.ObjectMeta
	// Client is used to read/write the ConfigMap backing this lock.
	Client corev1client.ConfigMapsGetter
	// LockConfig carries the holder identity, key, and optional event recorder.
	LockConfig ResourceLockConfig
	// cm caches the last ConfigMap fetched or created; Update requires it to be set.
	cm *v1.ConfigMap
}
// Get returns the election record stored in the ConfigMap's annotation,
// along with its raw serialized bytes.
func (cml *ConfigMapLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
	var err error
	// Cache the fetched object so a later Update can modify it in place.
	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Get(ctx, cml.ConfigMapMeta.Name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	if cml.cm.Annotations == nil {
		cml.cm.Annotations = make(map[string]string)
	}
	record := LeaderElectionRecord{}
	raw, ok := cml.cm.Annotations[LeaderElectionRecordAnnotationKey]
	recordBytes := []byte(raw)
	// A missing annotation yields an empty (zero-valued) record rather than an error.
	if ok {
		if err := json.Unmarshal(recordBytes, &record); err != nil {
			return nil, nil, err
		}
	}
	return &record, recordBytes, nil
}
// Create attempts to create a new ConfigMap carrying the serialized
// LeaderElectionRecord as an annotation.
func (cml *ConfigMapLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
	recordBytes, err := json.Marshal(ler)
	if err != nil {
		return err
	}
	cm := &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      cml.ConfigMapMeta.Name,
			Namespace: cml.ConfigMapMeta.Namespace,
			Annotations: map[string]string{
				LeaderElectionRecordAnnotationKey: string(recordBytes),
			},
		},
	}
	// Cache the created object so a later Update can modify it in place.
	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Create(ctx, cm, metav1.CreateOptions{})
	return err
}
// Update will update an existing annotation on a given resource.
// Get or Create must have succeeded first so the cached ConfigMap is populated.
func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
	if cml.cm == nil {
		return errors.New("configmap not initialized, call get or create first")
	}
	recordBytes, err := json.Marshal(ler)
	if err != nil {
		return err
	}
	if cml.cm.Annotations == nil {
		cml.cm.Annotations = map[string]string{}
	}
	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
	updated, err := cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
	if err != nil {
		return err
	}
	// Keep the cache pointing at the latest server-side version.
	cml.cm = updated
	return nil
}
// RecordEvent in leader election while adding meta-data.
// It is a no-op when no EventRecorder is configured.
func (cml *ConfigMapLock) RecordEvent(s string) {
	if cml.LockConfig.EventRecorder == nil {
		return
	}
	events := fmt.Sprintf("%v %v", cml.LockConfig.Identity, s)
	subject := &v1.ConfigMap{}
	// Guard against a nil cached ConfigMap: RecordEvent can be reached before
	// Get/Create has populated cml.cm, and dereferencing it would panic.
	if cml.cm != nil {
		subject.ObjectMeta = cml.cm.ObjectMeta
	}
	cml.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string (formatted as "namespace/name").
func (cml *ConfigMapLock) Describe() string {
	return fmt.Sprintf("%v/%v", cml.ConfigMapMeta.Namespace, cml.ConfigMapMeta.Name)
}
// Identity returns the Identity of the lock.
func (cml *ConfigMapLock) Identity() string {
	return cml.LockConfig.Identity
}
// Key returns the Key of the lock.
// (The original comment here incorrectly said "Identity".)
func (cml *ConfigMapLock) Key() string {
	return cml.LockConfig.Key
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sresourcelock
import (
"context"
"encoding/json"
"errors"
"fmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)
// EndpointsLock stores a LeaderElectionRecord as an annotation on an Endpoints object.
type EndpointsLock struct {
	// EndpointsMeta should contain a Name and a Namespace of an
	// Endpoints object that the LeaderElector will attempt to lead.
	EndpointsMeta metav1.ObjectMeta
	// Client is used to read/write the Endpoints backing this lock.
	Client corev1client.EndpointsGetter
	// LockConfig carries the holder identity, key, and optional event recorder.
	LockConfig ResourceLockConfig
	// e caches the last Endpoints fetched or created; Update requires it to be set.
	e *v1.Endpoints
}
// Get returns the election record stored in the Endpoints annotation,
// along with its raw serialized bytes.
func (el *EndpointsLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
	var err error
	// Cache the fetched object so a later Update can modify it in place.
	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Get(ctx, el.EndpointsMeta.Name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	if el.e.Annotations == nil {
		el.e.Annotations = make(map[string]string)
	}
	record := LeaderElectionRecord{}
	raw, ok := el.e.Annotations[LeaderElectionRecordAnnotationKey]
	recordBytes := []byte(raw)
	// A missing annotation yields an empty (zero-valued) record rather than an error.
	if ok {
		if err := json.Unmarshal(recordBytes, &record); err != nil {
			return nil, nil, err
		}
	}
	return &record, recordBytes, nil
}
// Create attempts to create a new Endpoints object carrying the serialized
// LeaderElectionRecord as an annotation.
func (el *EndpointsLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
	recordBytes, err := json.Marshal(ler)
	if err != nil {
		return err
	}
	endpoints := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{
			Name:      el.EndpointsMeta.Name,
			Namespace: el.EndpointsMeta.Namespace,
			Annotations: map[string]string{
				LeaderElectionRecordAnnotationKey: string(recordBytes),
			},
		},
	}
	// Cache the created object so a later Update can modify it in place.
	el.e, err = el.Client.Endpoints(el.EndpointsMeta.Namespace).Create(ctx, endpoints, metav1.CreateOptions{})
	return err
}
// Update will update an existing annotation on a given resource.
// Get or Create must have succeeded first so the cached Endpoints is populated.
func (el *EndpointsLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
	if el.e == nil {
		return errors.New("endpoint not initialized, call get or create first")
	}
	recordBytes, err := json.Marshal(ler)
	if err != nil {
		return err
	}
	if el.e.Annotations == nil {
		el.e.Annotations = map[string]string{}
	}
	el.e.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
	updated, err := el.Client.Endpoints(el.EndpointsMeta.Namespace).Update(ctx, el.e, metav1.UpdateOptions{})
	if err != nil {
		return err
	}
	// Keep the cache pointing at the latest server-side version.
	el.e = updated
	return nil
}
// RecordEvent in leader election while adding meta-data.
// It is a no-op when no EventRecorder is configured.
func (el *EndpointsLock) RecordEvent(s string) {
	if el.LockConfig.EventRecorder == nil {
		return
	}
	events := fmt.Sprintf("%v %v", el.LockConfig.Identity, s)
	subject := &v1.Endpoints{}
	// Guard against a nil cached Endpoints: RecordEvent can be reached before
	// Get/Create has populated el.e, and dereferencing it would panic.
	if el.e != nil {
		subject.ObjectMeta = el.e.ObjectMeta
	}
	el.LockConfig.EventRecorder.Eventf(subject, v1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string (formatted as "namespace/name").
func (el *EndpointsLock) Describe() string {
	return fmt.Sprintf("%v/%v", el.EndpointsMeta.Namespace, el.EndpointsMeta.Name)
}
// Identity returns the Identity of the lock.
func (el *EndpointsLock) Identity() string {
	return el.LockConfig.Identity
}
// Key returns the Key of the lock.
func (el *EndpointsLock) Key() string {
	return el.LockConfig.Key
}
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sresourcelock
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
clientset "k8s.io/client-go/kubernetes"
coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
)
const (
	// LeaderElectionRecordAnnotationKey is the annotation under which
	// ConfigMap and Endpoints locks store the serialized LeaderElectionRecord.
	LeaderElectionRecordAnnotationKey = "control-plane.alpha.kubernetes.io/leader"
	// Valid lock-type values accepted by New.
	EndpointsResourceLock = "endpoints"
	ConfigMapsResourceLock = "configmaps"
	LeasesResourceLock = "leases"
	// The *Leases variants produce a MultiLock used while migrating from the
	// legacy resource type to Leases.
	EndpointsLeasesResourceLock = "endpointsleases"
	ConfigMapsLeasesResourceLock = "configmapsleases"
)
// LeaderElectionRecord is the record that is stored in the leader election annotation.
// This information should be used for observational purposes only and could be replaced
// with a random string (e.g. UUID) with only slight modification of this code.
// TODO(mikedanese): this should potentially be versioned
type LeaderElectionRecord struct {
	// HolderIdentity is the ID that owns the lease. If empty, no one owns this lease and
	// all callers may acquire. Versions of this library prior to Kubernetes 1.14 will not
	// attempt to acquire leases with empty identities and will wait for the full lease
	// interval to expire before attempting to reacquire. This value is set to empty when
	// a client voluntarily steps down.
	HolderIdentity string `json:"holderIdentity"`
	// HolderKey is the Key of the lease owner. This may be empty if a key is not set.
	HolderKey string `json:"holderKey"`
	// LeaseDurationSeconds is how long non-leaders must wait (without observing a
	// renewal) before attempting to take over.
	LeaseDurationSeconds int `json:"leaseDurationSeconds"`
	// AcquireTime is when the current holder acquired the lease.
	AcquireTime metav1.Time `json:"acquireTime"`
	// RenewTime is the last time the holder renewed the lease.
	RenewTime metav1.Time `json:"renewTime"`
	// LeaderTransitions counts how many times leadership has changed hands.
	LeaderTransitions int `json:"leaderTransitions"`
}
// EventRecorder records a change in the ResourceLock.
type EventRecorder interface {
	// Eventf emits an event for the given object, with a printf-style message.
	Eventf(obj runtime.Object, eventType, reason, message string, args ...any)
}
// ResourceLockConfig common data that exists across different
// resource locks
type ResourceLockConfig struct {
	// Identity is the unique string identifying a lease holder across
	// all participants in an election.
	Identity string
	// Key is a user-defined value used to indicate how high a priority this
	// lock has. Other locks may steal the lock from us if they believe their
	// key has a higher priority.
	Key string
	// EventRecorder is optional; when nil, no events are recorded.
	EventRecorder EventRecorder
}
// Interface offers a common interface for locking on arbitrary
// resources used in leader election. The Interface is used
// to hide the details on specific implementations in order to allow
// them to change over time. This interface is strictly for use
// by the leaderelection code.
type Interface interface {
	// Get returns the LeaderElectionRecord
	Get(ctx context.Context) (*LeaderElectionRecord, []byte, error)
	// Create attempts to create a LeaderElectionRecord
	Create(ctx context.Context, ler LeaderElectionRecord) error
	// Update will update an existing LeaderElectionRecord
	Update(ctx context.Context, ler LeaderElectionRecord) error
	// RecordEvent is used to record events
	RecordEvent(string)
	// Identity will return the locks Identity
	Identity() string
	// Key will return the locks Key, used for prioritized leader election.
	Key() string
	// Describe is used to convert details on current resource lock
	// into a string
	Describe() string
}
// New creates a lock of the given type according to the input parameters.
// nolint: lll
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
	// All lock flavors target the same namespace/name.
	meta := metav1.ObjectMeta{
		Namespace: ns,
		Name:      name,
	}
	endpointsLock := &EndpointsLock{
		EndpointsMeta: meta,
		Client:        coreClient,
		LockConfig:    rlc,
	}
	configmapLock := &ConfigMapLock{
		ConfigMapMeta: meta,
		Client:        coreClient,
		LockConfig:    rlc,
	}
	leaseLock := &LeaseLock{
		LeaseMeta:  meta,
		Client:     coordinationClient,
		LockConfig: rlc,
	}
	switch lockType {
	case EndpointsResourceLock:
		return endpointsLock, nil
	case ConfigMapsResourceLock:
		return configmapLock, nil
	case LeasesResourceLock:
		return leaseLock, nil
	case EndpointsLeasesResourceLock:
		// Migration mode: write both, prefer the legacy primary.
		return &MultiLock{
			Primary:   endpointsLock,
			Secondary: leaseLock,
		}, nil
	case ConfigMapsLeasesResourceLock:
		return &MultiLock{
			Primary:   configmapLock,
			Secondary: leaseLock,
		}, nil
	default:
		return nil, fmt.Errorf("invalid lock-type %s", lockType)
	}
}
// NewFromKubeconfig will create a lock of a given type according to the input parameters.
// Timeout set for a client used to contact to Kubernetes should be lower than
// RenewDeadline to keep a single hung request from forcing a leader loss.
// Setting it to max(time.Second, RenewDeadline/2) as a reasonable heuristic.
// nolint: lll
func NewFromKubeconfig(lockType string, ns string, name string, rlc ResourceLockConfig, kubeconfig *restclient.Config, renewDeadline time.Duration) (Interface, error) {
	// Shallow-copy so the caller's kubeconfig is left untouched.
	config := *kubeconfig
	config.Timeout = renewDeadline / 2
	if config.Timeout < time.Second {
		config.Timeout = time.Second
	}
	client := clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "leader-election"))
	return New(lockType, ns, name, client.CoreV1(), client.CoordinationV1(), rlc)
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sresourcelock
import (
"context"
"encoding/json"
"errors"
"fmt"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
coordinationv1client "k8s.io/client-go/kubernetes/typed/coordination/v1"
)
// LeaseLock stores a LeaderElectionRecord in the spec of a coordination/v1 Lease.
type LeaseLock struct {
	// LeaseMeta should contain a Name and a Namespace of a
	// LeaseMeta object that the LeaderElector will attempt to lead.
	LeaseMeta metav1.ObjectMeta
	// Client is used to read/write the Lease backing this lock.
	Client coordinationv1client.LeasesGetter
	// LockConfig carries the holder identity, key, and optional event recorder.
	LockConfig ResourceLockConfig
	// lease caches the last Lease fetched or created; Update requires it to be set.
	lease *coordinationv1.Lease
}
// Get returns the election record encoded in the Lease spec, along with a
// JSON serialization of it.
func (ll *LeaseLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
	var err error
	// Cache the fetched object so a later Update can modify it in place.
	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ctx, ll.LeaseMeta.Name, metav1.GetOptions{})
	if err != nil {
		return nil, nil, err
	}
	record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec)
	recordBytes, err := json.Marshal(*record)
	if err != nil {
		return nil, nil, err
	}
	return record, recordBytes, nil
}
// Create attempts to create a new Lease whose spec encodes the given record.
func (ll *LeaseLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
	lease := &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{
			Name:      ll.LeaseMeta.Name,
			Namespace: ll.LeaseMeta.Namespace,
		},
		Spec: LeaderElectionRecordToLeaseSpec(&ler),
	}
	var err error
	// Cache the created object so a later Update can modify it in place.
	ll.lease, err = ll.Client.Leases(ll.LeaseMeta.Namespace).Create(ctx, lease, metav1.CreateOptions{})
	return err
}
// Update will update an existing Lease spec.
// Get or Create must have succeeded first so the cached Lease is populated.
func (ll *LeaseLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
	if ll.lease == nil {
		return errors.New("lease not initialized, call get or create first")
	}
	ll.lease.Spec = LeaderElectionRecordToLeaseSpec(&ler)
	updated, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Update(ctx, ll.lease, metav1.UpdateOptions{})
	if err != nil {
		return err
	}
	// Keep the cache pointing at the latest server-side version.
	ll.lease = updated
	return nil
}
// RecordEvent in leader election while adding meta-data.
// It is a no-op when no EventRecorder is configured.
func (ll *LeaseLock) RecordEvent(s string) {
	if ll.LockConfig.EventRecorder == nil {
		return
	}
	events := fmt.Sprintf("%v %v", ll.LockConfig.Identity, s)
	subject := &coordinationv1.Lease{}
	// Guard against a nil cached Lease: RecordEvent can be reached before
	// Get/Create has populated ll.lease, and dereferencing it would panic.
	if ll.lease != nil {
		subject.ObjectMeta = ll.lease.ObjectMeta
	}
	ll.LockConfig.EventRecorder.Eventf(subject, corev1.EventTypeNormal, "LeaderElection", events)
}
// Describe is used to convert details on current resource lock
// into a string (formatted as "namespace/name").
func (ll *LeaseLock) Describe() string {
	return fmt.Sprintf("%v/%v", ll.LeaseMeta.Namespace, ll.LeaseMeta.Name)
}
// Identity returns the Identity of the lock.
func (ll *LeaseLock) Identity() string {
	return ll.LockConfig.Identity
}
// Key returns the Key of the lock.
func (ll *LeaseLock) Key() string {
	return ll.LockConfig.Key
}
// LeaseSpecToLeaderElectionRecord converts a LeaseSpec into a
// LeaderElectionRecord; nil spec fields map to zero-valued record fields.
// Note: LeaseSpec has no counterpart for HolderKey, so it is left empty here.
func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElectionRecord {
	record := LeaderElectionRecord{}
	if v := spec.HolderIdentity; v != nil {
		record.HolderIdentity = *v
	}
	if v := spec.LeaseDurationSeconds; v != nil {
		record.LeaseDurationSeconds = int(*v)
	}
	if v := spec.LeaseTransitions; v != nil {
		record.LeaderTransitions = int(*v)
	}
	if v := spec.AcquireTime; v != nil {
		record.AcquireTime = metav1.Time{Time: v.Time}
	}
	if v := spec.RenewTime; v != nil {
		record.RenewTime = metav1.Time{Time: v.Time}
	}
	return &record
}
// LeaderElectionRecordToLeaseSpec converts a LeaderElectionRecord into a
// LeaseSpec. HolderKey is dropped because LeaseSpec has no field for it.
func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.LeaseSpec {
	// Take addresses of local copies so the spec does not alias the caller's record.
	duration := int32(ler.LeaseDurationSeconds)
	transitions := int32(ler.LeaderTransitions)
	acquire := metav1.MicroTime{Time: ler.AcquireTime.Time}
	renew := metav1.MicroTime{Time: ler.RenewTime.Time}
	return coordinationv1.LeaseSpec{
		HolderIdentity:       &ler.HolderIdentity,
		LeaseDurationSeconds: &duration,
		AcquireTime:          &acquire,
		RenewTime:            &renew,
		LeaseTransitions:     &transitions,
	}
}
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sresourcelock
import (
"bytes"
"context"
"encoding/json"
kerrors "k8s.io/apimachinery/pkg/api/errors"
)
const (
	// UnknownLeader is reported as the holder identity when the primary and
	// secondary locks of a MultiLock disagree on who the leader is.
	UnknownLeader = "leaderelection.k8s.io/unknown"
)
// MultiLock is used for lock's migration: it writes the election record to
// both locks, with Primary (the legacy resource) as the source of truth.
type MultiLock struct {
	// Primary is the authoritative lock.
	Primary Interface
	// Secondary is kept in sync with Primary during migration.
	Secondary Interface
}
// Get returns the older election record of the lock.
// The primary lock is authoritative; when the two locks disagree on the
// holder, the identity is reported as UnknownLeader.
func (ml *MultiLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
	primary, primaryRaw, err := ml.Primary.Get(ctx)
	if err != nil {
		return nil, nil, err
	}
	secondary, secondaryRaw, err := ml.Secondary.Get(ctx)
	switch {
	case err == nil:
		// Both records available; reconcile below.
	case kerrors.IsNotFound(err) && primary.HolderIdentity != ml.Identity():
		// Secondary missing and the primary is held by an old client that
		// never wrote the secondary: serve the primary record alone.
		return primary, primaryRaw, nil
	default:
		return nil, nil, err
	}
	if primary.HolderIdentity != secondary.HolderIdentity {
		primary.HolderIdentity = UnknownLeader
		if primaryRaw, err = json.Marshal(primary); err != nil {
			return nil, nil, err
		}
	}
	return primary, ConcatRawRecord(primaryRaw, secondaryRaw), nil
}
// Create attempts to create both primary lock and secondary lock.
// An already-existing primary is tolerated; the secondary's result decides.
func (ml *MultiLock) Create(ctx context.Context, ler LeaderElectionRecord) error {
	if err := ml.Primary.Create(ctx, ler); err != nil && !kerrors.IsAlreadyExists(err) {
		return err
	}
	return ml.Secondary.Create(ctx, ler)
}
// Update will update an existing annotation on both resources,
// creating the secondary if it does not exist yet.
func (ml *MultiLock) Update(ctx context.Context, ler LeaderElectionRecord) error {
	if err := ml.Primary.Update(ctx, ler); err != nil {
		return err
	}
	if _, _, err := ml.Secondary.Get(ctx); err != nil && kerrors.IsNotFound(err) {
		return ml.Secondary.Create(ctx, ler)
	}
	return ml.Secondary.Update(ctx, ler)
}
// RecordEvent in leader election while adding meta-data.
// The event is recorded against both underlying locks.
func (ml *MultiLock) RecordEvent(s string) {
	ml.Primary.RecordEvent(s)
	ml.Secondary.RecordEvent(s)
}
// Describe is used to convert details on current resource lock
// into a string. Only the primary lock is described.
func (ml *MultiLock) Describe() string {
	return ml.Primary.Describe()
}
// Identity returns the Identity of the lock (taken from the primary).
func (ml *MultiLock) Identity() string {
	return ml.Primary.Identity()
}
// Key returns the Key of the lock (taken from the primary).
func (ml *MultiLock) Key() string {
	return ml.Primary.Key()
}
// ConcatRawRecord joins the two raw records with a comma separator.
func ConcatRawRecord(primaryRaw, secondaryRaw []byte) []byte {
	records := [][]byte{primaryRaw, secondaryRaw}
	return bytes.Join(records, []byte{','})
}
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package leaderelection implements leader election of a set of endpoints.
// It uses an annotation in the endpoints object to store the record of the
// election state. This implementation does not guarantee that only one
// client is acting as a leader (a.k.a. fencing).
//
// A client only acts on timestamps captured locally to infer the state of the
// leader election. The client does not consider timestamps in the leader
// election record to be accurate because these timestamps may not have been
// produced by a local clock. The implementation does not depend on their
// accuracy and only uses their change to indicate that another client has
// renewed the leader lease. Thus the implementation is tolerant to arbitrary
// clock skew, but is not tolerant to arbitrary clock skew rate.
//
// However the level of tolerance to skew rate can be configured by setting
// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a
// maximum tolerated ratio of time passed on the fastest node to time passed on
// the slowest node can be approximately achieved with a configuration that sets
// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted
// to tolerate some nodes progressing forward in time twice as fast as other nodes,
// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds.
//
// While not required, some method of clock synchronization between nodes in the
// cluster is highly recommended. It's important to keep in mind when configuring
// this client that the tolerance to skew rate varies inversely to master
// availability.
//
// Larger clusters often have a more lenient SLA for API latency. This should be
// taken into account when configuring the client. The rate of leader transitions
// should be monitored and RetryPeriod and LeaseDuration should be increased
// until the rate is stable and acceptably low. It's important to keep in mind
// when configuring this client that the tolerance to API latency varies inversely
// to master availability.
//
// DISCLAIMER: this is an alpha API. This library will likely change significantly
// or even be removed entirely in subsequent releases. Depend on this API at
// your own risk.
// nolint
package k8sleaderelection
import (
"bytes"
"context"
"fmt"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/utils/clock"
"istio.io/istio/pilot/pkg/leaderelection/k8sleaderelection/k8sresourcelock"
)
const (
	// JitterFactor is the maximum multiplier applied to RetryPeriod when
	// jittering the acquire/renew retry intervals.
	JitterFactor = 1.2
)
// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig.
// It validates the timing relationships and required callbacks/lock before
// constructing the elector.
func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) {
	// Validation order (and error strings) are preserved for callers that
	// match on the first reported problem.
	if lec.LeaseDuration <= lec.RenewDeadline {
		return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline")
	}
	if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) {
		return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor")
	}
	if lec.LeaseDuration < 1 {
		return nil, fmt.Errorf("leaseDuration must be greater than zero")
	}
	if lec.RenewDeadline < 1 {
		return nil, fmt.Errorf("renewDeadline must be greater than zero")
	}
	if lec.RetryPeriod < 1 {
		return nil, fmt.Errorf("retryPeriod must be greater than zero")
	}
	if lec.Callbacks.OnStartedLeading == nil {
		return nil, fmt.Errorf("callback OnStartedLeading must not be nil")
	}
	if lec.Callbacks.OnStoppedLeading == nil {
		return nil, fmt.Errorf("callback OnStoppedLeading must not be nil")
	}
	if lec.Lock == nil {
		return nil, fmt.Errorf("lock must not be nil")
	}
	elector := &LeaderElector{
		config:  lec,
		clock:   clock.RealClock{},
		metrics: globalMetricsFactory.newLeaderMetrics(),
	}
	// Start out as a non-leader in the metrics.
	elector.metrics.leaderOff(elector.config.Name)
	return elector, nil
}
// KeyComparisonFunc compares the existing leader's key to ours; returning true
// means our key takes precedence and we may pre-empt the current leader.
type KeyComparisonFunc func(existingKey string) bool
// LeaderElectionConfig configures a LeaderElector.
type LeaderElectionConfig struct {
	// Lock is the resource that will be used for locking
	Lock k8sresourcelock.Interface
	// LeaseDuration is the duration that non-leader candidates will
	// wait to force acquire leadership. This is measured against time of
	// last observed ack.
	//
	// A client needs to wait a full LeaseDuration without observing a change to
	// the record before it can attempt to take over. When all clients are
	// shutdown and a new set of clients are started with different names against
	// the same leader record, they must wait the full LeaseDuration before
	// attempting to acquire the lease. Thus LeaseDuration should be as short as
	// possible (within your tolerance for clock skew rate) to avoid possibly
	// long waits in that scenario.
	//
	// Core clients default this value to 15 seconds.
	LeaseDuration time.Duration
	// RenewDeadline is the duration that the acting master will retry
	// refreshing leadership before giving up.
	//
	// Core clients default this value to 10 seconds.
	RenewDeadline time.Duration
	// RetryPeriod is the duration the LeaderElector clients should wait
	// between tries of actions.
	//
	// Core clients default this value to 2 seconds.
	RetryPeriod time.Duration
	// KeyComparison defines a function to compare the existing leader's key to our own.
	// If the function returns true, indicating our key has high precedence, we will take over
	// leadership even if there is another un-expired leader.
	//
	// This can be used to implement a prioritized leader election. For example, if multiple
	// versions of the same application run simultaneously, we can ensure the newest version
	// will become the leader.
	//
	// It is the responsibility of the caller to ensure that all KeyComparison functions are
	// logically consistent between all clients participating in the leader election to avoid multiple
	// clients claiming to have high precedence and constantly pre-empting the existing leader.
	//
	// KeyComparison functions should ensure they handle an empty existingKey, as "key" is not a required field.
	//
	// Warning: when a lock is stolen (from KeyComparison returning true), the old leader may not
	// immediately be notified they have lost the leader election.
	KeyComparison KeyComparisonFunc
	// Callbacks are callbacks that are triggered during certain lifecycle
	// events of the LeaderElector
	Callbacks LeaderCallbacks
	// WatchDog is the associated health checker
	// WatchDog may be nil if it's not needed/configured.
	WatchDog *HealthzAdaptor
	// ReleaseOnCancel should be set true if the lock should be released
	// when the run context is canceled. If you set this to true, you must
	// ensure all code guarded by this lease has successfully completed
	// prior to canceling the context, or you may have two processes
	// simultaneously acting on the critical path.
	ReleaseOnCancel bool
	// Name is the name of the resource lock for debugging
	Name string
}
// LeaderCallbacks are callbacks that are triggered during certain
// lifecycle events of the LeaderElector. These are invoked asynchronously.
//
// possible future callbacks:
//   - OnChallenge()
type LeaderCallbacks struct {
	// OnStartedLeading is called when a LeaderElector client starts leading.
	// The passed context is canceled when leadership is lost.
	OnStartedLeading func(context.Context)
	// OnStoppedLeading is called when a LeaderElector client stops leading
	OnStoppedLeading func()
	// OnNewLeader is called when the client observes a leader that is
	// not the previously observed leader. This includes the first observed
	// leader when the client starts.
	OnNewLeader func(identity string)
}
// LeaderElector is a leader election client.
// Construct it with NewLeaderElector, which validates the configuration.
type LeaderElector struct {
	config LeaderElectionConfig
	// internal bookkeeping
	observedRecord    k8sresourcelock.LeaderElectionRecord // last record read from the lock; guarded by observedRecordLock
	observedRawRecord []byte                               // raw bytes of observedRecord as read from the lock
	observedTime      time.Time                            // local time at which observedRecord last changed
	// used to implement OnNewLeader(), may lag slightly from the
	// value observedRecord.HolderIdentity if the transition has
	// not yet been reported.
	reportedLeader string
	// clock is wrapper around time to allow for less flaky testing
	clock clock.Clock
	// used to lock the observedRecord
	observedRecordLock sync.Mutex
	metrics leaderMetricsAdapter
}
// Run starts the leader election loop. Run will not return
// before leader election loop is stopped by ctx or it has
// stopped holding the leader lease.
func (le *LeaderElector) Run(ctx context.Context) {
	// Deferred in this order so OnStoppedLeading runs before crash handling.
	defer runtime.HandleCrash()
	defer le.config.Callbacks.OnStoppedLeading()

	if !le.acquire(ctx) {
		// ctx signaled done before we could acquire the lease.
		return
	}
	// Derive a context that is canceled once renewal fails, so the
	// OnStartedLeading goroutine observes loss of leadership.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go le.config.Callbacks.OnStartedLeading(ctx)
	le.renew(ctx)
}
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate. RunOrDie blocks until leader election loop is
// stopped by ctx or it has stopped holding the leader lease.
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
	elector, err := NewLeaderElector(lec)
	if err != nil {
		panic(err)
	}
	if wd := lec.WatchDog; wd != nil {
		wd.SetLeaderElection(elector)
	}
	elector.Run(ctx)
}
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed.
// This function is for informational purposes. (e.g. monitoring, logs, etc.)
func (le *LeaderElector) GetLeader() string {
	return le.getObservedRecord().HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
	return le.getObservedRecord().HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
// Returns false if ctx signals done.
func (le *LeaderElector) acquire(ctx context.Context) bool {
	// Local cancel lets the success branch below break out of JitterUntil.
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	succeeded := false
	desc := le.config.Lock.Describe()
	klog.Infof("attempting to acquire leader lease %v...", desc)
	// Retry every RetryPeriod (jittered by JitterFactor) until acquired or ctx is done.
	wait.JitterUntil(func() {
		succeeded = le.tryAcquireOrRenew(ctx)
		// Fire OnNewLeader if the observed leader changed, regardless of outcome.
		le.maybeReportTransition()
		if !succeeded {
			klog.V(4).Infof("failed to acquire lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("became leader")
		le.metrics.leaderOn(le.config.Name)
		klog.Infof("successfully acquired lease %v", desc)
		// Stop the retry loop; we are the leader now.
		cancel()
	}, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
	return succeeded
}
// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done.
func (le *LeaderElector) renew(ctx context.Context) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	wait.Until(func() {
		// Each renewal attempt must complete within RenewDeadline.
		timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline)
		defer timeoutCancel()
		// Poll tryAcquireOrRenew every RetryPeriod until it succeeds or the deadline expires.
		err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) {
			return le.tryAcquireOrRenew(timeoutCtx), nil
		}, timeoutCtx.Done())
		le.maybeReportTransition()
		desc := le.config.Lock.Describe()
		if err == nil {
			klog.V(5).Infof("successfully renewed lease %v", desc)
			return
		}
		le.config.Lock.RecordEvent("stopped leading")
		le.metrics.leaderOff(le.config.Name)
		klog.Infof("failed to renew lease %v: %v", desc, err)
		// Renewal failed: cancel to break out of wait.Until and give up leadership.
		cancel()
	}, le.config.RetryPeriod, ctx.Done())
	// if we hold the lease, give it up
	if le.config.ReleaseOnCancel {
		le.release()
	}
}
// release attempts to release the leader lease if we have acquired it.
// Returns true if there was nothing to release or the release succeeded.
func (le *LeaderElector) release() bool {
	if !le.IsLeader() {
		return true
	}
	now := metav1.Now()
	// Write back a record with an empty HolderIdentity and a 1s duration so
	// another candidate can take over almost immediately.
	// NOTE(review): reads observedRecord without observedRecordLock; this
	// appears to rely on release only being called from renew on the
	// elector's own goroutine — confirm before calling it elsewhere.
	leaderElectionRecord := k8sresourcelock.LeaderElectionRecord{
		LeaderTransitions:    le.observedRecord.LeaderTransitions,
		LeaseDurationSeconds: 1,
		RenewTime:            now,
		AcquireTime:          now,
	}
	if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil {
		klog.Errorf("Failed to release lock: %v", err)
		return false
	}
	le.setObservedRecord(&leaderElectionRecord)
	return true
}
// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired,
// else it tries to renew the lease if it has already been acquired. Returns true
// on success else returns false.
func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
	now := metav1.Now()
	// The record we would like to write: ourselves as the holder, starting now.
	// HolderKey is an Istio-specific extension used for prioritized election.
	leaderElectionRecord := k8sresourcelock.LeaderElectionRecord{
		HolderIdentity:       le.config.Lock.Identity(),
		HolderKey:            le.config.Lock.Key(),
		LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second),
		RenewTime:            now,
		AcquireTime:          now,
	}
	// 1. obtain or create the ElectionRecord
	oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx)
	if err != nil {
		if !errors.IsNotFound(err) {
			klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err)
			return false
		}
		// No record exists yet; creating it makes us the first leader.
		if err = le.config.Lock.Create(ctx, leaderElectionRecord); err != nil {
			klog.Errorf("error initially creating leader election record: %v", err)
			return false
		}
		le.setObservedRecord(&leaderElectionRecord)
		return true
	}
	// 2. Record obtained, check the Identity & Time
	// Only refresh observedTime when the raw record actually changed, so the
	// expiry check below measures time since the holder last renewed.
	if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
		le.setObservedRecord(oldLeaderElectionRecord)
		le.observedRawRecord = oldLeaderElectionRawRecord
	}
	if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
		le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
		!le.IsLeader() {
		// Someone else holds an unexpired lease. We may still pre-empt them
		// when prioritized election (KeyComparison) says our key wins.
		if le.config.KeyComparison != nil && le.config.KeyComparison(oldLeaderElectionRecord.HolderKey) {
			// Lock is held and not expired, but our key is higher than the existing one.
			// We will pre-empt the existing leader.
			// nolint: lll
			klog.V(4).Infof("lock is held by %v with key %v, but our key (%v) evicts it", oldLeaderElectionRecord.HolderIdentity, oldLeaderElectionRecord.HolderKey, le.config.Lock.Key())
		} else {
			klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity)
			return false
		}
	}
	// 3. We're going to try to update. The leaderElectionRecord is set to its default
	// here. Let's correct it before updating.
	if le.IsLeader() {
		// Renewal: preserve the original acquire time and transition count.
		leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions
	} else {
		// Takeover: count one more leadership transition.
		leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1
	}
	// update the lock itself
	if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil {
		klog.Errorf("Failed to update lock: %v", err)
		return false
	}
	le.setObservedRecord(&leaderElectionRecord)
	return true
}
// maybeReportTransition invokes OnNewLeader (asynchronously) when the observed
// holder differs from the leader we last reported.
func (le *LeaderElector) maybeReportTransition() {
	if le.observedRecord.HolderIdentity == le.reportedLeader {
		return
	}
	le.reportedLeader = le.observedRecord.HolderIdentity
	if le.config.Callbacks.OnNewLeader != nil {
		go le.config.Callbacks.OnNewLeader(le.reportedLeader)
	}
}
// Check will determine if the current lease is expired by more than timeout.
// It is intended to back a health check: a nil return means healthy.
func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
	if !le.IsLeader() {
		// Currently not concerned with the case that we are hot standby
		return nil
	}
	// If we are more than timeout seconds after the lease duration that is past the timeout
	// on the lease renew. Time to start reporting ourselves as unhealthy. We should have
	// died but conditions like deadlock can prevent this. (See #70819)
	if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease {
		return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name)
	}
	return nil
}
// setObservedRecord will set a new observedRecord and update observedTime to the current time.
// Protect critical sections with lock.
func (le *LeaderElector) setObservedRecord(observedRecord *k8sresourcelock.LeaderElectionRecord) {
	le.observedRecordLock.Lock()
	defer le.observedRecordLock.Unlock()
	le.observedRecord = *observedRecord
	le.observedTime = le.clock.Now()
}

// getObservedRecord returns a copy of the current observedRecord.
// Protect critical sections with lock.
func (le *LeaderElector) getObservedRecord() k8sresourcelock.LeaderElectionRecord {
	le.observedRecordLock.Lock()
	defer le.observedRecordLock.Unlock()
	return le.observedRecord
}
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package k8sleaderelection
import (
"sync"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.

// leaderMetricsAdapter records whether this process currently owns the named lease.
type leaderMetricsAdapter interface {
	leaderOn(name string)
	leaderOff(name string)
}

// SwitchMetric represents a single on/off value that can be flipped per name.
type SwitchMetric interface {
	On(name string)
	Off(name string)
}

// noopMetric discards all observations.
type noopMetric struct{}

func (noopMetric) On(name string)  {}
func (noopMetric) Off(name string) {}
// defaultLeaderMetrics expects the caller to lock before setting any metrics.
type defaultLeaderMetrics struct {
	// leader's value indicates if the current process is the owner of name lease
	leader SwitchMetric
}

// leaderOn marks the named lease as held by this process. Safe on a nil receiver.
func (m *defaultLeaderMetrics) leaderOn(name string) {
	if m != nil {
		m.leader.On(name)
	}
}

// leaderOff marks the named lease as no longer held by this process. Safe on a nil receiver.
func (m *defaultLeaderMetrics) leaderOff(name string) {
	if m != nil {
		m.leader.Off(name)
	}
}
// noMetrics is a leaderMetricsAdapter that records nothing.
type noMetrics struct{}

func (noMetrics) leaderOn(name string)  {}
func (noMetrics) leaderOff(name string) {}

// MetricsProvider generates various metrics used by the leader election.
type MetricsProvider interface {
	// NewLeaderMetric returns the switch used to report lease ownership.
	NewLeaderMetric() SwitchMetric
}

// noopMetricsProvider is the default provider; its metrics discard all data.
type noopMetricsProvider struct{}

func (noopMetricsProvider) NewLeaderMetric() SwitchMetric {
	return noopMetric{}
}
// globalMetricsFactory is the process-wide source of leader metrics.
// Its provider can be replaced at most once via SetProvider.
var globalMetricsFactory = leaderMetricsFactory{
	metricsProvider: noopMetricsProvider{},
}

// leaderMetricsFactory builds leaderMetricsAdapters from a configurable provider.
type leaderMetricsFactory struct {
	metricsProvider MetricsProvider
	// onlyOnce ensures the provider can be replaced at most once.
	onlyOnce sync.Once
}
// setProvider installs mp as the metrics provider; only the first call
// (guarded by sync.Once) has any effect.
func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) {
	f.onlyOnce.Do(func() {
		f.metricsProvider = mp
	})
}
// newLeaderMetrics returns an adapter backed by the configured provider,
// or a no-op adapter when no real provider has been registered.
func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter {
	if provider := f.metricsProvider; provider != (noopMetricsProvider{}) {
		return &defaultLeaderMetrics{leader: provider.NewLeaderMetric()}
	}
	return noMetrics{}
}
// SetProvider sets the metrics provider for all subsequently created
// electors. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
	globalMetricsFactory.setProvider(metricsProvider)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package leaderelection
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
"go.uber.org/atomic"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/leaderelection/k8sleaderelection"
"istio.io/istio/pilot/pkg/leaderelection/k8sleaderelection/k8sresourcelock"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/revisions"
)
// Various locks used throughout the code
const (
	// NamespaceController elects the instance running the namespace controller.
	NamespaceController = "istio-namespace-controller-election"
	// ServiceExportController elects the instance running the serviceexport controller.
	ServiceExportController = "istio-serviceexport-controller-election"
	// This holds the legacy name to not conflict with older control plane deployments which are just
	// doing the ingress syncing.
	IngressController = "istio-leader"
	// GatewayStatusController controls the status of gateway.networking.k8s.io objects. For the v1alpha1
	// this was formerly "istio-gateway-leader"; because they are a different API group we need a different
	// election to ensure we do not only handle one or the other.
	GatewayStatusController = "istio-gateway-status-leader"
	StatusController        = "istio-status-leader"
	AnalyzeController       = "istio-analyze-leader"
	// GatewayDeploymentController controls translating Kubernetes Gateway objects into various derived
	// resources (Service, Deployment, etc).
	// Unlike other types which use ConfigMaps, we use a Lease here. This is because:
	// * Others use configmap for backwards compatibility
	// * This type is per-revision, so it is higher cost. Leases are cheaper
	// * Other types use "prioritized leader election", which isn't implemented for Lease
	GatewayDeploymentController = "istio-gateway-deployment"
)

// Leader election key prefix for remote istiod managed clusters
const remoteIstiodPrefix = "^"
// LeaderElection wraps a Kubernetes leader election, running the registered
// functions whenever this instance holds the lock.
type LeaderElection struct {
	namespace string
	name      string
	// runFns are each started on their own goroutine when we become leader.
	runFns []func(stop <-chan struct{})
	client kubernetes.Interface
	// ttl is the lease duration; renew deadline and retry period are derived from it in create().
	ttl time.Duration
	// enabled sets whether leader election is enabled. Setting enabled=false
	// before calling Run() bypasses leader election and assumes that we are
	// always leader, avoiding unnecessary lease updates on single-node
	// clusters.
	enabled bool
	// Criteria to determine leader priority.
	revision    string
	perRevision bool
	remote      bool
	// defaultWatcher tracks which revision is currently the cluster default.
	defaultWatcher revisions.DefaultWatcher
	// Records which "cycle" the election is on. This is incremented each time an election is won and then lost
	// This is mostly just for testing
	cycle      *atomic.Int32
	electionID string
	// Store as field for testing
	le *k8sleaderelection.LeaderElector
	// mu guards le.
	mu sync.RWMutex
}
// Run will start leader election, calling all runFns when we become the leader.
// If leader election is disabled, it skips straight to the runFns.
func (l *LeaderElection) Run(stop <-chan struct{}) {
	if !l.enabled {
		log.Infof("bypassing leader election: %v", l.electionID)
		for _, f := range l.runFns {
			go f(stop)
		}
		<-stop
		return
	}
	if l.defaultWatcher != nil {
		go l.defaultWatcher.Run(stop)
	}
	// Each loop iteration is one election "cycle": we re-enter the election
	// after losing the lock (e.g. when the default revision steals it).
	for {
		le, err := l.create()
		if err != nil {
			// This should never happen; errors are only from invalid input and the input is not user modifiable
			panic("LeaderElection creation failed: " + err.Error())
		}
		l.mu.Lock()
		l.le = le
		l.cycle.Inc()
		l.mu.Unlock()
		// Translate the stop channel into context cancellation for the elector.
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			<-stop
			cancel()
		}()
		// Blocks until leadership is acquired and then lost, or ctx is done.
		le.Run(ctx)
		select {
		case <-stop:
			// We were told to stop explicitly. Exit now
			return
		default:
			cancel()
			// Otherwise, we may have lost our lock. This can happen when the default revision changes and steals
			// the lock from us.
			log.Infof("Leader election cycle %v lost. Trying again", l.cycle.Load())
		}
	}
}
// create builds the underlying elector: a ConfigMap lock with prioritized
// (key-comparison) election by default, or a per-revision Lease lock when
// perRevision is set.
func (l *LeaderElection) create() (*k8sleaderelection.LeaderElector, error) {
	callbacks := k8sleaderelection.LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) {
			log.Infof("leader election lock obtained: %v", l.electionID)
			for _, f := range l.runFns {
				go f(ctx.Done())
			}
		},
		OnStoppedLeading: func() {
			log.Infof("leader election lock lost: %v", l.electionID)
		},
	}
	// The key encodes our revision; remote instances carry a prefix so that
	// local instances of the same revision are preferred over remote ones.
	key := l.revision
	if l.remote {
		key = remoteIstiodPrefix + key
	}
	var lock k8sresourcelock.Interface = &k8sresourcelock.ConfigMapLock{
		ConfigMapMeta: metav1.ObjectMeta{Namespace: l.namespace, Name: l.electionID},
		Client:        l.client.CoreV1(),
		LockConfig: k8sresourcelock.ResourceLockConfig{
			Identity: l.name,
			Key:      key,
		},
	}
	if l.perRevision {
		lock = &k8sresourcelock.LeaseLock{
			LeaseMeta: metav1.ObjectMeta{Namespace: l.namespace, Name: l.electionID},
			Client:    l.client.CoordinationV1(),
			// Note: Key is NOT used. This is not implemented in the library for Lease nor needed, since this is already per-revision.
			// See below, where we disable KeyComparison
			LockConfig: k8sresourcelock.ResourceLockConfig{
				Identity: l.name,
			},
		}
	}
	config := k8sleaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: l.ttl,
		RenewDeadline: l.ttl / 2,
		RetryPeriod:   l.ttl / 4,
		Callbacks:     callbacks,
		// When Pilot exits, the lease will be dropped. This is more likely to lead to a case where
		// two instances are both considered the leaders. As such, if this is intended to be used for mission-critical
		// usages (rather than avoiding duplication of work), this may need to be re-evaluated.
		ReleaseOnCancel: true,
	}
	if !l.perRevision {
		// Function to use to decide whether this leader should steal the existing lock.
		// This is disabled when perRevision is used, as this enables the Lease. Lease doesn't have a holderKey field to place our key
		// as holderKey is an Istio specific fork.
		// While it's possible to make it work with Lease as well (via an annotation to store it), we don't ever need prioritized
		// election for these per-revision ones anyways, since the prioritization is about preferring one revision over others.
		config.KeyComparison = func(leaderKey string) bool {
			return LocationPrioritizedComparison(leaderKey, l)
		}
	}
	return k8sleaderelection.NewLeaderElector(config)
}
// LocationPrioritizedComparison decides whether this instance should steal the
// lock from the current holder, whose key (revision, optionally prefixed when
// the holder is remote) is given in currentLeaderRevision. The default
// revision always wins over a non-default one; for the same revision, a local
// instance wins over a remote one.
func LocationPrioritizedComparison(currentLeaderRevision string, l *LeaderElection) bool {
	leaderIsRemote := strings.HasPrefix(currentLeaderRevision, remoteIstiodPrefix)
	leaderRevision := strings.TrimPrefix(currentLeaderRevision, remoteIstiodPrefix)
	defaultRevision := l.defaultWatcher.GetDefault()
	weAreDefault := defaultRevision != "" && defaultRevision == l.revision
	if weAreDefault && l.revision != leaderRevision {
		// Always steal the lock if the new one is the default revision and the current one is not
		return true
	}
	// Otherwise steal the lock if the new one and the current one are the same revision, but new one is local and current is remote
	return l.revision == leaderRevision && leaderIsRemote && !l.remote
}
// AddRunFunction registers a function to run when we are the leader. These will be run asynchronously.
// To avoid running when not a leader, functions should respect the stop channel.
// The receiver is returned to allow chaining.
func (l *LeaderElection) AddRunFunction(f func(stop <-chan struct{})) *LeaderElection {
	l.runFns = append(l.runFns, f)
	return l
}
// NewLeaderElection creates a leader election instance with the provided ID. This follows standard Kubernetes
// elections, with one difference: the "default" revision will steal the lock from other revisions.
func NewLeaderElection(namespace, name, electionID, revision string, client kube.Client) *LeaderElection {
	return newLeaderElection(namespace, name, electionID, revision, false, false, client)
}

// NewPerRevisionLeaderElection creates a *per revision* leader election. This means there will be one leader for each revision.
func NewPerRevisionLeaderElection(namespace, name, electionID, revision string, client kube.Client) *LeaderElection {
	return newLeaderElection(namespace, name, electionID, revision, true, false, client)
}

// NewLeaderElectionMulticluster creates a leader election for an istiod that may manage a remote
// cluster; remote instances lose prioritized elections to local ones of the same revision.
func NewLeaderElectionMulticluster(namespace, name, electionID, revision string, remote bool, client kube.Client) *LeaderElection {
	return newLeaderElection(namespace, name, electionID, revision, false, remote, client)
}
// newLeaderElection is the shared constructor behind the exported New* helpers.
// name identifies this participant (falling back to "unknown-<hostname>"), and
// perRevision appends the revision to the election ID so each revision holds
// its own election.
func newLeaderElection(namespace, name, electionID, revision string, perRevision bool, remote bool, client kube.Client) *LeaderElection {
	var watcher revisions.DefaultWatcher
	if features.EnableLeaderElection {
		watcher = revisions.NewDefaultWatcher(client, revision)
	}
	if name == "" {
		// Best-effort fallback identity; a hostname error is deliberately ignored.
		hn, _ := os.Hostname()
		name = fmt.Sprintf("unknown-%s", hn)
	}
	if perRevision && revision != "" {
		electionID += "-" + revision
	}
	return &LeaderElection{
		namespace:      namespace,
		name:           name,
		client:         client.Kube(),
		electionID:     electionID,
		revision:       revision,
		perRevision:    perRevision,
		enabled:        features.EnableLeaderElection,
		remote:         remote,
		defaultWatcher: watcher,
		// Default to a 30s ttl. Overridable for tests
		ttl:   time.Second * 30,
		cycle: atomic.NewInt32(0),
		mu:    sync.RWMutex{},
	}
}
// isLeader reports whether this instance currently holds the lock. When leader
// election is disabled we always act as the leader.
func (l *LeaderElection) isLeader() bool {
	l.mu.RLock()
	defer l.mu.RUnlock()
	switch {
	case !l.enabled:
		return true
	case l.le == nil:
		// No election cycle has been started yet.
		return false
	default:
		return l.le.IsLeader()
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sync"
"istio.io/istio/pkg/cluster"
)
// AddressMap provides a thread-safe mapping of addresses for each Kubernetes cluster.
type AddressMap struct {
	// Addresses hold the underlying map. Most code should only access this through the available methods.
	// Should only be used by tests and construction/initialization logic, where there is no concern
	// for race conditions.
	Addresses map[cluster.ID][]string
	// mutex guards Addresses.
	// NOTE: The copystructure library is not able to copy unexported fields, so the mutex will not be copied.
	mutex sync.RWMutex
}
// Len returns the number of clusters that currently have addresses.
// Safe to call on a nil receiver.
func (m *AddressMap) Len() int {
	if m == nil {
		return 0
	}
	m.mutex.RLock()
	n := len(m.Addresses)
	m.mutex.RUnlock()
	return n
}

// DeepCopy returns a new AddressMap holding a copy of the address table.
// Safe to call on a nil receiver (returns nil). The mutex is not copied.
func (m *AddressMap) DeepCopy() *AddressMap {
	if m == nil {
		return nil
	}
	return &AddressMap{Addresses: m.GetAddresses()}
}
// GetAddresses returns a copy of the mapping of clusters to addresses.
// Safe to call on a nil receiver; nil slice values are preserved as nil.
func (m *AddressMap) GetAddresses() map[cluster.ID][]string {
	if m == nil {
		return nil
	}
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if m.Addresses == nil {
		return nil
	}
	copied := make(map[cluster.ID][]string)
	for c, addrs := range m.Addresses {
		var dup []string
		if addrs != nil {
			dup = make([]string, len(addrs))
			copy(dup, addrs)
		}
		copied[c] = dup
	}
	return copied
}
// SetAddresses replaces the entire address table. An empty or nil input
// clears it.
func (m *AddressMap) SetAddresses(addrs map[cluster.ID][]string) {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if len(addrs) == 0 {
		m.Addresses = nil
		return
	}
	m.Addresses = addrs
}
// GetAddressesFor returns a copy of the addresses for the given cluster.
// Safe to call on a nil receiver.
func (m *AddressMap) GetAddressesFor(c cluster.ID) []string {
	if m == nil {
		return nil
	}
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	if m.Addresses == nil {
		return nil
	}
	// Copy the cluster's slice so callers cannot mutate our state.
	addrs := m.Addresses[c]
	out := make([]string, len(addrs))
	copy(out, addrs)
	return out
}
// SetAddressesFor replaces the addresses of the given cluster. An empty input
// removes the cluster's entry, dropping the whole map once it becomes empty.
// The receiver is returned to allow chaining.
func (m *AddressMap) SetAddressesFor(c cluster.ID, addresses []string) *AddressMap {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if len(addresses) > 0 {
		// Lazily allocate the map on first write.
		if m.Addresses == nil {
			m.Addresses = make(map[cluster.ID][]string)
		}
		m.Addresses[c] = addresses
		return m
	}
	delete(m.Addresses, c) // delete on a nil map is a no-op
	if len(m.Addresses) == 0 {
		m.Addresses = nil
	}
	return m
}
// AddAddressesFor appends addresses to the given cluster's entry, allocating
// the map on first use. Empty input is a no-op. The receiver is returned to
// allow chaining.
func (m *AddressMap) AddAddressesFor(c cluster.ID, addresses []string) *AddressMap {
	if len(addresses) == 0 {
		return m
	}
	m.mutex.Lock()
	defer m.mutex.Unlock()
	if m.Addresses == nil {
		m.Addresses = map[cluster.ID][]string{}
	}
	m.Addresses[c] = append(m.Addresses[c], addresses...)
	return m
}
// ForEach invokes fn for every cluster entry while holding the read lock.
// fn must not call back into methods that take the lock. Safe on a nil receiver.
func (m *AddressMap) ForEach(fn func(c cluster.ID, addresses []string)) {
	if m == nil {
		return
	}
	m.mutex.RLock()
	defer m.mutex.RUnlock()
	// Ranging over a nil map is a no-op, so no separate nil check is needed.
	for c, addrs := range m.Addresses {
		fn(c, addrs)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"crypto/md5"
"fmt"
"strings"
"time"
"istio.io/api/security/v1beta1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
)
// MutualTLSMode is the mutual TLS mode specified by authentication policy.
type MutualTLSMode int

const (
	// MTLSUnknown is used to indicate the variable hasn't been initialized correctly (with the authentication policy).
	MTLSUnknown MutualTLSMode = iota
	// MTLSDisable if authentication policy disable mTLS.
	MTLSDisable
	// MTLSPermissive if authentication policy enable mTLS in permissive mode.
	MTLSPermissive
	// MTLSStrict if authentication policy enable mTLS in strict mode.
	MTLSStrict
)

// In Ambient, we convert k8s PeerAuthentication resources to the same type as AuthorizationPolicies
// To prevent conflicts in xDS, we add this prefix to the converted PeerAuthentication resources.
const convertedPeerAuthenticationPrefix = "converted_peer_authentication_" // use '_' character since those are illegal in k8s names

// String converts MutualTLSMode to a human readable string for debugging.
// Unrecognized values (including MTLSUnknown) render as "UNKNOWN".
func (mode MutualTLSMode) String() string {
	names := map[MutualTLSMode]string{
		MTLSDisable:    "DISABLE",
		MTLSPermissive: "PERMISSIVE",
		MTLSStrict:     "STRICT",
	}
	if name, ok := names[mode]; ok {
		return name
	}
	return "UNKNOWN"
}
// ConvertToMutualTLSMode converts from peer authn MTLS mode (`PeerAuthentication_MutualTLS_Mode`)
// to the MTLS mode specified by authn policy. Values with no mapping
// (including UNSET) convert to MTLSUnknown.
func ConvertToMutualTLSMode(mode v1beta1.PeerAuthentication_MutualTLS_Mode) MutualTLSMode {
	conversions := map[v1beta1.PeerAuthentication_MutualTLS_Mode]MutualTLSMode{
		v1beta1.PeerAuthentication_MutualTLS_DISABLE:    MTLSDisable,
		v1beta1.PeerAuthentication_MutualTLS_PERMISSIVE: MTLSPermissive,
		v1beta1.PeerAuthentication_MutualTLS_STRICT:     MTLSStrict,
	}
	if converted, ok := conversions[mode]; ok {
		return converted
	}
	return MTLSUnknown
}
// AuthenticationPolicies organizes authentication (mTLS + JWT) policies by namespace.
type AuthenticationPolicies struct {
	// Maps from namespace to the v1beta1 authentication policies.
	requestAuthentications map[string][]config.Config
	peerAuthentications    map[string][]config.Config
	// namespaceMutualTLSMode is the MutualTLSMode corresponding to the namespace-level PeerAuthentication.
	// All namespace-level policies, and only them, are added to this map. If the policy mTLS mode is set
	// to UNSET, it will be resolved to the value set by mesh policy if exist (i.e not UNKNOWN), or MTLSPermissive
	// otherwise.
	namespaceMutualTLSMode map[string]MutualTLSMode
	// globalMutualTLSMode is the MutualTLSMode corresponding to the mesh-level PeerAuthentication.
	// This value can be MTLSUnknown, if there is no mesh-level policy.
	globalMutualTLSMode MutualTLSMode
	// rootNamespace is the mesh root namespace; policies there apply mesh-wide.
	rootNamespace string
	// aggregateVersion contains the versions of all peer authentications.
	aggregateVersion string
}
// initAuthenticationPolicies creates a new AuthenticationPolicies struct and populates with the
// authentication policies in the mesh environment.
func initAuthenticationPolicies(env *Environment) *AuthenticationPolicies {
	policy := &AuthenticationPolicies{
		requestAuthentications: map[string][]config.Config{},
		peerAuthentications:    map[string][]config.Config{},
		globalMutualTLSMode:    MTLSUnknown,
		rootNamespace:          env.Mesh().GetRootNamespace(),
	}
	// Oldest-first ordering lets "first policy wins" rules apply downstream.
	policy.addRequestAuthentication(sortConfigByCreationTime(env.List(gvk.RequestAuthentication, NamespaceAll)))
	policy.addPeerAuthentication(sortConfigByCreationTime(env.List(gvk.PeerAuthentication, NamespaceAll)))
	return policy
}
// addRequestAuthentication indexes the given RequestAuthentication configs by namespace.
func (policy *AuthenticationPolicies) addRequestAuthentication(configs []config.Config) {
	for i := range configs {
		ns := configs[i].Namespace
		policy.requestAuthentications[ns] = append(policy.requestAuthentications[ns], configs[i])
	}
}
// addPeerAuthentication indexes PeerAuthentication configs by namespace and
// resolves the effective mesh-level and per-namespace mTLS modes. When multiple
// selector-less (namespace/mesh scope) policies exist for a namespace, only the
// oldest is honored; later duplicates are logged and skipped entirely.
func (policy *AuthenticationPolicies) addPeerAuthentication(configs []config.Config) {
	// Sort configs in ascending order by their creation time.
	sortConfigByCreationTime(configs)
	foundNamespaceMTLS := make(map[string]v1beta1.PeerAuthentication_MutualTLS_Mode)
	// Track which namespace/mesh level policy seen so far to make sure the oldest one is used.
	seenNamespaceOrMeshConfig := make(map[string]time.Time)
	versions := []string{}
	for _, config := range configs {
		// Collect UID+ResourceVersion of every policy for the aggregate version hash.
		versions = append(versions, config.UID+"."+config.ResourceVersion)
		// Mesh & namespace level policy are those that have empty selector.
		spec := config.Spec.(*v1beta1.PeerAuthentication)
		if spec.Selector == nil || len(spec.Selector.MatchLabels) == 0 {
			if t, ok := seenNamespaceOrMeshConfig[config.Namespace]; ok {
				log.Warnf(
					"Namespace/mesh-level PeerAuthentication is already defined for %q at time %v. Ignore %q which was created at time %v",
					config.Namespace, t, config.Name, config.CreationTimestamp)
				// Skips the peerAuthentications indexing below as well.
				continue
			}
			seenNamespaceOrMeshConfig[config.Namespace] = config.CreationTimestamp
			mode := v1beta1.PeerAuthentication_MutualTLS_UNSET
			if spec.Mtls != nil {
				mode = spec.Mtls.Mode
			}
			if config.Namespace == policy.rootNamespace {
				// This is mesh-level policy. UNSET is treated as permissive for mesh-policy.
				if mode == v1beta1.PeerAuthentication_MutualTLS_UNSET {
					policy.globalMutualTLSMode = MTLSPermissive
				} else {
					policy.globalMutualTLSMode = ConvertToMutualTLSMode(mode)
				}
			} else {
				// For regular namespace, just add to the intermediate map.
				foundNamespaceMTLS[config.Namespace] = mode
			}
		}
		// Add the config to the map by namespace for future look up. This is done after namespace/mesh
		// singleton check so there should be at most one namespace/mesh config is added to the map.
		policy.peerAuthentications[config.Namespace] = append(policy.peerAuthentications[config.Namespace], config)
	}
	// nolint: gosec
	// Not security sensitive code
	policy.aggregateVersion = fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(versions, ";"))))
	// Process found namespace-level policy.
	policy.namespaceMutualTLSMode = make(map[string]MutualTLSMode, len(foundNamespaceMTLS))
	// Namespace-level UNSET inherits the mesh-level mode (default permissive).
	inheritedMTLSMode := policy.globalMutualTLSMode
	if inheritedMTLSMode == MTLSUnknown {
		// If the mesh policy is not explicitly presented, use default value MTLSPermissive.
		inheritedMTLSMode = MTLSPermissive
	}
	for ns, mtlsMode := range foundNamespaceMTLS {
		if mtlsMode == v1beta1.PeerAuthentication_MutualTLS_UNSET {
			policy.namespaceMutualTLSMode[ns] = inheritedMTLSMode
		} else {
			policy.namespaceMutualTLSMode[ns] = ConvertToMutualTLSMode(mtlsMode)
		}
	}
}
// GetNamespaceMutualTLSMode returns the MutualTLSMode as defined by a namespace or mesh level
// PeerAuthentication. The return value could be `MTLSUnknown` if there is no mesh nor namespace
// PeerAuthentication policy for the given namespace.
func (policy *AuthenticationPolicies) GetNamespaceMutualTLSMode(namespace string) MutualTLSMode {
	mode, ok := policy.namespaceMutualTLSMode[namespace]
	if !ok {
		// Fall back to the mesh-wide mode.
		return policy.globalMutualTLSMode
	}
	return mode
}
// GetJwtPoliciesForWorkload returns a list of JWT policies matching to labels.
// Both the workload's namespace and the root namespace are consulted.
func (policy *AuthenticationPolicies) GetJwtPoliciesForWorkload(namespace string,
	workloadLabels labels.Instance,
	isWaypoint bool,
) []*config.Config {
	return getConfigsForWorkload(policy.requestAuthentications, WorkloadSelectionOpts{
		RootNamespace:  policy.rootNamespace,
		Namespace:      namespace,
		WorkloadLabels: workloadLabels,
		IsWaypoint:     isWaypoint,
	})
}

// GetPeerAuthenticationsForWorkload returns a list of peer authentication policies matching to labels.
// Both the workload's namespace and the root namespace are consulted.
func (policy *AuthenticationPolicies) GetPeerAuthenticationsForWorkload(namespace string,
	workloadLabels labels.Instance,
	isWaypoint bool,
) []*config.Config {
	return getConfigsForWorkload(policy.peerAuthentications, WorkloadSelectionOpts{
		RootNamespace:  policy.rootNamespace,
		Namespace:      namespace,
		WorkloadLabels: workloadLabels,
		IsWaypoint:     isWaypoint,
	})
}
// GetRootNamespace return root namespace that is tracked by the policy object.
func (policy *AuthenticationPolicies) GetRootNamespace() string {
	return policy.rootNamespace
}

// GetVersion returns the aggregate version of all peer authentications.
func (policy *AuthenticationPolicies) GetVersion() string {
	return policy.aggregateVersion
}
// GetAmbientPolicyConfigName returns the xDS name for the given config key.
// PeerAuthentication resources converted for Ambient get a distinguishing
// prefix; everything else keeps its own name.
func GetAmbientPolicyConfigName(key ConfigKey) string {
	if key.Kind == kind.PeerAuthentication {
		return convertedPeerAuthenticationPrefix + key.Name
	}
	return key.Name
}
// getConfigsForWorkload returns pointers to the policies in configsByNamespace
// that apply to the workload described by selectionOpts, looking in the
// workload's namespace and (when different) the root namespace. A nil/empty
// selector matches every workload.
func getConfigsForWorkload(configsByNamespace map[string][]config.Config, selectionOpts WorkloadSelectionOpts) []*config.Config {
	workloadLabels := selectionOpts.WorkloadLabels
	namespace := selectionOpts.Namespace
	rootNamespace := selectionOpts.RootNamespace
	configs := make([]*config.Config, 0)
	var lookupInNamespaces []string
	if namespace != rootNamespace {
		// Only check the root namespace if the (workload) namespace is not already the root namespace
		// to avoid double inclusion.
		lookupInNamespaces = []string{namespace, rootNamespace}
	} else {
		lookupInNamespaces = []string{namespace}
	}
	for _, ns := range lookupInNamespaces {
		if nsConfig, ok := configsByNamespace[ns]; ok {
			for idx := range nsConfig {
				cfg := &nsConfig[idx]
				if ns != cfg.Namespace {
					// Should never come here. Log warning just in case.
					log.Warnf("Seeing config %s with namespace %s in map entry for %s. Ignored", cfg.Name, cfg.Namespace, ns)
					continue
				}
				var selector labels.Instance // NOTE: nil/empty selector matches all workloads
				switch cfg.GroupVersionKind {
				case gvk.RequestAuthentication:
					ra := cfg.Spec.(*v1beta1.RequestAuthentication)
					// The matcher decides whether to use the selector, match the
					// policy directly, or skip it entirely.
					switch getPolicyMatcher(cfg.GroupVersionKind, cfg.Name, selectionOpts, ra) {
					case policyMatchSelector:
						selector = ra.GetSelector().GetMatchLabels()
					case policyMatchDirect:
						configs = append(configs, cfg)
						continue
					case policyMatchIgnore:
						continue
					}
				case gvk.PeerAuthentication:
					selector = cfg.Spec.(*v1beta1.PeerAuthentication).GetSelector().GetMatchLabels()
				default:
					log.Warnf("Not support authentication type %q", cfg.GroupVersionKind)
					continue
				}
				if selector.SubsetOf(workloadLabels) {
					configs = append(configs, cfg)
				}
			}
		}
	}
	return configs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
authpb "istio.io/api/security/v1beta1"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
)
// AuthorizationPolicy bundles an AuthorizationPolicy spec together with the
// resource metadata (name, namespace, annotations) needed to evaluate and
// report the policy.
type AuthorizationPolicy struct {
	Name        string                      `json:"name"`
	Namespace   string                      `json:"namespace"`
	Annotations map[string]string           `json:"annotations"`
	Spec        *authpb.AuthorizationPolicy `json:"spec"`
}
// AuthorizationPolicies organizes AuthorizationPolicy by namespace.
type AuthorizationPolicies struct {
	// Maps from namespace to the Authorization policies.
	// Each namespace's list is ordered by creation time (see GetAuthorizationPolicies).
	NamespaceToPolicies map[string][]AuthorizationPolicy `json:"namespace_to_policies"`
	// The name of the root namespace. Policy in the root namespace applies to workloads in all namespaces.
	RootNamespace string `json:"root_namespace"`
}
// GetAuthorizationPolicies returns the AuthorizationPolicies for the given environment.
// Policies are listed across all namespaces, ordered by creation time, and
// grouped per namespace.
func GetAuthorizationPolicies(env *Environment) *AuthorizationPolicies {
	out := &AuthorizationPolicies{
		NamespaceToPolicies: map[string][]AuthorizationPolicy{},
		RootNamespace:       env.Mesh().GetRootNamespace(),
	}
	for _, cfg := range sortConfigByCreationTime(env.List(gvk.AuthorizationPolicy, NamespaceAll)) {
		ns := cfg.Namespace
		out.NamespaceToPolicies[ns] = append(out.NamespaceToPolicies[ns], AuthorizationPolicy{
			Name:        cfg.Name,
			Namespace:   ns,
			Annotations: cfg.Annotations,
			Spec:        cfg.Spec.(*authpb.AuthorizationPolicy),
		})
	}
	return out
}
// AuthorizationPoliciesResult groups the policies that apply to a workload by
// their action (CUSTOM, DENY, ALLOW, AUDIT).
type AuthorizationPoliciesResult struct {
	Custom []AuthorizationPolicy
	Deny   []AuthorizationPolicy
	Allow  []AuthorizationPolicy
	Audit  []AuthorizationPolicy
}
// ListAuthorizationPolicies returns authorization policies applied to the workload in the given namespace.
// Root-namespace policies (mesh-wide) are evaluated before the workload
// namespace's own policies; a nil receiver yields an empty result.
func (policy *AuthorizationPolicies) ListAuthorizationPolicies(selectionOpts WorkloadSelectionOpts) AuthorizationPoliciesResult {
	result := AuthorizationPoliciesResult{}
	if policy == nil {
		return result
	}
	ns := selectionOpts.Namespace
	workloadLabels := selectionOpts.WorkloadLabels
	// Root namespace first; skip it when the workload already lives there
	// to avoid double inclusion.
	namespaces := []string{ns}
	if ns != policy.RootNamespace {
		namespaces = []string{policy.RootNamespace, ns}
	}
	for _, lookup := range namespaces {
		for _, cfg := range policy.NamespaceToPolicies[lookup] {
			switch getPolicyMatcher(gvk.AuthorizationPolicy, cfg.Name, selectionOpts, cfg.Spec) {
			case policyMatchSelector:
				if labels.Instance(cfg.Spec.GetSelector().GetMatchLabels()).SubsetOf(workloadLabels) {
					result = updateAuthorizationPoliciesResult(result, cfg)
				}
			case policyMatchDirect:
				result = updateAuthorizationPoliciesResult(result, cfg)
			}
		}
	}
	return result
}
// updateAuthorizationPoliciesResult appends p to the bucket matching its action
// and returns the updated result; policies with an unrecognized action are
// logged and dropped.
func updateAuthorizationPoliciesResult(result AuthorizationPoliciesResult, p AuthorizationPolicy) AuthorizationPoliciesResult {
	log.Infof("applying authorization policy %s.%s",
		p.Namespace, p.Name)
	switch p.Spec.GetAction() {
	case authpb.AuthorizationPolicy_ALLOW:
		result.Allow = append(result.Allow, p)
	case authpb.AuthorizationPolicy_DENY:
		result.Deny = append(result.Deny, p)
	case authpb.AuthorizationPolicy_AUDIT:
		result.Audit = append(result.Audit, p)
	case authpb.AuthorizationPolicy_CUSTOM:
		result.Custom = append(result.Custom, p)
	default:
		log.Errorf("ignored authorization policy %s.%s with unsupported action: %s",
			p.Namespace, p.Name, p.Spec.GetAction())
	}
	return result
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"sync"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/util/sets"
)
// Namespaces and service hostnames treated as cluster-local by default.
// Mesh-config ServiceSettings can override these (see onMeshUpdated).
var (
	defaultClusterLocalNamespaces = []string{"kube-system"}
	defaultClusterLocalServices   = []string{"kubernetes.default.svc"}
)
// ClusterLocalHosts is a map of host names or wildcard patterns which should only
// be made accessible from within the same cluster.
type ClusterLocalHosts struct {
	// specific holds exact hostnames; wildcard holds "*."-prefixed patterns.
	specific sets.Set[host.Name]
	wildcard sets.Set[host.Name]
}
// IsClusterLocal indicates whether the given host should be treated as a
// cluster-local destination, i.e. it matches either an exact entry or a
// wildcard pattern in this set.
func (c ClusterLocalHosts) IsClusterLocal(h host.Name) bool {
	_, _, found := MostSpecificHostMatch(h, c.specific, c.wildcard)
	return found
}
// ClusterLocalProvider provides the cluster-local hosts.
// Implementations are expected to be safe for concurrent use (the default
// implementation, clusterLocalProvider, guards its state with an RWMutex).
type ClusterLocalProvider interface {
	// GetClusterLocalHosts returns the list of cluster-local hosts, sorted in
	// ascending order. The caller must not modify the returned list.
	GetClusterLocalHosts() ClusterLocalHosts
}
// NewClusterLocalProvider returns a new ClusterLocalProvider for the Environment.
// The provider recomputes its host set on every mesh-config update and once
// immediately to establish the initial state.
func NewClusterLocalProvider(e *Environment) ClusterLocalProvider {
	p := &clusterLocalProvider{}
	e.AddMeshHandler(func() { p.onMeshUpdated(e) })
	p.onMeshUpdated(e)
	return p
}
var _ ClusterLocalProvider = &clusterLocalProvider{}

// clusterLocalProvider is the default ClusterLocalProvider implementation.
// Its host set is rebuilt and swapped wholesale by onMeshUpdated.
type clusterLocalProvider struct {
	// mutex guards hosts.
	mutex sync.RWMutex
	hosts ClusterLocalHosts
}
// GetClusterLocalHosts returns the current cluster-local host set under a read lock.
func (c *clusterLocalProvider) GetClusterLocalHosts() ClusterLocalHosts {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	return c.hosts
}
// onMeshUpdated rebuilds the cluster-local host set from the built-in defaults
// plus the mesh config's ServiceSettings, then atomically replaces c.hosts.
// Explicitly non-cluster-local settings can remove individual defaults.
func (c *clusterLocalProvider) onMeshUpdated(e *Environment) {
	// Create the default list of cluster-local hosts.
	domainSuffix := e.DomainSuffix
	defaultClusterLocalHosts := make([]host.Name, 0)
	for _, n := range defaultClusterLocalNamespaces {
		defaultClusterLocalHosts = append(defaultClusterLocalHosts, host.Name("*."+n+".svc."+domainSuffix))
	}
	for _, s := range defaultClusterLocalServices {
		defaultClusterLocalHosts = append(defaultClusterLocalHosts, host.Name(s+"."+domainSuffix))
	}
	// The discovery address itself is also treated as cluster-local by default.
	if discoveryHost, _, err := e.GetDiscoveryAddress(); err != nil {
		log.Errorf("failed to make discoveryAddress cluster-local: %v", err)
	} else {
		if !strings.HasSuffix(string(discoveryHost), domainSuffix) {
			discoveryHost += host.Name("." + domainSuffix)
		}
		defaultClusterLocalHosts = append(defaultClusterLocalHosts, discoveryHost)
	}
	// Collect the cluster-local hosts.
	hosts := ClusterLocalHosts{
		specific: make(map[host.Name]struct{}, 0),
		wildcard: make(map[host.Name]struct{}, 0),
	}
	for _, serviceSettings := range e.Mesh().ServiceSettings {
		if serviceSettings.GetSettings().GetClusterLocal() {
			for _, h := range serviceSettings.GetHosts() {
				hostname := host.Name(h)
				if hostname.IsWildCarded() {
					hosts.wildcard.Insert(hostname)
				} else {
					hosts.specific.Insert(hostname)
				}
			}
		} else {
			// Remove defaults if specified to be non-cluster-local.
			for _, h := range serviceSettings.GetHosts() {
				for i, defaultClusterLocalHost := range defaultClusterLocalHosts {
					if len(defaultClusterLocalHost) > 0 {
						// A default is removed on an exact match, or when a wildcard
						// default's suffix matches the configured host.
						if h == string(defaultClusterLocalHost) ||
							(defaultClusterLocalHost.IsWildCarded() &&
								strings.HasSuffix(h, string(defaultClusterLocalHost[1:]))) {
							// This default was explicitly overridden, so remove it.
							// Cleared in place; empty entries are skipped below.
							defaultClusterLocalHosts[i] = ""
						}
					}
				}
			}
		}
	}
	// Add any remaining defaults to the end of the list.
	for _, defaultClusterLocalHost := range defaultClusterLocalHosts {
		if len(defaultClusterLocalHost) > 0 {
			if defaultClusterLocalHost.IsWildCarded() {
				hosts.wildcard.Insert(defaultClusterLocalHost)
			} else {
				hosts.specific.Insert(defaultClusterLocalHost)
			}
		}
	}
	// Swap in the new set under the write lock.
	c.mutex.Lock()
	c.hosts = hosts
	c.mutex.Unlock()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sort"
"strings"
udpa "github.com/cncf/xds/go/udpa/type/v1"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/hash"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
)
// Statically link protobuf descriptors from UDPA
var _ = udpa.TypedStruct{}

// ConfigHash is a 64-bit hash of a ConfigKey (see ConfigKey.HashCode).
type ConfigHash uint64

// ConfigKey describe a specific config item.
// In most cases, the name is the config's name. However, for ServiceEntry it is service's FQDN.
type ConfigKey struct {
	Kind      kind.Kind
	Name      string
	Namespace string
}
// HashCode computes a 64-bit hash over kind, namespace, and name.
func (key ConfigKey) HashCode() ConfigHash {
	hasher := hash.New()
	hasher.Write([]byte{byte(key.Kind)})
	// "/" separators prevent collisions between adjacent fields,
	// e.g. (ns="ab", name="c") vs (ns="a", name="bc").
	hasher.WriteString("/")
	hasher.WriteString(key.Namespace)
	hasher.WriteString("/")
	hasher.WriteString(key.Name)
	return ConfigHash(hasher.Sum64())
}
// String renders the key as "kind/namespace/name".
func (key ConfigKey) String() string {
	return strings.Join([]string{key.Kind.String(), key.Namespace, key.Name}, "/")
}
// ConfigsOfKind extracts configs of the specified kind.
func ConfigsOfKind(configs sets.Set[ConfigKey], kind kind.Kind) sets.Set[ConfigKey] {
	// Use sets.New for consistency with the sibling helpers
	// (ConfigNamesOfKind, ConfigNamespacedNameOfKind, ...).
	ret := sets.New[ConfigKey]()
	for conf := range configs {
		if conf.Kind == kind {
			ret.Insert(conf)
		}
	}
	return ret
}
// HasConfigsOfKind returns true if configs has changes of type kind
func HasConfigsOfKind(configs sets.Set[ConfigKey], kind kind.Kind) bool {
	for key := range configs {
		if key.Kind == kind {
			return true
		}
	}
	return false
}
// ConfigNamesOfKind extracts config names of the specified kind.
func ConfigNamesOfKind(configs sets.Set[ConfigKey], kind kind.Kind) sets.String {
	names := sets.New[string]()
	for key := range configs {
		if key.Kind == kind {
			names.Insert(key.Name)
		}
	}
	return names
}
// ConfigNamespacedNameOfKind extracts the namespace/name pairs of configs of
// the specified kind.
func ConfigNamespacedNameOfKind(configs map[ConfigKey]struct{}, kind kind.Kind) sets.Set[types.NamespacedName] {
	names := sets.New[types.NamespacedName]()
	for key := range configs {
		if key.Kind != kind {
			continue
		}
		names.Insert(types.NamespacedName{
			Namespace: key.Namespace,
			Name:      key.Name,
		})
	}
	return names
}
// ConfigNameOfKind extracts config names of the specified kind.
// It mirrors ConfigNamesOfKind but takes a plain map instead of a sets.Set.
func ConfigNameOfKind(configs map[ConfigKey]struct{}, kind kind.Kind) sets.String {
	names := sets.New[string]()
	for key := range configs {
		if key.Kind == kind {
			names.Insert(key.Name)
		}
	}
	return names
}
// ConfigStore describes a set of platform agnostic APIs that must be supported
// by the underlying platform to store and retrieve Istio configuration.
//
// Configuration key is defined to be a combination of the type, name, and
// namespace of the configuration object. The configuration key is guaranteed
// to be unique in the store.
//
// The storage interface presented here assumes that the underlying storage
// layer supports _Get_ (list), _Update_ (update), _Create_ (create) and
// _Delete_ semantics but does not guarantee any transactional semantics.
//
// _Update_, _Create_, and _Delete_ are mutator operations. These operations
// are asynchronous, and you might not see the effect immediately (e.g. _Get_
// might not return the object by key immediately after you mutate the store.)
// Intermittent errors might occur even though the operation succeeds, so you
// should always check if the object store has been modified even if the
// mutating operation returns an error. Objects should be created with
// _Create_ operation and updated with _Update_ operation.
//
// Resource versions record the last mutation operation on each object. If a
// mutation is applied to a different revision of an object than what the
// underlying storage expects as defined by pure equality, the operation is
// blocked. The client of this interface should not make assumptions about the
// structure or ordering of the revision identifier.
//
// Object references supplied and returned from this interface should be
// treated as read-only. Modifying them violates thread-safety.
type ConfigStore interface {
	// Schemas exposes the configuration type schema known by the config store.
	// The type schema defines the bidirectional mapping between configuration
	// types and the protobuf encoding schema.
	Schemas() collection.Schemas
	// Get retrieves a configuration element by a type and a key
	Get(typ config.GroupVersionKind, name, namespace string) *config.Config
	// List returns objects by type and namespace.
	// Use "" for the namespace to list across namespaces.
	List(typ config.GroupVersionKind, namespace string) []config.Config
	// Create adds a new configuration object to the store. If an object with the
	// same name and namespace for the type already exists, the operation fails
	// with no side effects.
	Create(config config.Config) (revision string, err error)
	// Update modifies an existing configuration object in the store. Update
	// requires that the object has been created. Resource version prevents
	// overriding a value that has been changed between prior _Get_ and _Put_
	// operation to achieve optimistic concurrency. This method returns a new
	// revision if the operation succeeds.
	Update(config config.Config) (newRevision string, err error)
	// UpdateStatus is the analog of Update for the object's status section,
	// returning the new revision on success.
	UpdateStatus(config config.Config) (newRevision string, err error)
	// Patch applies only the modifications made in the PatchFunc rather than doing a full replace. Useful to avoid
	// read-modify-write conflicts when there are many concurrent-writers to the same resource.
	Patch(orig config.Config, patchFn config.PatchFunc) (string, error)
	// Delete removes an object from the store by key
	// For k8s, resourceVersion must be fulfilled before a deletion is carried out.
	// If not possible, a 409 Conflict status will be returned.
	Delete(typ config.GroupVersionKind, name, namespace string, resourceVersion *string) error
}
// EventHandler is the signature of config-change callbacks: it receives two
// config.Config values (by convention the previous and current versions of the
// resource) and the Event describing the change.
type EventHandler = func(config.Config, config.Config, Event)

// ConfigStoreController is a local fully-replicated cache of the config store with additional handlers. The
// controller actively synchronizes its local state with the remote store and
// provides a notification mechanism to receive update events. As such, the
// notification handlers must be registered prior to calling _Run_, and the
// cache requires initial synchronization grace period after calling _Run_.
//
// Update notifications require the following consistency guarantee: the view
// in the cache must be AT LEAST as fresh as the moment notification arrives, but
// MAY BE more fresh (e.g. if _Delete_ cancels an _Add_ event).
//
// Handlers execute on the single worker queue in the order they are appended.
// Handlers receive the notification event and the associated object. Note
// that all handlers must be registered before starting the cache controller.
type ConfigStoreController interface {
	ConfigStore
	// RegisterEventHandler adds a handler to receive config update events for a
	// configuration type
	RegisterEventHandler(kind config.GroupVersionKind, handler EventHandler)
	// Run until a signal is received.
	// Run *should* block, so callers should typically call `go controller.Run(stop)`
	Run(stop <-chan struct{})
	// HasSynced returns true after initial cache synchronization is complete
	HasSynced() bool
}
const (
	// NamespaceAll is a designated symbol for listing across all namespaces
	// (passed as the namespace argument to ConfigStore.List).
	NamespaceAll = ""
)
// ResolveShortnameToFQDN uses metadata information to resolve a reference
// to shortname of the service to FQDN
func ResolveShortnameToFQDN(hostname string, meta config.Meta) host.Name {
	if hostname == "" {
		// only happens when the gateway-api BackendRef is invalid
		return ""
	}
	// The literal wildcard is treated as fully qualified. Any other wildcard
	// variant contains a '.' and is caught by the dotted-name check below.
	if hostname == "*" {
		return host.Name(hostname)
	}
	// Valid IPv4/IPv6 addresses are never qualified with namespace or domain.
	if netutil.IsValidIPAddress(hostname) {
		return host.Name(hostname)
	}
	// A name containing '.' is assumed to already be an FQDN; leave it alone.
	if strings.Contains(hostname, ".") {
		return host.Name(hostname)
	}
	out := hostname
	if meta.Namespace != "" {
		out += "." + meta.Namespace
	}
	// FIXME this is a gross hack to hardcode a service's domain name in kubernetes
	// BUG this will break non kubernetes environments if they use shortnames in the
	// rules.
	if meta.Domain != "" {
		out += ".svc." + meta.Domain
	}
	return host.Name(out)
}
// resolveGatewayName uses metadata information to resolve a reference
// to shortname of the gateway to FQDN
func resolveGatewayName(gwname string, meta config.Meta) string {
	// New-style binding to a remote namespace: "ns/name" (or "./name" for the
	// config's own namespace). Old style is either an FQDN or a short name.
	if ns, name, ok := strings.Cut(gwname, "/"); ok {
		if ns == "." {
			// substitute "." with the namespace name
			return meta.Namespace + "/" + name
		}
		return gwname
	}
	if !strings.Contains(gwname, ".") {
		// Short name: resolve to a gateway in the same namespace.
		return meta.Namespace + "/" + gwname
	}
	// Legacy FQDN format; transform name.ns.svc.cluster.local -> ns/name.
	// This is very hacky but kept for backward compatibility only.
	name, rest, _ := strings.Cut(gwname, ".")
	ns, _, hasMore := strings.Cut(rest, ".")
	if !hasMore {
		return rest + "/" + name
	}
	return ns + "/" + name
}
// MostSpecificHostMatch compares the maps of specific and wildcard hosts to the needle, and returns the longest element
// matching the needle and it's value, or false if no element in the maps matches the needle.
func MostSpecificHostMatch[V any](needle host.Name, specific map[host.Name]V, wildcard map[host.Name]V) (host.Name, V, bool) {
	if needle.IsWildCarded() {
		// A wildcard needle can only match wildcard entries: exact entry first,
		// then suffix matching on the part after '*'.
		if val, ok := wildcard[needle]; ok {
			return needle, val, true
		}
		return mostSpecificHostWildcardMatch(string(needle[1:]), wildcard)
	}
	// Non-wildcard needle: exact entry first, then wildcard suffixes.
	if val, ok := specific[needle]; ok {
		return needle, val, true
	}
	return mostSpecificHostWildcardMatch(string(needle), wildcard)
}
// mostSpecificHostWildcardMatch returns the most specific wildcard entry whose
// suffix (the portion after the leading '*') matches needle, together with its
// value, or false when nothing matches.
func mostSpecificHostWildcardMatch[V any](needle string, wildcard map[host.Name]V) (host.Name, V, bool) {
	found := false
	var matchHost host.Name
	var matchValue V
	for h, v := range wildcard {
		if strings.HasSuffix(needle, string(h[1:])) {
			// Take the first match, or a more specific one than the current best.
			// Use the ranged value v directly rather than re-indexing the map.
			if !found || host.MoreSpecific(h, matchHost) {
				matchHost = h
				matchValue = v
				found = true
			}
		}
	}
	return matchHost, matchValue, found
}
// OldestMatchingHost returns the oldest matching host for a given needle (whether specific or wildcarded)
func OldestMatchingHost(needle host.Name, specific map[host.Name]config.Config, wildcard map[host.Name]config.Config) (host.Name, config.Config, bool) {
	// The algorithm is a bit different than MostSpecificHostMatch. We can't short-circuit on the first
	// match, regardless of whether it's specific or wildcarded. This is because we have to check the timestamp
	// of all configs to make sure there's not an older matching one that we should use instead.
	if needle.IsWildCarded() {
		needle = needle[1:]
	}
	found := false
	var matchHost host.Name
	var matchValue config.Config
	// exact match first
	if v, ok := specific[needle]; ok {
		found = true
		matchHost = needle
		matchValue = v
	}
	// Even if we have a match, we still need to check the wildcard map to see if there's an older match
	for h, v := range wildcard {
		if strings.HasSuffix(string(needle), string(h[1:])) {
			if !found {
				matchHost = h
				// Use the ranged value directly; re-indexing the map here was redundant.
				matchValue = v
				found = true
			} else if h.Matches(matchHost) && v.GetCreationTimestamp().Before(matchValue.GetCreationTimestamp()) {
				// Only replace if the new match is more specific and older than the current match
				matchHost = h
				matchValue = v
			}
		}
	}
	return matchHost, matchValue, found
}
// sortConfigByCreationTime sorts the list of config objects in ascending order by their creation time (if available).
// The sort is in place; the same slice is returned for convenience.
func sortConfigByCreationTime(configs []config.Config) []config.Config {
	sort.Slice(configs, func(i, j int) bool {
		ci, cj := &configs[i], &configs[j]
		// CreationTimestamp has second granularity, so ties are common; break
		// them with the unique name.namespace to keep the order deterministic.
		if ci.CreationTimestamp == cj.CreationTimestamp {
			return ci.Name+"."+ci.Namespace < cj.Name+"."+cj.Namespace
		}
		return ci.CreationTimestamp.Before(cj.CreationTimestamp)
	})
	return configs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"bytes"
"encoding/json"
"fmt"
"net"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
anypb "google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/structpb"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pilot/pkg/features"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pilot/pkg/trustbundle"
networkutil "istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/ledger"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/identifier"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
var _ mesh.Holder = &Environment{}

// NewEnvironment creates an Environment with a fresh push context, an XDS
// cache (real or disabled, per the EnableXDSCaching feature flag), and an
// endpoint index backed by that cache.
func NewEnvironment() *Environment {
	var cache XdsCache = DisabledCache{}
	if features.EnableXDSCaching {
		cache = NewXdsCache()
	}
	return &Environment{
		pushContext:   NewPushContext(),
		Cache:         cache,
		EndpointIndex: NewEndpointIndex(cache),
	}
}
// Environment provides an aggregate environmental API for Pilot
type Environment struct {
	// Discovery interface for listing services and instances.
	ServiceDiscovery
	// Config interface for listing routing rules
	ConfigStore
	// Watcher is the watcher for the mesh config (to be merged into the config store)
	mesh.Watcher
	// NetworksWatcher (loaded from a config map) provides information about the
	// set of networks inside a mesh and how to route to endpoints in each
	// network. Each network provides information about the endpoints in a
	// routable L3 network. A single routable L3 network can have one or more
	// service registries.
	NetworksWatcher mesh.NetworksWatcher
	// NetworkManager tracks mesh network state; initialized via InitNetworksManager.
	NetworkManager *NetworkManager
	// mutex used for protecting Environment.pushContext
	mutex sync.RWMutex
	// pushContext holds information during push generation. It is reset on config change, at the beginning
	// of the pushAll. It will hold all errors and stats and possibly caches needed during the entire cache computation.
	// DO NOT USE EXCEPT FOR TESTS AND HANDLING OF NEW CONNECTIONS.
	// ALL USE DURING A PUSH SHOULD USE THE ONE CREATED AT THE
	// START OF THE PUSH, THE GLOBAL ONE MAY CHANGE AND REFLECT A DIFFERENT
	// CONFIG AND PUSH
	pushContext *PushContext
	// DomainSuffix provides a default domain for the Istio server.
	DomainSuffix string
	// ledger records config distribution state; accessed through GetLedger/SetLedger
	// and used by Version to report the root hash.
	ledger ledger.Ledger
	// TrustBundle: List of Mesh TrustAnchors
	TrustBundle *trustbundle.TrustBundle
	// clusterLocalServices provides the cluster-local host set; created in Init.
	clusterLocalServices ClusterLocalProvider
	// CredentialsController manages credentials across clusters.
	CredentialsController credentials.MulticlusterController
	// GatewayAPIController handles gateway-api resources.
	GatewayAPIController GatewayController
	// EndpointShards for a service. This is a global (per-server) list, built from
	// incremental updates. This is keyed by service and namespace
	EndpointIndex *EndpointIndex
	// Cache for XDS resources.
	Cache XdsCache
}
// Mesh returns the current mesh config, or nil when the Environment or its
// Watcher is not set.
func (e *Environment) Mesh() *meshconfig.MeshConfig {
	if e == nil || e.Watcher == nil {
		return nil
	}
	return e.Watcher.Mesh()
}
// MeshNetworks returns the current mesh networks config, or nil when the
// Environment or its NetworksWatcher is not set.
func (e *Environment) MeshNetworks() *meshconfig.MeshNetworks {
	if e == nil || e.NetworksWatcher == nil {
		return nil
	}
	return e.NetworksWatcher.Networks()
}
// SetPushContext sets the push context with lock protected
func (e *Environment) SetPushContext(pc *PushContext) {
	e.mutex.Lock()
	e.pushContext = pc
	e.mutex.Unlock()
}
// PushContext returns the push context with lock protected
func (e *Environment) PushContext() *PushContext {
	e.mutex.RLock()
	pc := e.pushContext
	e.mutex.RUnlock()
	return pc
}
// GetDiscoveryAddress parses the DiscoveryAddress specified via MeshConfig.
// It returns the hostname and port separately; the port must be numeric.
func (e *Environment) GetDiscoveryAddress() (host.Name, string, error) {
	proxyConfig := mesh.DefaultProxyConfig()
	if e.Mesh().DefaultConfig != nil {
		proxyConfig = e.Mesh().DefaultConfig
	}
	addr := proxyConfig.DiscoveryAddress
	hostname, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", "", fmt.Errorf("invalid Istiod Address: %s, %v", addr, err)
	}
	if _, err := strconv.Atoi(port); err != nil {
		return "", "", fmt.Errorf("invalid Istiod Port: %s, %s, %v", port, addr, err)
	}
	return host.Name(hostname), port, nil
}
// AddMeshHandler registers h to be called on mesh config updates.
// No-op when the Environment or its Watcher is nil.
func (e *Environment) AddMeshHandler(h func()) {
	if e != nil && e.Watcher != nil {
		e.Watcher.AddMeshHandler(h)
	}
}

// AddNetworksHandler registers h to be called on mesh networks updates.
// No-op when the Environment or its NetworksWatcher is nil.
func (e *Environment) AddNetworksHandler(h func()) {
	if e != nil && e.NetworksWatcher != nil {
		e.NetworksWatcher.AddNetworksHandler(h)
	}
}

// AddMetric forwards the metric to the current push context.
// No-op on a nil Environment.
func (e *Environment) AddMetric(metric monitoring.Metric, key string, proxyID, msg string) {
	if e != nil {
		e.PushContext().AddMetric(metric, key, proxyID, msg)
	}
}

// Version returns the root hash of the config ledger, or "" when no ledger is set.
func (e *Environment) Version() string {
	if x := e.GetLedger(); x != nil {
		return x.RootHash()
	}
	return ""
}
// Init initializes the Environment for use.
func (e *Environment) Init() {
	// Use a default DomainSuffix, if none was provided.
	if len(e.DomainSuffix) == 0 {
		e.DomainSuffix = constants.DefaultClusterLocalDomain
	}
	// Create the cluster-local service registry.
	e.clusterLocalServices = NewClusterLocalProvider(e)
}

// InitNetworksManager builds the NetworkManager for this Environment using the
// given XDS updater.
func (e *Environment) InitNetworksManager(updater XDSUpdater) (err error) {
	e.NetworkManager, err = NewNetworkManager(e, updater)
	return
}

// ClusterLocal returns the cluster-local host provider (set up in Init).
func (e *Environment) ClusterLocal() ClusterLocalProvider {
	return e.clusterLocalServices
}

// GetLedger returns the ledger used by Version to report the root hash.
func (e *Environment) GetLedger() ledger.Ledger {
	return e.ledger
}

// SetLedger sets the ledger used by Version to report the root hash.
func (e *Environment) SetLedger(l ledger.Ledger) {
	e.ledger = l
}
// GetProxyConfigOrDefault returns the effective ProxyConfig for a workload
// described by its namespace, labels, and annotations, falling back to the
// mesh default when no per-proxy config applies.
func (e *Environment) GetProxyConfigOrDefault(ns string, labels, annotations map[string]string, meshConfig *meshconfig.MeshConfig) *meshconfig.ProxyConfig {
	push := e.PushContext()
	if push == nil || push.ProxyConfigs == nil {
		return mesh.DefaultProxyConfig()
	}
	meta := &NodeMetadata{
		Namespace:   ns,
		Labels:      labels,
		Annotations: annotations,
	}
	if pc := push.ProxyConfigs.EffectiveProxyConfig(meta, meshConfig); pc != nil {
		return pc
	}
	return mesh.DefaultProxyConfig()
}
// Resources is an alias for array of marshaled resources.
// (A true type alias, so []*discovery.Resource values interconvert freely.)
type Resources = []*discovery.Resource

// DeletedResources is an alias for array of strings that represent removed resources in delta.
type DeletedResources = []string
// AnyToUnnamedResources wraps each Any message in a discovery.Resource without
// setting a resource name.
func AnyToUnnamedResources(r []*anypb.Any) Resources {
	out := make(Resources, len(r))
	for i, msg := range r {
		out[i] = &discovery.Resource{Resource: msg}
	}
	return out
}
// ResourcesToAny unwraps the inner Any message from each discovery.Resource.
func ResourcesToAny(r Resources) []*anypb.Any {
	out := make([]*anypb.Any, len(r))
	for i, res := range r {
		out[i] = res.Resource
	}
	return out
}
// XdsUpdates include information about the subset of updated resources.
// See for example EDS incremental updates.
type XdsUpdates = sets.Set[ConfigKey]

// XdsLogDetails contains additional metadata that is captured by Generators and used by xds processors
// like Ads and Delta to uniformly log.
type XdsLogDetails struct {
	Incremental bool
	AdditionalInfo string
}

// DefaultXdsLogDetails is the zero value, used when a generator has no extra
// details to report.
var DefaultXdsLogDetails = XdsLogDetails{}
// XdsResourceGenerator creates the response for a typeURL DiscoveryRequest or DeltaDiscoveryRequest. If no generator
// is associated with a Proxy, the default (a networking.core.ConfigGenerator instance) will be used.
// The server may associate a different generator based on client metadata. Different
// WatchedResources may use same or different Generator.
// Note: any errors returned will completely close the XDS stream. Use with caution; typically an empty
// or no response is preferred.
type XdsResourceGenerator interface {
	// Generate generates the Sotw resources for Xds.
	Generate(proxy *Proxy, w *WatchedResource, req *PushRequest) (Resources, XdsLogDetails, error)
}

// XdsDeltaResourceGenerator generates Sotw and delta resources.
type XdsDeltaResourceGenerator interface {
	XdsResourceGenerator
	// GenerateDeltas returns the changed and removed resources, along with whether or not delta was actually used.
	GenerateDeltas(proxy *Proxy, req *PushRequest, w *WatchedResource) (Resources, DeletedResources, XdsLogDetails, bool, error)
}
// Proxy contains information about an specific instance of a proxy (envoy sidecar, gateway,
// etc). The Proxy is initialized when a sidecar connects to Pilot, and populated from
// 'node' info in the protocol as well as data extracted from registries.
//
// In current Istio implementation nodes use a 4-parts '~' delimited ID.
// Type~IPAddress~ID~Domain
type Proxy struct {
	// Guards mutable state on the Proxy.
	// NOTE(review): which of the fields below require the lock is enforced by callers,
	// not visible in this struct — confirm the lock discipline before relying on it.
	sync.RWMutex
	// Type specifies the node type. First part of the ID.
	Type NodeType
	// IPAddresses is the IP addresses of the proxy used to identify it and its
	// co-located service instances. Example: "10.60.1.6". In some cases, the host
	// where the proxy and service instances reside may have more than one IP address
	IPAddresses []string
	// ID is the unique platform-specific sidecar proxy ID. For k8s it is the pod ID and
	// namespace <podName.namespace>.
	ID string
	// Locality is the location of where Envoy proxy runs. This is extracted from
	// the registry where possible. If the registry doesn't provide a locality for the
	// proxy it will use the one sent via ADS that can be configured in the Envoy bootstrap
	Locality *core.Locality
	// DNSDomain defines the DNS domain suffix for short hostnames (e.g.
	// "default.svc.cluster.local")
	DNSDomain string
	// ConfigNamespace defines the namespace where this proxy resides
	// for the purposes of network scoping.
	// NOTE: DO NOT USE THIS FIELD TO CONSTRUCT DNS NAMES
	ConfigNamespace string
	// Labels specifies the set of workload instance (ex: k8s pod) labels associated with this node.
	// Labels can be different from that in Metadata because of pod labels update after startup,
	// while NodeMetadata.Labels are set during bootstrap.
	Labels map[string]string
	// Metadata key-value pairs extending the Node identifier
	Metadata *NodeMetadata
	// the sidecarScope associated with the proxy
	SidecarScope *SidecarScope
	// the sidecarScope associated with the proxy previously
	PrevSidecarScope *SidecarScope
	// The merged gateways associated with the proxy if this is a Router
	MergedGateway *MergedGateway
	// ServiceTargets contains a list of all Services associated with the proxy, contextualized for this particular proxy.
	// These are unique to this proxy, as the port information is specific to it - while a ServicePort is shared with the
	// service, the target port may be distinct per-endpoint. So this maintains a view specific to this proxy.
	// ServiceTargets will maintain a list entry for each Service-port, so if we have 2 services each with 3 ports, we
	// would have 6 entries.
	ServiceTargets []ServiceTarget
	// Istio version associated with the Proxy
	IstioVersion *IstioVersion
	// VerifiedIdentity determines whether a proxy had its identity verified. This
	// generally occurs by JWT or mTLS authentication. This can be false when
	// connecting over plaintext. If this is set to true, we can verify the proxy has
	// access to ConfigNamespace namespace. However, other options such as node type
	// are not part of an Istio identity and thus are not verified.
	VerifiedIdentity *spiffe.Identity
	// IPMode of proxy.
	ipMode IPMode
	// GlobalUnicastIP stores the global unicast IP if available, otherwise the empty string
	GlobalUnicastIP string
	// XdsResourceGenerator is used to generate resources for the node, based on the PushContext.
	// If nil, the default networking/core v2 generator is used. This field can be set
	// at connect time, based on node metadata, to trigger generation of a different style
	// of configuration.
	XdsResourceGenerator XdsResourceGenerator
	// WatchedResources contains the list of watched resources for the proxy, keyed by the DiscoveryRequest TypeUrl.
	WatchedResources map[string]*WatchedResource
	// XdsNode is the xDS node identifier
	XdsNode *core.Node
	// workloadEntryName is the name of the WorkloadEntry associated with this proxy, if any.
	// NOTE(review): set outside this block — presumably during VM (auto-)registration; confirm.
	workloadEntryName string
	// workloadEntryAutoCreated records whether the associated WorkloadEntry was created
	// automatically rather than supplied by the user.
	// NOTE(review): set outside this block; confirm semantics against the auto-registration code.
	workloadEntryAutoCreated bool
	// LastPushContext stores the most recent push context for this proxy. This will be monotonically
	// increasing in version. Requests should send config based on this context; not the global latest.
	// Historically, the latest was used which can cause problems when computing whether a push is
	// required, as the computed sidecar scope version would not monotonically increase.
	LastPushContext *PushContext
	// LastPushTime records the time of the last push. This is used in conjunction with
	// LastPushContext; the XDS cache depends on knowing the time of the PushContext to determine if a
	// key is stale or not.
	LastPushTime time.Time
}
// WatchedResource tracks an active DiscoveryRequest subscription.
// One instance exists per (proxy, TypeUrl) pair; the set is keyed by TypeUrl
// in Proxy.WatchedResources.
type WatchedResource struct {
	// TypeUrl is copied from the DiscoveryRequest.TypeUrl that initiated watching this resource.
	// nolint
	TypeUrl string
	// ResourceNames tracks the list of resources that are actively watched.
	// For LDS and CDS, all resources of the TypeUrl type are watched if it is empty.
	// For endpoints the resource names will have list of clusters and for clusters it is empty.
	// For Delta Xds, all resources of the TypeUrl that a client has subscribed to.
	ResourceNames []string
	// Wildcard indicates the subscription is a wildcard subscription. This only applies to types that
	// allow both wildcard and non-wildcard subscriptions.
	Wildcard bool
	// NonceSent is the nonce sent in the last sent response. If it is equal with NonceAcked, the
	// last message has been processed. If empty: we never sent a message of this type.
	NonceSent string
	// NonceAcked is the last acked message.
	NonceAcked string
	// AlwaysRespond, if true, will ensure that even when a request would otherwise be treated as an
	// ACK, it will be responded to. This typically happens when a proxy reconnects to another instance of
	// Istiod. In that case, Envoy expects us to respond to EDS/RDS/SDS requests to finish warming of
	// clusters/listeners.
	// Typically, this should be set to 'false' after response; keeping it true would likely result in an endless loop.
	AlwaysRespond bool
	// LastResources tracks the contents of the last push.
	// This field is extremely expensive to maintain and is typically disabled.
	LastResources Resources
}
// istioVersionRegexp extracts a "major.minor[.patch]" prefix from a version string.
// NOTE(review): the major-version class is `[1-9]+`, so any major version containing
// a zero digit (e.g. "10") will not match, and ParseIstioVersion will fall back to
// MaxIstioVersion — confirm whether that is intended before major versions reach 10.
var istioVersionRegexp = regexp.MustCompile(`^([1-9]+)\.([0-9]+)(\.([0-9]+))?`)
// StringList is a []string that serializes to and from a single comma-joined
// JSON string, for legacy compatibility with older node metadata formats.
type StringList []string

// MarshalJSON renders the list as one comma-joined JSON string.
// A nil list marshals to (nil, nil) to preserve the legacy output.
func (l StringList) MarshalJSON() ([]byte, error) {
	if l == nil {
		return nil, nil
	}
	joined := strings.Join(l, ",")
	return json.Marshal(joined)
}

// UnmarshalJSON parses a JSON string and splits it on commas.
// An empty string yields an empty (non-nil) list.
func (l *StringList) UnmarshalJSON(data []byte) error {
	var joined string
	if err := json.Unmarshal(data, &joined); err != nil {
		return err
	}
	if joined == "" {
		*l = []string{}
		return nil
	}
	*l = strings.Split(joined, ",")
	return nil
}
// PodPort describes a mapping of port name to port number. Generally, this is just the definition of
// a port in Kubernetes, but without depending on Kubernetes api.
type PodPort struct {
	// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
	// named port in a pod must have a unique name. Name for the port that can be
	// referred to by services.
	// +optional
	Name string `json:"name,omitempty"`
	// Number of port to expose on the pod's IP address.
	// This must be a valid port number, 0 < x < 65536.
	ContainerPort int `json:"containerPort"`
	// Name of the protocol
	Protocol string `json:"protocol"`
}

// PodPortList defines a list of PodPort's that is serialized as a string
// This is for legacy reasons, where proper JSON was not supported and was written as a string
type PodPortList []PodPort

// MarshalJSON emits the list as a quoted JSON-array string: the inner JSON is
// produced normally, its quotes are backslash-escaped, and the result is wrapped
// in a pair of double quotes. A nil list marshals to (nil, nil).
func (l PodPortList) MarshalJSON() ([]byte, error) {
	if l == nil {
		return nil, nil
	}
	inner, err := json.Marshal([]PodPort(l))
	if err != nil {
		return nil, err
	}
	escaped := bytes.ReplaceAll(inner, []byte(`"`), []byte(`\"`))
	var buf bytes.Buffer
	buf.Grow(len(escaped) + 2)
	buf.WriteByte('"')
	buf.Write(escaped)
	buf.WriteByte('"')
	return buf.Bytes(), nil
}

// UnmarshalJSON reverses MarshalJSON: it unquotes the outer string and then
// decodes the embedded JSON array.
func (l *PodPortList) UnmarshalJSON(data []byte) error {
	unquoted, err := strconv.Unquote(string(data))
	if err != nil {
		return err
	}
	var ports []PodPort
	if err := json.Unmarshal([]byte(unquoted), &ports); err != nil {
		return err
	}
	*l = ports
	return nil
}
// StringBool defines a boolean that is serialized as a string for legacy reasons
type StringBool bool

// MarshalJSON encodes the boolean as the quoted string "true" or "false".
func (s StringBool) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%q", strconv.FormatBool(bool(s)))), nil
}

// UnmarshalJSON unquotes the JSON string and parses it with strconv.ParseBool,
// so all of ParseBool's accepted spellings ("1", "t", "TRUE", ...) work.
func (s *StringBool) UnmarshalJSON(data []byte) error {
	unquoted, err := strconv.Unquote(string(data))
	if err != nil {
		return err
	}
	parsed, err := strconv.ParseBool(unquoted)
	if err != nil {
		return err
	}
	*s = StringBool(parsed)
	return nil
}
// NodeMetaProxyConfig wraps meshconfig.ProxyConfig for use inside node metadata.
// ProxyConfig can only be marshaled using (gogo) jsonpb. However, the rest of node meta is not a proto
// To allow marshaling, we need to define a custom type that calls out to the gogo marshaller
type NodeMetaProxyConfig meshconfig.ProxyConfig

// MarshalJSON marshals the wrapped ProxyConfig via the proto-aware marshaller.
func (s *NodeMetaProxyConfig) MarshalJSON() ([]byte, error) {
	pc := (*meshconfig.ProxyConfig)(s)
	return protomarshal.Marshal(pc)
}

// UnmarshalJSON unmarshals into the wrapped ProxyConfig, tolerating unknown fields
// so metadata from newer proxies does not fail to parse.
func (s *NodeMetaProxyConfig) UnmarshalJSON(data []byte) error {
	pc := (*meshconfig.ProxyConfig)(s)
	return protomarshal.UnmarshalAllowUnknown(data, pc)
}
// Node is a typed version of Envoy node with metadata.
type Node struct {
	// ID of the Envoy node
	ID string
	// Metadata is the typed node metadata
	Metadata *BootstrapNodeMetadata
	// RawMetadata is the untyped node metadata, preserving keys not modeled by BootstrapNodeMetadata
	RawMetadata map[string]any
	// Locality from Envoy bootstrap
	Locality *core.Locality
}
// BootstrapNodeMetadata is a superset of NodeMetadata, intended to model the entirety of the node metadata
// we configure in the Envoy bootstrap. This is split out from NodeMetadata to explicitly segment the parameters
// that are consumed by Pilot from the parameters used only as part of the bootstrap. Fields used by bootstrap only
// are consumed by Envoy itself, such as the telemetry filters.
type BootstrapNodeMetadata struct {
	NodeMetadata
	// InstanceName is the short name for the workload instance (ex: pod name)
	// replaces POD_NAME
	InstanceName string `json:"NAME,omitempty"`
	// Owner specifies the workload owner (opaque string). Typically, this is the owning controller of
	// the workload instance (ex: k8s deployment for a k8s pod).
	Owner string `json:"OWNER,omitempty"`
	// PilotSubjectAltName is the list of subject alternate names for the xDS server.
	PilotSubjectAltName []string `json:"PILOT_SAN,omitempty"`
	// XDSRootCert defines the root cert to use for XDS connections
	XDSRootCert string `json:"-"`
	// OutlierLogPath is the cluster manager outlier event log path.
	OutlierLogPath string `json:"OUTLIER_LOG_PATH,omitempty"`
	// AppContainers is the list of containers in the pod.
	AppContainers string `json:"APP_CONTAINERS,omitempty"`
	// IstioProxySHA is the SHA of the proxy version.
	IstioProxySHA string `json:"ISTIO_PROXY_SHA,omitempty"`
}
// NodeMetadata defines the metadata associated with a proxy
// Fields should not be assumed to exist on the proxy, especially newly added fields which will not exist
// on older versions.
// The JSON field names should never change, as they are needed for backward compatibility with older proxies
// nolint: maligned
type NodeMetadata struct {
	// ProxyConfig defines the proxy config specified for a proxy.
	// Note that this setting may be configured different for each proxy, due to user overrides
	// or from different versions of proxies connecting. While Pilot has access to the meshConfig.defaultConfig,
	// this field should be preferred if it is present.
	ProxyConfig *NodeMetaProxyConfig `json:"PROXY_CONFIG,omitempty"`
	// IstioVersion specifies the Istio version associated with the proxy
	IstioVersion string `json:"ISTIO_VERSION,omitempty"`
	// IstioRevision specifies the Istio revision associated with the proxy.
	// Mostly used when istiod requests the upstream.
	IstioRevision string `json:"ISTIO_REVISION,omitempty"`
	// Labels specifies the set of workload instance (ex: k8s pod) labels associated with this node.
	// It contains both StaticLabels and pod labels if any, it is a superset of StaticLabels.
	// Note: it is not meant to be used during xds generation.
	Labels map[string]string `json:"LABELS,omitempty"`
	// StaticLabels specifies the set of labels from `ISTIO_METAJSON_LABELS`.
	StaticLabels map[string]string `json:"STATIC_LABELS,omitempty"`
	// Annotations specifies the set of workload instance (ex: k8s pod) annotations associated with this node.
	Annotations map[string]string `json:"ANNOTATIONS,omitempty"`
	// InstanceIPs is the set of IPs attached to this proxy
	InstanceIPs StringList `json:"INSTANCE_IPS,omitempty"`
	// Namespace is the namespace in which the workload instance is running.
	Namespace string `json:"NAMESPACE,omitempty"`
	// NodeName is the name of the kubernetes node on which the workload instance is running.
	NodeName string `json:"NODE_NAME,omitempty"`
	// WorkloadName specifies the name of the workload represented by this node.
	WorkloadName string `json:"WORKLOAD_NAME,omitempty"`
	// InterceptionMode is the name of the metadata variable that carries info about
	// traffic interception mode at the proxy
	InterceptionMode TrafficInterceptionMode `json:"INTERCEPTION_MODE,omitempty"`
	// ServiceAccount specifies the service account which is running the workload.
	ServiceAccount string `json:"SERVICE_ACCOUNT,omitempty"`
	// HTTPProxyPort enables http proxy on the port for the current sidecar.
	// Same as MeshConfig.HttpProxyPort, but with per/sidecar scope.
	HTTPProxyPort string `json:"HTTP_PROXY_PORT,omitempty"`
	// MeshID specifies the mesh ID environment variable.
	MeshID string `json:"MESH_ID,omitempty"`
	// ClusterID defines the cluster the node belongs to.
	ClusterID cluster.ID `json:"CLUSTER_ID,omitempty"`
	// Network defines the network the node belongs to. It is an optional metadata,
	// set at injection time. When set, the Endpoints returned to a node and not on same network
	// will be replaced with the gateway defined in the settings.
	Network network.ID `json:"NETWORK,omitempty"`
	// RequestedNetworkView specifies the networks that the proxy wants to see
	RequestedNetworkView StringList `json:"REQUESTED_NETWORK_VIEW,omitempty"`
	// PodPorts defines the ports on a pod. This is used to lookup named ports.
	PodPorts PodPortList `json:"POD_PORTS,omitempty"`
	// TLSServerCertChain is the absolute path to server cert-chain file
	TLSServerCertChain string `json:"TLS_SERVER_CERT_CHAIN,omitempty"`
	// TLSServerKey is the absolute path to server private key file
	TLSServerKey string `json:"TLS_SERVER_KEY,omitempty"`
	// TLSServerRootCert is the absolute path to server root cert file
	TLSServerRootCert string `json:"TLS_SERVER_ROOT_CERT,omitempty"`
	// TLSClientCertChain is the absolute path to client cert-chain file
	TLSClientCertChain string `json:"TLS_CLIENT_CERT_CHAIN,omitempty"`
	// TLSClientKey is the absolute path to client private key file
	TLSClientKey string `json:"TLS_CLIENT_KEY,omitempty"`
	// TLSClientRootCert is the absolute path to client root cert file
	TLSClientRootCert string `json:"TLS_CLIENT_ROOT_CERT,omitempty"`
	// CertBaseDir is the base directory for certificate paths.
	// NOTE(review): serialized under the legacy key "BASE"; confirm consumers before renaming anything.
	CertBaseDir string `json:"BASE,omitempty"`
	// IdleTimeout specifies the idle timeout for the proxy, in duration format (10s).
	// If not set, default timeout is 1 hour.
	IdleTimeout string `json:"IDLE_TIMEOUT,omitempty"`
	// HTTP10 indicates the application behind the sidecar is making outbound http requests with HTTP/1.0
	// protocol. It will enable the "AcceptHttp_10" option on the http options for outbound HTTP listeners.
	// Alpha in 1.1, based on feedback may be turned into an API or change. Set to "1" to enable.
	HTTP10 string `json:"HTTP10,omitempty"`
	// Generator indicates the client wants to use a custom Generator plugin.
	Generator string `json:"GENERATOR,omitempty"`
	// DNSCapture indicates whether the workload has enabled dns capture
	DNSCapture StringBool `json:"DNS_CAPTURE,omitempty"`
	// DNSAutoAllocate indicates whether the workload should have auto allocated addresses for ServiceEntry
	// This allows resolving ServiceEntries, which is especially useful for distinguishing TCP traffic
	// This depends on DNSCapture.
	DNSAutoAllocate StringBool `json:"DNS_AUTO_ALLOCATE,omitempty"`
	// EnableHBONE, if set, will enable generation of HBONE config.
	// Note: this only impacts sidecars; ztunnel and waypoint proxy unconditionally use HBONE.
	EnableHBONE StringBool `json:"ENABLE_HBONE,omitempty"`
	// AutoRegisterGroup will enable auto registration of the connected endpoint to the service registry using the given WorkloadGroup name
	AutoRegisterGroup string `json:"AUTO_REGISTER_GROUP,omitempty"`
	// WorkloadEntry specifies the name of the WorkloadEntry this proxy corresponds to.
	//
	// This field is intended for use in those scenarios where a user needs to
	// onboard a workload from a VM without relying on auto-registration.
	//
	// At runtime, when a proxy establishes an ADS connection to the istiod,
	// istiod will treat a non-empty value of this field as an indicator
	// that proxy corresponds to a VM and must be represented by a WorkloadEntry
	// with a given name.
	WorkloadEntry string `json:"WORKLOAD_ENTRY,omitempty"`
	// UnprivilegedPod is used to determine whether a Gateway Pod can open ports < 1024
	UnprivilegedPod string `json:"UNPRIVILEGED_POD,omitempty"`
	// PlatformMetadata contains any platform specific metadata
	PlatformMetadata map[string]string `json:"PLATFORM_METADATA,omitempty"`
	// StsPort specifies the port of security token exchange server (STS).
	// Used by envoy filters
	StsPort string `json:"STS_PORT,omitempty"`
	// EnvoyStatusPort is the Envoy status port redirecting to agent status port.
	EnvoyStatusPort int `json:"ENVOY_STATUS_PORT,omitempty"`
	// EnvoyPrometheusPort is the Envoy prometheus port redirecting to admin port prometheus endpoint.
	EnvoyPrometheusPort int `json:"ENVOY_PROMETHEUS_PORT,omitempty"`
	// ExitOnZeroActiveConnections terminates Envoy if there are no active connections if set.
	ExitOnZeroActiveConnections StringBool `json:"EXIT_ON_ZERO_ACTIVE_CONNECTIONS,omitempty"`
	// InboundListenerExactBalance sets connection balance config to use exact_balance for virtualInbound,
	// as long as QUIC, since it uses UDP, isn't also used.
	InboundListenerExactBalance StringBool `json:"INBOUND_LISTENER_EXACT_BALANCE,omitempty"`
	// OutboundListenerExactBalance sets connection balance config to use exact_balance for outbound
	// redirected tcp listeners. This does not change the virtualOutbound listener.
	OutboundListenerExactBalance StringBool `json:"OUTBOUND_LISTENER_EXACT_BALANCE,omitempty"`
	// CloudrunAddr is the istiod address when running ASM Managed Control Plane.
	CloudrunAddr string `json:"CLOUDRUN_ADDR,omitempty"`
	// MetadataDiscovery toggles metadata discovery service enablement.
	MetadataDiscovery StringBool `json:"METADATA_DISCOVERY,omitempty"`
	// Raw contains a copy of the raw metadata. This is needed to lookup arbitrary values.
	// If a value is known ahead of time it should be added to the struct rather than reading from here.
	Raw map[string]any `json:"-"`
}
// ProxyConfigOrDefault is a helper function to get the ProxyConfig from metadata, or fallback to a default
// This is useful as the logic should check for proxy config from proxy first and then defer to mesh wide defaults
// if not present.
func (m NodeMetadata) ProxyConfigOrDefault(def *meshconfig.ProxyConfig) *meshconfig.ProxyConfig {
	if pc := m.ProxyConfig; pc != nil {
		return (*meshconfig.ProxyConfig)(pc)
	}
	return def
}
// GetView returns a restricted view of the mesh for this proxy. The view can be
// restricted by network (via ISTIO_META_REQUESTED_NETWORK_VIEW).
// If not set, we assume that the proxy wants to see endpoints in any network.
func (node *Proxy) GetView() ProxyView {
	return newProxyView(node)
}

// InNetwork returns true if the proxy is on the given network, or if either
// the proxy's network or the given network is unspecified ("").
// Note: a nil proxy is considered to be in every network.
func (node *Proxy) InNetwork(network network.ID) bool {
	return node == nil || identifier.IsSameOrEmpty(network.String(), node.Metadata.Network.String())
}

// InCluster returns true if the proxy is in the given cluster, or if either
// the proxy's cluster id or the given cluster id is unspecified ("").
// Note: a nil proxy is considered to be in every cluster.
func (node *Proxy) InCluster(cluster cluster.ID) bool {
	return node == nil || identifier.IsSameOrEmpty(cluster.String(), node.Metadata.ClusterID.String())
}

// IsWaypointProxy returns true if the proxy is acting as a waypoint proxy in an ambient mesh.
func (node *Proxy) IsWaypointProxy() bool {
	return node.Type == Waypoint
}

// IsZTunnel returns true if the proxy is acting as a ztunnel in an ambient mesh.
func (node *Proxy) IsZTunnel() bool {
	return node.Type == Ztunnel
}

// IsAmbient returns true if the proxy is acting as either a ztunnel or a waypoint proxy in an ambient mesh.
func (node *Proxy) IsAmbient() bool {
	return node.IsWaypointProxy() || node.IsZTunnel()
}
// UnmarshalJSON decodes data into the typed metadata fields and, in a second
// pass, retains the untyped key/value form in m.Raw so that arbitrary metadata
// keys not modeled on the struct remain accessible.
func (m *BootstrapNodeMetadata) UnmarshalJSON(data []byte) error {
	// Create a new type from the target type to avoid recursion.
	type BootstrapNodeMetadata2 BootstrapNodeMetadata
	t2 := &BootstrapNodeMetadata2{}
	if err := json.Unmarshal(data, t2); err != nil {
		return err
	}
	// Second decode keeps the raw view for lookups of unmodeled keys.
	var raw map[string]any
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	*m = BootstrapNodeMetadata(*t2)
	m.Raw = raw
	return nil
}
// ToStruct converts NodeMetadata to a protobuf structure. This should be used only for debugging - performance is bad.
// Returns nil when either the JSON round-trip or the proto unmarshal fails
// (errors are deliberately swallowed); callers must tolerate a nil result.
func (m NodeMetadata) ToStruct() *structpb.Struct {
	j, err := json.Marshal(m)
	if err != nil {
		return nil
	}
	pbs := &structpb.Struct{}
	if err := protomarshal.Unmarshal(j, pbs); err != nil {
		return nil
	}
	return pbs
}
// IstioVersion encodes the Istio version of the proxy. This is a low key way to
// do semver style comparisons and generate the appropriate envoy config
type IstioVersion struct {
	Major int
	Minor int
	Patch int
}

// MaxIstioVersion is the sentinel "newest possible" version, used when a proxy
// version cannot be parsed.
var MaxIstioVersion = &IstioVersion{Major: 65535, Minor: 65535, Patch: 65535}

// Compare returns -1/0/1 if version is less than, equal or greater than inv
// To compare only on major, call this function with { X, -1, -1}.
// to compare only on major & minor, call this function with {X, Y, -1}.
func (pversion *IstioVersion) Compare(inv *IstioVersion) int {
	if c := compareVersion(pversion.Major, inv.Major); c != 0 {
		return c
	}
	if inv.Minor < 0 {
		// minor is a wildcard; majors already matched
		return 0
	}
	if c := compareVersion(pversion.Minor, inv.Minor); c != 0 {
		return c
	}
	if inv.Patch < 0 {
		// patch is a wildcard; major & minor already matched
		return 0
	}
	return compareVersion(pversion.Patch, inv.Patch)
}

// compareVersion is a three-way comparison of two version components.
func compareVersion(ov, nv int) int {
	switch {
	case ov < nv:
		return -1
	case ov > nv:
		return 1
	default:
		return 0
	}
}
// NodeType decides the responsibility of the proxy serves in the mesh
type NodeType string

const (
	// SidecarProxy type is used for sidecar proxies in the application containers
	SidecarProxy NodeType = "sidecar"
	// Router type is used for standalone proxies acting as L7/L4 routers
	Router NodeType = "router"
	// Waypoint type is used for waypoint proxies
	Waypoint NodeType = "waypoint"
	// Ztunnel type is used for node proxies (ztunnel)
	Ztunnel NodeType = "ztunnel"
)

// NodeTypes enumerates every recognized NodeType.
var NodeTypes = [...]NodeType{SidecarProxy, Router, Waypoint, Ztunnel}

// IPMode represents the IP mode of proxy.
type IPMode int

// IPMode constants starting with index 1.
const (
	IPv4 IPMode = iota + 1
	IPv6
	Dual
)

// IsApplicationNodeType verifies that the NodeType is one of the declared constants in the model
func IsApplicationNodeType(nType NodeType) bool {
	for _, known := range NodeTypes {
		if nType == known {
			return true
		}
	}
	return false
}
// ServiceNode encodes the proxy node attributes into a URI-acceptable string
// of the form Type~IPAddress~ID~Domain. The first IP address is used when the
// proxy has any; otherwise that part is left empty.
func (node *Proxy) ServiceNode() string {
	var ip string
	if len(node.IPAddresses) > 0 {
		ip = node.IPAddresses[0]
	}
	parts := []string{string(node.Type), ip, node.ID, node.DNSDomain}
	return strings.Join(parts, serviceNodeSeparator)
}
// SetSidecarScope identifies the sidecar scope object associated with this
// proxy and updates the proxy Node. This is a convenience hack so that
// callers can simply call push.Services(node) while the implementation of
// push.Services can return the set of services from the proxyNode's
// sidecar scope or from the push context's set of global services. Similar
// logic applies to push.VirtualServices and push.DestinationRule. The
// short cut here is useful only for CDS and parts of RDS generation code.
//
// Listener generation code will still use the SidecarScope object directly
// as it needs the set of services for each listener port.
func (node *Proxy) SetSidecarScope(ps *PushContext) {
	// Remember the current scope so it can be exposed as PrevSidecarScope below.
	sidecarScope := node.SidecarScope
	switch node.Type {
	case SidecarProxy:
		node.SidecarScope = ps.getSidecarScope(node, node.Labels)
	case Router, Waypoint:
		// Gateways should just have a default scope with egress: */*
		node.SidecarScope = ps.getSidecarScope(node, nil)
	}
	// Node types not handled above (e.g. ztunnel) keep their existing SidecarScope.
	node.PrevSidecarScope = sidecarScope
}
// SetGatewaysForProxy merges the Gateway objects associated with this
// proxy and caches the merged object in the proxy Node. This is a convenience hack so that
// callers can simply call push.MergedGateways(node) instead of having to
// fetch all the gateways and invoke the merge call in multiple places (lds/rds).
// Must be called after ServiceTargets are set
func (node *Proxy) SetGatewaysForProxy(ps *PushContext) {
	// Only Router nodes serve Gateway config; all other types are left untouched.
	if node.Type != Router {
		return
	}
	node.MergedGateway = ps.mergeGateways(node)
}
// SetServiceTargets fetches the services this proxy is a member of from the
// service discovery and stores them, sorted deterministically, on the node.
func (node *Proxy) SetServiceTargets(serviceDiscovery ServiceDiscovery) {
	instances := serviceDiscovery.GetProxyServiceTargets(node)
	// Keep service instances in order of creation/hostname.
	// NOTE(review): when either instance's Service is nil the comparator returns
	// true unconditionally, which is not a strict weak ordering; SliceStable keeps
	// the result deterministic in practice, but confirm nil Services are expected.
	sort.SliceStable(instances, func(i, j int) bool {
		if instances[i].Service != nil && instances[j].Service != nil {
			if !instances[i].Service.CreationTime.Equal(instances[j].Service.CreationTime) {
				return instances[i].Service.CreationTime.Before(instances[j].Service.CreationTime)
			}
			// Additionally, sort by hostname just in case services created automatically at the same second.
			return instances[i].Service.Hostname < instances[j].Service.Hostname
		}
		return true
	})
	node.ServiceTargets = instances
}
// SetWorkloadLabels will set the node.Labels.
// It merges both node meta labels and workload labels and give preference to workload labels.
func (node *Proxy) SetWorkloadLabels(env *Environment) {
	// If this is VM proxy, do not override labels at all, because in istio test we use pod to simulate VM.
	if node.IsVM() {
		node.Labels = node.Metadata.Labels
		return
	}
	labels := env.GetProxyWorkloadLabels(node)
	if labels != nil {
		node.Labels = make(map[string]string, len(labels)+len(node.Metadata.StaticLabels))
		// we can't just equate proxy workload labels to node meta labels as it may be customized by user
		// with `ISTIO_METAJSON_LABELS` env (pkg/bootstrap/config.go extractAttributesMetadata).
		// so, we fill the `ISTIO_METAJSON_LABELS` as well.
		for k, v := range node.Metadata.StaticLabels {
			node.Labels[k] = v
		}
		// Workload labels are copied second so they win over static labels on conflict.
		for k, v := range labels {
			node.Labels[k] = v
		}
	} else {
		// If could not find pod labels, fallback to use the node metadata labels.
		node.Labels = node.Metadata.Labels
	}
}
// DiscoverIPMode discovers the IP Versions supported by Proxy based on its IP addresses.
// It sets ipMode to IPv4 or IPv6 when all addresses share one family, Dual otherwise,
// and also records the node's global unicast IP (empty if none).
func (node *Proxy) DiscoverIPMode() {
	if networkutil.AllIPv4(node.IPAddresses) {
		node.ipMode = IPv4
	} else if networkutil.AllIPv6(node.IPAddresses) {
		node.ipMode = IPv6
	} else {
		node.ipMode = Dual
	}
	node.GlobalUnicastIP = networkutil.GlobalUnicastIP(node.IPAddresses)
}
// SupportsIPv4 returns true if proxy supports IPv4 addresses (including dual-stack).
func (node *Proxy) SupportsIPv4() bool {
	return node.ipMode == IPv4 || node.ipMode == Dual
}

// SupportsIPv6 returns true if proxy supports IPv6 addresses (including dual-stack).
func (node *Proxy) SupportsIPv6() bool {
	return node.ipMode == IPv6 || node.ipMode == Dual
}

// IsIPv6 returns true if proxy only supports IPv6 addresses.
func (node *Proxy) IsIPv6() bool {
	return node.ipMode == IPv6
}

// IsDualStack returns true if the proxy supports both IPv4 and IPv6 addresses.
func (node *Proxy) IsDualStack() bool {
	return node.ipMode == Dual
}

// GetIPMode returns proxy's ipMode
func (node *Proxy) GetIPMode() IPMode {
	return node.ipMode
}
// ParseMetadata parses the opaque Metadata from an Envoy Node into string key-value pairs.
// Any non-string values are ignored.
// A nil input yields an empty (non-nil) NodeMetadata.
func ParseMetadata(metadata *structpb.Struct) (*NodeMetadata, error) {
	if metadata == nil {
		return &NodeMetadata{}, nil
	}
	// Delegate to the bootstrap parser and return only the embedded NodeMetadata portion.
	bootstrapNodeMeta, err := ParseBootstrapNodeMetadata(metadata)
	if err != nil {
		return nil, err
	}
	return &bootstrapNodeMeta.NodeMetadata, nil
}
// ParseBootstrapNodeMetadata parses the opaque Metadata from an Envoy Node into string key-value pairs.
// The proto struct is serialized to JSON (with proto field names) and then decoded
// via BootstrapNodeMetadata.UnmarshalJSON, which also populates the Raw map.
// A nil input yields an empty (non-nil) BootstrapNodeMetadata.
func ParseBootstrapNodeMetadata(metadata *structpb.Struct) (*BootstrapNodeMetadata, error) {
	if metadata == nil {
		return &BootstrapNodeMetadata{}, nil
	}
	b, err := protomarshal.MarshalProtoNames(metadata)
	if err != nil {
		return nil, fmt.Errorf("failed to read node metadata %v: %v", metadata, err)
	}
	meta := &BootstrapNodeMetadata{}
	if err := json.Unmarshal(b, meta); err != nil {
		return nil, fmt.Errorf("failed to unmarshal node metadata (%v): %v", string(b), err)
	}
	return meta, nil
}
// ParseServiceNodeWithMetadata parse the Envoy Node from the string generated by ServiceNode
// function and the metadata.
// nodeID must have the 4-part form Type~IPAddress~ID~Domain; on validation
// failures a partially-populated Proxy is returned alongside the error.
func ParseServiceNodeWithMetadata(nodeID string, metadata *NodeMetadata) (*Proxy, error) {
	parts := strings.Split(nodeID, serviceNodeSeparator)
	out := &Proxy{
		Metadata: metadata,
	}
	if len(parts) != 4 {
		return out, fmt.Errorf("missing parts in the service node %q", nodeID)
	}
	if !IsApplicationNodeType(NodeType(parts[0])) {
		return out, fmt.Errorf("invalid node type (valid types: %v) in the service node %q", NodeTypes, nodeID)
	}
	out.Type = NodeType(parts[0])
	// Get all IP Addresses from Metadata
	if hasValidIPAddresses(metadata.InstanceIPs) {
		out.IPAddresses = metadata.InstanceIPs
	} else if netutil.IsValidIPAddress(parts[1]) {
		// Fall back, use IP from node id, it's only for backward-compatibility, IP should come from metadata
		out.IPAddresses = append(out.IPAddresses, parts[1])
	}
	// Does query from ingress or router have to carry valid IP address?
	if len(out.IPAddresses) == 0 {
		return out, fmt.Errorf("no valid IP address in the service node id or metadata")
	}
	out.ID = parts[2]
	out.DNSDomain = parts[3]
	if len(metadata.IstioVersion) == 0 {
		log.Warnf("Istio Version is not found in metadata for %v, which may have undesirable side effects", out.ID)
	}
	// A missing/unparseable version falls back to MaxIstioVersion inside ParseIstioVersion.
	out.IstioVersion = ParseIstioVersion(metadata.IstioVersion)
	return out, nil
}
// ParseIstioVersion parses a version string and returns IstioVersion struct
func ParseIstioVersion(ver string) *IstioVersion {
	// strip the release- prefix if any and extract the version string
	ver = istioVersionRegexp.FindString(strings.TrimPrefix(ver, "release-"))
	if ver == "" {
		// return very large values assuming latest version
		return MaxIstioVersion
	}
	// The regexp guarantees at least "major.minor", with an optional ".patch".
	majorStr, rest, _ := strings.Cut(ver, ".")
	minorStr, patchStr, hasPatch := strings.Cut(rest, ".")
	major, _ := strconv.Atoi(majorStr)
	minor, _ := strconv.Atoi(minorStr)
	// Assume very large patch release if not set
	patch := 65535
	if hasPatch {
		patch, _ = strconv.Atoi(patchStr)
	}
	return &IstioVersion{Major: major, Minor: minor, Patch: patch}
}
// GetOrDefault returns either the value, or the default if the value is empty. Useful when retrieving node metadata fields.
func GetOrDefault(s string, def string) string {
	if s == "" {
		return def
	}
	return s
}
// GetProxyConfigNamespace extracts the namespace associated with the proxy
// from the proxy metadata or the proxy ID
func GetProxyConfigNamespace(proxy *Proxy) string {
	if proxy == nil {
		return ""
	}
	// First look for ISTIO_META_CONFIG_NAMESPACE
	// All newer proxies (from Istio 1.1 onwards) are supposed to supply this
	if ns := proxy.Metadata.Namespace; ns != "" {
		return ns
	}
	// if not found, for backward compatibility, extract the namespace from
	// the proxy domain. this is a k8s specific hack and should be enabled:
	// k8s domains look like "<namespace>.<domain>", so take everything before
	// the first dot when one is present.
	if ns, _, found := strings.Cut(proxy.DNSDomain, "."); found {
		return ns
	}
	return ""
}
const (
	// serviceNodeSeparator delimits the 4 parts (Type~IPAddress~ID~Domain) of a service node ID.
	serviceNodeSeparator = "~"
)
// ParsePort extracts port number from a valid proxy address of the form "host:port".
// Returns 0 when the address carries no port (including most SplitHostPort failures,
// which leave the port empty); a malformed numeric port is logged and also yields 0
// via strconv.Atoi's zero result.
func ParsePort(addr string) int {
	_, sPort, err := net.SplitHostPort(addr)
	// Empty port short-circuits before any logging, deliberately silent.
	if sPort == "" {
		return 0
	}
	if err != nil {
		log.Warn(err)
	}
	port, pErr := strconv.Atoi(sPort)
	if pErr != nil {
		log.Warn(pErr)
	}
	return port
}
// hasValidIPAddresses reports whether the input is non-empty and every entry is
// a syntactically valid IP address; any invalid entry makes the whole set invalid.
func hasValidIPAddresses(ipAddresses []string) bool {
	if len(ipAddresses) == 0 {
		return false
	}
	for _, addr := range ipAddresses {
		if !netutil.IsValidIPAddress(addr) {
			return false
		}
	}
	return true
}
// TrafficInterceptionMode indicates how traffic to/from the workload is captured and
// sent to Envoy. This should not be confused with the CaptureMode in the API that indicates
// how the user wants traffic to be intercepted for the listener. TrafficInterceptionMode is
// always derived from the Proxy metadata
type TrafficInterceptionMode string

const (
	// InterceptionNone indicates that the workload is not using IPtables for traffic interception
	InterceptionNone TrafficInterceptionMode = "NONE"
	// InterceptionTproxy implies traffic intercepted by IPtables with TPROXY mode
	InterceptionTproxy TrafficInterceptionMode = "TPROXY"
	// InterceptionRedirect implies traffic intercepted by IPtables with REDIRECT mode
	// This is our default mode
	InterceptionRedirect TrafficInterceptionMode = "REDIRECT"
)
// GetInterceptionMode extracts the interception mode associated with the proxy
// from the proxy metadata. Defaults to InterceptionRedirect when the node,
// its metadata, or the mode value is missing or unrecognized.
func (node *Proxy) GetInterceptionMode() TrafficInterceptionMode {
	// Guard Metadata as well as the node itself, matching the defensive checks
	// used by the other Proxy accessors (GetClusterID, GetNamespace, ...).
	if node == nil || node.Metadata == nil {
		return InterceptionRedirect
	}
	// Compare against the declared TrafficInterceptionMode constants instead of
	// repeating the raw strings, keeping this switch consistent with the type's
	// const block.
	switch node.Metadata.InterceptionMode {
	case InterceptionTproxy:
		return InterceptionTproxy
	case InterceptionRedirect:
		return InterceptionRedirect
	case InterceptionNone:
		return InterceptionNone
	}
	return InterceptionRedirect
}
// IsUnprivileged returns true if the proxy has explicitly indicated that it is
// unprivileged, i.e. it cannot bind to the privileged ports 1-1023.
func (node *Proxy) IsUnprivileged() bool {
	if node == nil || node.Metadata == nil {
		return false
	}
	// Only an explicit truthy value (per strconv.ParseBool) counts; unparseable
	// or empty values are treated as privileged.
	parsed, err := strconv.ParseBool(node.Metadata.UnprivilegedPod)
	return err == nil && parsed
}
// CanBindToPort returns true if the proxy can bind to a given port.
// When bindTo is false there is nothing to check; otherwise the port must not
// be privileged on an unprivileged pod, and must not collide with the proxy's
// static status/prometheus listeners.
func (node *Proxy) CanBindToPort(bindTo bool, port uint32) bool {
	if !bindTo {
		return true
	}
	if IsPrivilegedPort(port) && node.IsUnprivileged() {
		return false
	}
	if md := node.Metadata; md != nil {
		// cannot bind to a port already bound by a proxy static listener
		if md.EnvoyPrometheusPort == int(port) || md.EnvoyStatusPort == int(port) {
			return false
		}
	}
	return true
}
// IsPrivilegedPort returns true if a given port is in the range 1-1023.
// Port 0 is deliberately excluded because:
//  1. technically, 0 is not a privileged port; any process can ask to bind to 0
//  2. this function will be receiving 0 on input in the case of UDS listeners
func IsPrivilegedPort(port uint32) bool {
	return port >= 1 && port <= 1023
}
// IsVM reports whether the proxy represents a VM workload, currently detected
// via the TestVMLabel in the proxy metadata labels.
func (node *Proxy) IsVM() bool {
	// TODO use node metadata to indicate that this is a VM instead of the TestVMLabel
	// Guard against missing metadata (consistent with IsProxylessGrpc); a
	// proxy without metadata cannot carry the VM label.
	if node.Metadata == nil {
		return false
	}
	return node.Metadata.Labels[constants.TestVMLabel] != ""
}
// IsProxylessGrpc reports whether this node is a proxyless gRPC agent, which
// is identified by the "grpc" generator in its metadata.
func (node *Proxy) IsProxylessGrpc() bool {
	if node.Metadata == nil {
		return false
	}
	return node.Metadata.Generator == "grpc"
}
// GetNodeName returns the Kubernetes node the proxy runs on, preferring the
// explicit metadata field and falling back to the well-known hostname label.
func (node *Proxy) GetNodeName() string {
	if md := node.Metadata; md != nil && md.NodeName != "" {
		return md.NodeName
	}
	// Fall back to the node name from labels; this can happen for an "old"
	// proxy with no `Metadata.NodeName` set.
	// TODO: remove this when 1.16 is EOL?
	return node.Labels[label.LabelHostname]
}
// GetClusterID returns the cluster ID from the proxy metadata, or "" when the
// proxy or its metadata is unset.
func (node *Proxy) GetClusterID() cluster.ID {
	if node != nil && node.Metadata != nil {
		return node.Metadata.ClusterID
	}
	return ""
}
// GetNamespace returns the namespace from the proxy metadata, or "" when the
// proxy or its metadata is unset.
func (node *Proxy) GetNamespace() string {
	if node != nil && node.Metadata != nil {
		return node.Metadata.Namespace
	}
	return ""
}
// GetIstioVersion returns the Istio version string from the proxy metadata,
// or "" when the proxy or its metadata is unset.
func (node *Proxy) GetIstioVersion() string {
	if node != nil && node.Metadata != nil {
		return node.Metadata.IstioVersion
	}
	return ""
}
// GetID returns the unique proxy ID, or "" for a nil proxy.
func (node *Proxy) GetID() string {
	if node != nil {
		return node.ID
	}
	return ""
}
// FuzzValidate reports whether the proxy is well-formed enough to be used as a
// fuzzing input: metadata present, a recognized node type, and at least one
// IP address.
func (node *Proxy) FuzzValidate() bool {
	if node.Metadata == nil {
		return false
	}
	for _, nodeType := range NodeTypes {
		if node.Type == nodeType {
			// Known node type: valid as long as an address is present.
			return len(node.IPAddresses) > 0
		}
	}
	// Unrecognized node type.
	return false
}
// EnableHBONE reports whether HBONE should be enabled for this proxy: always
// for ambient proxies, otherwise gated on both the global feature flag and
// the per-proxy metadata opt-in.
func (node *Proxy) EnableHBONE() bool {
	if node.IsAmbient() {
		return true
	}
	return features.EnableHBONE && bool(node.Metadata.EnableHBONE)
}
// WaypointScope is either an entire namespace or an individual service account
// in the namespace. This setting dictates the upstream TLS verification
// strategy, depending on the binding of the waypoints to its backend
// workloads.
type WaypointScope struct {
	// Namespace the waypoint serves.
	Namespace string
	// ServiceAccount optionally narrows the scope to a single service account.
	ServiceAccount string // optional
}
// WaypointScope derives the scope this proxy serves as a waypoint: its config
// namespace plus an optional service account from the waypoint annotation.
func (node *Proxy) WaypointScope() WaypointScope {
	sa := node.Metadata.Annotations[constants.WaypointServiceAccount]
	return WaypointScope{Namespace: node.ConfigNamespace, ServiceAccount: sa}
}
// SetWorkloadEntry records, under the proxy's write lock, the name of the
// WorkloadEntry associated with this proxy and whether it was auto-created.
func (node *Proxy) SetWorkloadEntry(name string, create bool) {
	node.Lock()
	node.workloadEntryName = name
	node.workloadEntryAutoCreated = create
	node.Unlock()
}
// WorkloadEntry returns, under the proxy's read lock, the associated
// WorkloadEntry name and whether it was auto-created.
func (node *Proxy) WorkloadEntry() (string, bool) {
	node.RLock()
	name, autoCreated := node.workloadEntryName, node.workloadEntryAutoCreated
	node.RUnlock()
	return name, autoCreated
}
// SupportsEnvoyExtendedJwt indicates that the proxy JWT extension is capable of
// replacing the istio_authn filter. This holds for unknown proxy versions and
// for Istio 1.21 and later.
func (node *Proxy) SupportsEnvoyExtendedJwt() bool {
	if node.IstioVersion == nil {
		return true
	}
	minVersion := &IstioVersion{Major: 1, Minor: 21, Patch: -1}
	return node.IstioVersion.Compare(minVersion) >= 0
}
// GatewayController is a ConfigStoreController that can additionally reconcile
// gateway-derived state and answer SDS credential access queries.
type GatewayController interface {
	ConfigStoreController
	// Reconcile updates the internal state of the gateway controller for a given input. This should be
	// called before any List/Get calls if the state has changed
	Reconcile(ctx *PushContext) error
	// SecretAllowed determines if a SDS credential is accessible to a given namespace.
	// For example, for resourceName of `kubernetes-gateway://ns-name/secret-name` and namespace of `ingress-ns`,
	// this would return true only if there was a policy allowing `ingress-ns` to access Secrets in the `ns-name` namespace.
	SecretAllowed(resourceName string, namespace string) bool
}
// OutboundListenerClass is a helper to turn a NodeType for outbound to a ListenerClass.
func OutboundListenerClass(t NodeType) istionetworking.ListenerClass {
	switch t {
	case Router:
		return istionetworking.ListenerClassGateway
	default:
		return istionetworking.ListenerClassSidecarOutbound
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"sync"
"istio.io/istio/pkg/cluster"
)
// ServiceHandler is notified about changes to the service catalog. It receives
// the previous and current Service along with the triggering event.
type ServiceHandler func(*Service, *Service, Event)

// Controller defines an event controller loop. Proxy agent registers itself
// with the controller loop and receives notifications on changes to the
// service topology or changes to the configuration artifacts.
//
// The controller guarantees the following consistency requirement: registry
// view in the controller is as AT LEAST as fresh as the moment notification
// arrives, but MAY BE more fresh (e.g. "delete" cancels an "add" event). For
// example, an event for a service creation will see a service registry without
// the service if the event is immediately followed by the service deletion
// event.
//
// Handlers execute on the single worker queue in the order they are appended.
// Handlers receive the notification event and the associated object. Note
// that all handlers must be appended before starting the controller.
type Controller interface {
	// Note: AppendXXXHandler is used to register high level handlers.
	// For per cluster handlers, they should be registered by the `AppendXXXHandlerForCluster` interface.

	// AppendServiceHandler notifies about changes to the service catalog.
	AppendServiceHandler(f ServiceHandler)
	// AppendWorkloadHandler notifies about changes to workloads. This differs from InstanceHandler,
	// which deals with service instances (the result of a merge of Service and Workload)
	AppendWorkloadHandler(f func(*WorkloadInstance, Event))
	// Run until a signal is received
	Run(stop <-chan struct{})
	// HasSynced returns true after initial cache synchronization is complete
	HasSynced() bool
}
// AggregateController is a wrapper of Controller; it supports registering handlers for a specific cluster.
type AggregateController interface {
	Controller
	// AppendServiceHandlerForCluster is similar to Controller.AppendServiceHandler,
	// but it is used to store the handler from a specific cluster.
	AppendServiceHandlerForCluster(clusterID cluster.ID, f ServiceHandler)
	// UnRegisterHandlersForCluster removes the handlers previously registered for the given cluster.
	UnRegisterHandlersForCluster(clusterID cluster.ID)
}
// ControllerHandlers is a utility to help Controller implementations manage their lists of handlers.
// Handler slices are replaced wholesale under the mutex (copy-on-write), so
// slices returned by the getters are safe to iterate without further locking.
type ControllerHandlers struct {
	// mutex guards both handler slices below.
	mutex            sync.RWMutex
	serviceHandlers  []ServiceHandler
	workloadHandlers []func(*WorkloadInstance, Event)
}
// AppendServiceHandler registers a service handler. The slice is rebuilt
// (copy-on-write) so concurrent readers never observe a partial update.
func (c *ControllerHandlers) AppendServiceHandler(f ServiceHandler) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	updated := append(make([]ServiceHandler, 0, len(c.serviceHandlers)+1), c.serviceHandlers...)
	c.serviceHandlers = append(updated, f)
}
// AppendWorkloadHandler registers a workload handler. The slice is rebuilt
// (copy-on-write) so concurrent readers never observe a partial update.
func (c *ControllerHandlers) AppendWorkloadHandler(f func(*WorkloadInstance, Event)) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	updated := append(make([]func(*WorkloadInstance, Event), 0, len(c.workloadHandlers)+1), c.workloadHandlers...)
	c.workloadHandlers = append(updated, f)
}
// GetServiceHandlers returns the current service handler slice. The slice is
// never mutated in place (appends use copy-on-write), so handing it out
// directly is safe for callers to iterate.
func (c *ControllerHandlers) GetServiceHandlers() []ServiceHandler {
	c.mutex.RLock()
	handlers := c.serviceHandlers
	c.mutex.RUnlock()
	return handlers
}
// GetWorkloadHandlers returns the current workload handler slice. The slice is
// never mutated in place (appends use copy-on-write), so handing it out
// directly is safe for callers to iterate.
func (c *ControllerHandlers) GetWorkloadHandlers() []func(*WorkloadInstance, Event) {
	c.mutex.RLock()
	handlers := c.workloadHandlers
	c.mutex.RUnlock()
	return handlers
}
// NotifyServiceHandlers invokes every registered service handler in
// registration order with the previous/current service and the event.
func (c *ControllerHandlers) NotifyServiceHandlers(prev, curr *Service, event Event) {
	for _, handler := range c.GetServiceHandlers() {
		handler(prev, curr, event)
	}
}
// NotifyWorkloadHandlers invokes every registered workload handler in
// registration order with the workload instance and the event.
func (c *ControllerHandlers) NotifyWorkloadHandlers(w *WorkloadInstance, event Event) {
	for _, handler := range c.GetWorkloadHandlers() {
		handler(w, event)
	}
}
// Event represents a registry update event
type Event int

const (
	// EventAdd is sent when an object is added
	EventAdd Event = iota
	// EventUpdate is sent when an object is modified
	// Captures the modified object
	EventUpdate
	// EventDelete is sent when an object is deleted
	// Captures the object at the last known state
	EventDelete
)

// String returns a human-readable name for the event, or "unknown" for any
// value outside the defined set.
func (event Event) String() string {
	switch event {
	case EventAdd:
		return "add"
	case EventUpdate:
		return "update"
	case EventDelete:
		return "delete"
	default:
		return "unknown"
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package credentials
import (
"fmt"
"strings"
"istio.io/istio/pkg/cluster"
)
const (
	// KubernetesSecretType is the name of a SDS secret stored in Kubernetes. Secrets here take the form
	// kubernetes://secret-name. They will be pulled from the same namespace and cluster as the requesting proxy lives in.
	KubernetesSecretType = "kubernetes"
	// KubernetesSecretTypeURI is the type name with the "://" URI scheme separator appended.
	KubernetesSecretTypeURI = KubernetesSecretType + "://"
	// KubernetesGatewaySecretType is the name of a SDS secret stored in Kubernetes, used by the gateway-api. Secrets here
	// take the form kubernetes-gateway://namespace/name. They are pulled from the config cluster.
	KubernetesGatewaySecretType = "kubernetes-gateway"
	// kubernetesGatewaySecretTypeURI is the gateway type name with the "://" URI scheme separator appended.
	kubernetesGatewaySecretTypeURI = KubernetesGatewaySecretType + "://"
	// BuiltinGatewaySecretType is the name of a SDS secret that uses the workloads own mTLS certificate
	BuiltinGatewaySecretType = "builtin"
	// BuiltinGatewaySecretTypeURI is the builtin type name with the "://" URI scheme separator appended.
	BuiltinGatewaySecretTypeURI = BuiltinGatewaySecretType + "://"
	// SdsCaSuffix is the suffix of the sds resource name for root CA.
	SdsCaSuffix = "-cacert"
)
// SecretResource defines a reference to a secret
type SecretResource struct {
	// ResourceType is the type of secret. One of KubernetesSecretType or KubernetesGatewaySecretType
	ResourceType string
	// Name is the name of the secret
	Name string
	// Namespace is the namespace the secret resides in. For implicit namespace references (such as in KubernetesSecretType),
	// this will be resolved to the appropriate namespace. As a result, this should never be empty.
	Namespace string
	// ResourceName is the original name of the resource
	ResourceName string
	// Cluster is the cluster the secret should be fetched from.
	Cluster cluster.ID
}
// Key returns a unique identifier for the resource, joining its type, name,
// namespace, and cluster with "/".
func (sr SecretResource) Key() string {
	return strings.Join([]string{sr.ResourceType, sr.Name, sr.Namespace, string(sr.Cluster)}, "/")
}
// KubernetesResourceName renders the resource back into its
// <type>://<namespace>/<name> URI form.
func (sr SecretResource) KubernetesResourceName() string {
	return sr.ResourceType + "://" + sr.Namespace + "/" + sr.Name
}
// ToKubernetesGatewayResource builds a kubernetes-gateway:// resource URI for
// the given namespace and name. Builtin credential references collapse to the
// bare builtin URI.
func ToKubernetesGatewayResource(namespace, name string) string {
	if strings.HasPrefix(name, BuiltinGatewaySecretTypeURI) {
		return BuiltinGatewaySecretTypeURI
	}
	return kubernetesGatewaySecretTypeURI + namespace + "/" + name
}
// ToResourceName turns a `credentialName` into a resource name used for SDS
func ToResourceName(name string) string {
	switch {
	case strings.HasPrefix(name, BuiltinGatewaySecretTypeURI):
		// Builtin credentials map to the proxy's own ("default") certificate.
		return "default"
	case strings.HasPrefix(name, KubernetesSecretTypeURI),
		strings.HasPrefix(name, kubernetesGatewaySecretTypeURI):
		// An explicitly typed reference is preserved as-is.
		return name
	default:
		// Otherwise, assume an implicit kubernetes:// secret reference.
		return KubernetesSecretTypeURI + name
	}
}
// ParseResourceName parses a raw resourceName string.
//
// Supported forms:
//   - kubernetes://secret-name                          (proxy namespace, proxy cluster)
//   - kubernetes://secret-namespace/secret-name          (proxy cluster)
//   - kubernetes-gateway://secret-namespace/secret-name  (config cluster; namespace required)
func ParseResourceName(resourceName string, proxyNamespace string, proxyCluster cluster.ID, configCluster cluster.ID) (SecretResource, error) {
	const sep = "/"
	switch {
	case strings.HasPrefix(resourceName, KubernetesSecretTypeURI):
		// If the namespace is omitted we fall back to the proxy's own namespace,
		// and the secret is read from the cluster the proxy resides in. This
		// mirrors the legacy behavior of mounting a secret as a file.
		parts := strings.Split(strings.TrimPrefix(resourceName, KubernetesSecretTypeURI), sep)
		ns, nm := proxyNamespace, parts[0]
		if len(parts) > 1 {
			ns, nm = parts[0], parts[1]
		}
		return SecretResource{
			ResourceType: KubernetesSecretType,
			Name:         nm,
			Namespace:    ns,
			ResourceName: resourceName,
			Cluster:      proxyCluster,
		}, nil
	case strings.HasPrefix(resourceName, kubernetesGatewaySecretTypeURI):
		// Namespace is mandatory here, and the secret is read from the config
		// cluster; this is the primary difference from KubernetesSecretType.
		parts := strings.Split(strings.TrimPrefix(resourceName, kubernetesGatewaySecretTypeURI), sep)
		if len(parts) <= 1 {
			return SecretResource{}, fmt.Errorf("invalid resource name %q. Expected namespace and name", resourceName)
		}
		ns, nm := parts[0], parts[1]
		if len(ns) == 0 {
			return SecretResource{}, fmt.Errorf("invalid resource name %q. Expected namespace", resourceName)
		}
		if len(nm) == 0 {
			return SecretResource{}, fmt.Errorf("invalid resource name %q. Expected name", resourceName)
		}
		return SecretResource{
			ResourceType: KubernetesGatewaySecretType,
			Name:         nm,
			Namespace:    ns,
			ResourceName: resourceName,
			Cluster:      configCluster,
		}, nil
	default:
		return SecretResource{}, fmt.Errorf("unknown resource type: %v", resourceName)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"k8s.io/apimachinery/pkg/types"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/util/sets"
)
// This function merges one or more destination rules for a given host string
// into a single destination rule. Note that it does not perform inheritance style merging.
// IOW, given three dest rules (*.foo.com, *.foo.com, *.com), calling this function for
// each config will result in a final dest rule set (*.foo.com, and *.com).
//
// The following is the merge logic:
// 1. Unique subsets (based on subset name) are concatenated to the original rule's list of subsets
// 2. If the original rule did not have any top level traffic policy, traffic policies from the new rule will be
// used.
// 3. If the original rule did not have any exportTo, exportTo settings from the new rule will be used.
func (ps *PushContext) mergeDestinationRule(p *consolidatedDestRules, destRuleConfig config.Config, exportToSet sets.Set[visibility.Instance]) {
	rule := destRuleConfig.Spec.(*networking.DestinationRule)
	resolvedHost := ResolveShortnameToFQDN(rule.Host, destRuleConfig.Meta)
	// Wildcard and specific hosts are indexed in separate maps.
	var destRules map[host.Name][]*ConsolidatedDestRule
	if resolvedHost.IsWildCarded() {
		destRules = p.wildcardDestRules
	} else {
		destRules = p.specificDestRules
	}
	if mdrList, exists := destRules[resolvedHost]; exists {
		// `addRuleToProcessedDestRules` determines if the incoming destination rule would become a new unique entry in the processedDestRules list.
		addRuleToProcessedDestRules := true
		for _, mdr := range mdrList {
			existingRule := mdr.rule.Spec.(*networking.DestinationRule)
			bothWithoutSelector := rule.GetWorkloadSelector() == nil && existingRule.GetWorkloadSelector() == nil
			bothWithSelector := existingRule.GetWorkloadSelector() != nil && rule.GetWorkloadSelector() != nil
			selectorsMatch := labels.Instance(existingRule.GetWorkloadSelector().GetMatchLabels()).Equals(rule.GetWorkloadSelector().GetMatchLabels())
			if bothWithSelector && !selectorsMatch {
				// If the new destination rule and the existing one has workload selectors associated with them, skip merging
				// if the selectors do not match
				continue
			}
			// If both the destination rules are without a workload selector or with matching workload selectors, simply merge them.
			// If the incoming rule has a workload selector, it has to be merged with the existing rules with workload selector, and
			// at the same time added as a unique entry in the processedDestRules.
			if bothWithoutSelector || (rule.GetWorkloadSelector() != nil && selectorsMatch) {
				addRuleToProcessedDestRules = false
			}
			// Deep copy destination rule, to prevent mutate it later when merge with a new one.
			// This can happen when there are more than one destination rule of same host in one namespace.
			copied := mdr.rule.DeepCopy()
			mdr.rule = &copied
			mdr.from = append(mdr.from, destRuleConfig.NamespacedName())
			mergedRule := copied.Spec.(*networking.DestinationRule)
			// Collect existing subset names so duplicates in the incoming rule can be detected.
			existingSubset := sets.String{}
			for _, subset := range mergedRule.Subsets {
				existingSubset.Insert(subset.Name)
			}
			// we have an another destination rule for same host.
			// concatenate both of them -- essentially add subsets from one to other.
			// Note: we only add the subsets and do not overwrite anything else like exportTo or top level
			// traffic policies if they already exist
			for _, subset := range rule.Subsets {
				if !existingSubset.Contains(subset.Name) {
					// if not duplicated, append
					mergedRule.Subsets = append(mergedRule.Subsets, subset)
				} else {
					// duplicate subset: record a metric instead of overwriting the existing one
					ps.AddMetric(DuplicatedSubsets, string(resolvedHost), "",
						fmt.Sprintf("Duplicate subset %s found while merging destination rules for %s",
							subset.Name, string(resolvedHost)))
				}
			}
			// If there is no top level policy and the incoming rule has top level
			// traffic policy, use the one from the incoming rule.
			if mergedRule.TrafficPolicy == nil && rule.TrafficPolicy != nil {
				mergedRule.TrafficPolicy = rule.TrafficPolicy
			}
			// If there is no exportTo in the existing rule and
			// the incoming rule has an explicit exportTo, use the
			// one from the incoming rule.
			if p.exportTo[resolvedHost].IsEmpty() && !exportToSet.IsEmpty() {
				p.exportTo[resolvedHost] = exportToSet
			}
		}
		if addRuleToProcessedDestRules {
			destRules[resolvedHost] = append(destRules[resolvedHost], ConvertConsolidatedDestRule(&destRuleConfig))
		}
		// Merged into (or appended alongside) the existing rules; done.
		return
	}
	// DestinationRule does not exist for the resolved host so add it
	destRules[resolvedHost] = append(destRules[resolvedHost], ConvertConsolidatedDestRule(&destRuleConfig))
	p.exportTo[resolvedHost] = exportToSet
}
// ConvertConsolidatedDestRule wraps a single DestinationRule config into a
// ConsolidatedDestRule, recording the config itself as the sole origin.
func ConvertConsolidatedDestRule(cfg *config.Config) *ConsolidatedDestRule {
	out := &ConsolidatedDestRule{rule: cfg}
	out.from = []types.NamespacedName{cfg.NamespacedName()}
	return out
}
// Equals compare l equals r consolidatedDestRule or not.
// Two rules are considered equal when their origin (`from`) lists match
// pairwise in order; the merged rule content itself is not compared.
func (l *ConsolidatedDestRule) Equals(r *ConsolidatedDestRule) bool {
	switch {
	case l == r:
		return true
	case l == nil, r == nil:
		return false
	case len(l.from) != len(r.from):
		return false
	}
	for i := range l.from {
		if l.from[i] != r.from[i] {
			return false
		}
	}
	return true
}
// GetRule returns the merged destination rule config, or nil for a nil receiver.
func (l *ConsolidatedDestRule) GetRule() *config.Config {
	if l != nil {
		return l.rule
	}
	return nil
}
// GetFrom returns the list of configs this rule was merged from, or nil for a
// nil receiver.
func (l *ConsolidatedDestRule) GetFrom() []types.NamespacedName {
	if l != nil {
		return l.from
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"errors"
"istio.io/istio/pkg/ledger"
)
// DisabledLedger is an empty mock of the ledger.Ledger interface
// which we will substitute when distribution tracking is disabled.
type DisabledLedger struct {
	ledger.Ledger
}

// Put is a no-op; it reports success without recording anything.
func (d *DisabledLedger) Put(key, value string) (string, error) {
	return "", nil
}

// Delete is a no-op that always succeeds.
func (d *DisabledLedger) Delete(key string) error {
	return nil
}

// Get always returns an empty value without error.
func (d *DisabledLedger) Get(key string) (string, error) {
	return "", nil
}

// RootHash returns an empty hash, since nothing is tracked.
func (d *DisabledLedger) RootHash() string {
	return ""
}

// GetPreviousValue always fails, since no history is retained while
// distribution tracking is disabled.
func (d *DisabledLedger) GetPreviousValue(previousHash, key string) (result string, err error) {
	return "", errors.New("distribution tracking is disabled")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"sort"
"sync"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// shardRegistry is a simplified interface for registries that can produce a shard key
type shardRegistry interface {
	// Cluster returns the ID of the cluster this registry serves.
	Cluster() cluster.ID
	// Provider returns the ID of the registry provider.
	Provider() provider.ID
}
// ShardKeyFromRegistry computes the shard key based on provider type and cluster id.
func ShardKeyFromRegistry(instance shardRegistry) ShardKey {
	return ShardKey{
		Cluster:  instance.Cluster(),
		Provider: instance.Provider(),
	}
}
// ShardKey is the key for EndpointShards made of a key with the format "provider/cluster"
type ShardKey struct {
	Cluster  cluster.ID
	Provider provider.ID
}

// String renders the key in its canonical "provider/cluster" form.
func (sk ShardKey) String() string {
	return fmt.Sprintf("%s/%s", sk.Provider, sk.Cluster)
}

// MarshalText implements the TextMarshaler interface (for json key usage)
func (sk ShardKey) MarshalText() (text []byte, err error) {
	return []byte(sk.String()), nil
}
// EndpointShards holds the set of endpoint shards of a service. Registries update
// individual shards incrementally. The shards are aggregated and split into
// clusters when a push for the specific cluster is needed.
type EndpointShards struct {
	// mutex protecting below map. It also guards ServiceAccounts, which is
	// recomputed from the shards while the write lock is held.
	sync.RWMutex
	// Shards is used to track the shards. EDS updates are grouped by shard.
	// Current implementation uses the registry name as key - in multicluster this is the
	// name of the k8s cluster, derived from the config (secret).
	Shards map[ShardKey][]*IstioEndpoint
	// ServiceAccounts has the concatenation of all service accounts seen so far in endpoints.
	// This is updated on push, based on shards. If the previous list is different than
	// current list, a full push will be forced, to trigger a secure naming update.
	// Due to the larger time, it is still possible that connection errors will occur while
	// CDS is updated.
	ServiceAccounts sets.String
}
// Keys gives a sorted list of keys for EndpointShards.Shards.
// Calls to Keys should be guarded with a lock on the EndpointShards.
func (es *EndpointShards) Keys() []ShardKey {
	// len(shards) ~= number of remote clusters which isn't too large, doing this sort frequently
	// shouldn't be too problematic. If it becomes an issue we can cache it in the EndpointShards struct.
	keys := make([]ShardKey, 0, len(es.Shards))
	for key := range es.Shards {
		keys = append(keys, key)
	}
	if len(keys) >= 2 {
		// Order by provider first, breaking ties by cluster.
		sort.Slice(keys, func(i, j int) bool {
			if keys[i].Provider != keys[j].Provider {
				return keys[i].Provider < keys[j].Provider
			}
			return keys[i].Cluster < keys[j].Cluster
		})
	}
	return keys
}
// CopyEndpoints takes a snapshot of all endpoints, grouped by service port
// number. The portMap (port name -> number) translates each endpoint's port
// name; endpoints whose port name is absent from portMap are skipped. This is
// a bit weird, but lets us efficiently construct the format the caller needs.
func (es *EndpointShards) CopyEndpoints(portMap map[string]int) map[int][]*IstioEndpoint {
	es.RLock()
	defer es.RUnlock()
	byPort := make(map[int][]*IstioEndpoint)
	for _, shard := range es.Shards {
		for _, ep := range shard {
			if portNum, ok := portMap[ep.ServicePortName]; ok {
				byPort[portNum] = append(byPort[portNum], ep)
			}
		}
	}
	return byPort
}
// DeepCopy returns a full copy of the shards and service accounts, holding the
// read lock for the duration of the copy.
func (es *EndpointShards) DeepCopy() *EndpointShards {
	es.RLock()
	defer es.RUnlock()
	out := &EndpointShards{
		Shards:          make(map[ShardKey][]*IstioEndpoint, len(es.Shards)),
		ServiceAccounts: es.ServiceAccounts.Copy(),
	}
	for key, eps := range es.Shards {
		copied := make([]*IstioEndpoint, 0, len(eps))
		for _, ep := range eps {
			copied = append(copied, ep.DeepCopy())
		}
		out.Shards[key] = copied
	}
	return out
}
// EndpointIndex is a mutex protected index of endpoint shards
type EndpointIndex struct {
	// mu guards shardsBySvc.
	mu sync.RWMutex
	// shardsBySvc is keyed by service name, then by namespace.
	shardsBySvc map[string]map[string]*EndpointShards
	// We'll need to clear the cache in-sync with endpoint shards modifications.
	cache XdsCache
}
// NewEndpointIndex creates an empty index backed by the given XDS cache.
func NewEndpointIndex(cache XdsCache) *EndpointIndex {
	index := &EndpointIndex{
		cache:       cache,
		shardsBySvc: map[string]map[string]*EndpointShards{},
	}
	return index
}
// clearCacheForService invalidates any cached XDS entries for the given
// service/namespace. Must be called with the index lock held.
func (e *EndpointIndex) clearCacheForService(svc, ns string) {
	// The cache may be nil (DeleteShard guards the same field before calling
	// ClearAll); skip invalidation rather than panic.
	if e.cache == nil {
		return
	}
	e.cache.Clear(sets.Set[ConfigKey]{{
		Kind:      kind.ServiceEntry,
		Name:      svc,
		Namespace: ns,
	}: {}})
}
// Shardz returns a full deep copy of the global map of shards. This should be used only for testing
// and debugging, as the cloning is expensive.
func (e *EndpointIndex) Shardz() map[string]map[string]*EndpointShards {
	e.mu.RLock()
	defer e.mu.RUnlock()
	snapshot := make(map[string]map[string]*EndpointShards, len(e.shardsBySvc))
	for svc, byNs := range e.shardsBySvc {
		snapshot[svc] = make(map[string]*EndpointShards, len(byNs))
		for ns, shards := range byNs {
			snapshot[svc][ns] = shards.DeepCopy()
		}
	}
	return snapshot
}
// ShardsForService returns the shards and true if they are found, or returns nil, false.
func (e *EndpointIndex) ShardsForService(serviceName, namespace string) (*EndpointShards, bool) {
	e.mu.RLock()
	defer e.mu.RUnlock()
	if byNs, ok := e.shardsBySvc[serviceName]; ok {
		shards, found := byNs[namespace]
		return shards, found
	}
	return nil, false
}
// GetOrCreateEndpointShard returns the shards for a service, creating them if
// absent. The second return value is true when this service was seen for the
// first time (callers typically trigger a full push in that case).
func (e *EndpointIndex) GetOrCreateEndpointShard(serviceName, namespace string) (*EndpointShards, bool) {
	e.mu.Lock()
	defer e.mu.Unlock()
	byNs := e.shardsBySvc[serviceName]
	if byNs == nil {
		byNs = map[string]*EndpointShards{}
		e.shardsBySvc[serviceName] = byNs
	}
	if existing, ok := byNs[namespace]; ok {
		return existing, false
	}
	// This endpoint is for a service that was not previously loaded.
	created := &EndpointShards{
		Shards:          map[ShardKey][]*IstioEndpoint{},
		ServiceAccounts: sets.String{},
	}
	byNs[namespace] = created
	// Clear the cache here to avoid a race in cache writes.
	e.clearCacheForService(serviceName, namespace)
	return created, true
}
// DeleteServiceShard drops the given shard's endpoints for one service under
// the index lock. With preserveKeys set, the (service, namespace) map entries
// are kept even when they become empty.
func (e *EndpointIndex) DeleteServiceShard(shard ShardKey, serviceName, namespace string, preserveKeys bool) {
	e.mu.Lock()
	e.deleteServiceInner(shard, serviceName, namespace, preserveKeys)
	e.mu.Unlock()
}
// DeleteShard removes the shard's endpoints from every service in the index
// and then invalidates the whole cache.
func (e *EndpointIndex) DeleteShard(shardKey ShardKey) {
	e.mu.Lock()
	defer e.mu.Unlock()
	for svc, byNs := range e.shardsBySvc {
		for ns := range byNs {
			e.deleteServiceInner(shardKey, svc, ns, false)
		}
	}
	if e.cache != nil {
		e.cache.ClearAll()
	}
}
// deleteServiceInner removes one shard entry for a service. Must be called
// with the index lock held.
func (e *EndpointIndex) deleteServiceInner(shard ShardKey, serviceName, namespace string, preserveKeys bool) {
	byNs := e.shardsBySvc[serviceName]
	if byNs == nil || byNs[namespace] == nil {
		return
	}
	epShards := byNs[namespace]
	epShards.Lock()
	defer epShards.Unlock()
	delete(epShards.Shards, shard)
	// Clear the cache here to avoid a race in cache writes.
	e.clearCacheForService(serviceName, namespace)
	if preserveKeys {
		return
	}
	// Prune empty map levels so stale services do not linger in the index.
	if len(epShards.Shards) == 0 {
		delete(byNs, namespace)
	}
	if len(byNs) == 0 {
		delete(e.shardsBySvc, serviceName)
	}
}
// PushType is an enumeration that decides what type push we should do when we get EDS update.
type PushType int

const (
	// NoPush does not push anything.
	NoPush PushType = iota
	// IncrementalPush just pushes endpoints.
	IncrementalPush
	// FullPush triggers full push - typically used for new services.
	FullPush
)
// UpdateServiceEndpoints updates EndpointShards data by clusterID, hostname, IstioEndpoints.
// It also tracks the changes to ServiceAccounts. It returns whether endpoints need to be pushed and
// it also returns if they need to be pushed whether a full push is needed or incremental push is sufficient.
func (e *EndpointIndex) UpdateServiceEndpoints(
	shard ShardKey,
	hostname string,
	namespace string,
	istioEndpoints []*IstioEndpoint,
) PushType {
	if len(istioEndpoints) == 0 {
		// Should delete the service EndpointShards when endpoints become zero to prevent memory leak,
		// but we should not delete the keys from EndpointIndex map - that will trigger
		// unnecessary full push which can become a real problem if a pod is in crashloop and thus endpoints
		// flip flopping between 1 and 0.
		e.DeleteServiceShard(shard, hostname, namespace, true)
		log.Infof("Incremental push, service %s at shard %v has no endpoints", hostname, shard)
		return IncrementalPush
	}
	pushType := IncrementalPush
	// Find endpoint shard for this service, if it is available - otherwise create a new one.
	ep, created := e.GetOrCreateEndpointShard(hostname, namespace)
	// If we create a new endpoint shard, that means we have not seen the service earlier. We should do a full push.
	if created {
		log.Infof("Full push, new service %s/%s", namespace, hostname)
		pushType = FullPush
	}
	ep.Lock()
	defer ep.Unlock()
	// By default store the incoming slice as-is; when there are old endpoints
	// to compare against, it is rebuilt below.
	newIstioEndpoints := istioEndpoints
	oldIstioEndpoints := ep.Shards[shard]
	needPush := false
	if oldIstioEndpoints == nil {
		// If there are no old endpoints, we should push with incoming endpoints as there is nothing to compare.
		needPush = true
	} else {
		newIstioEndpoints = make([]*IstioEndpoint, 0, len(istioEndpoints))
		// Check if new Endpoints are ready to be pushed. This check
		// will ensure that if a new pod comes with a non ready endpoint,
		// we do not unnecessarily push that config to Envoy.
		// Please note that address is not a unique key. So this may not accurately
		// identify based on health status and push too many times - which is ok since its an optimization.
		emap := make(map[string]*IstioEndpoint, len(oldIstioEndpoints))
		nmap := make(map[string]*IstioEndpoint, len(newIstioEndpoints))
		// Add new endpoints only if they are ever ready once to shards
		// so that full push does not send them from shards.
		for _, oie := range oldIstioEndpoints {
			emap[oie.Address] = oie
		}
		for _, nie := range istioEndpoints {
			nmap[nie.Address] = nie
		}
		for _, nie := range istioEndpoints {
			if oie, exists := emap[nie.Address]; exists {
				// If endpoint exists already, we should push if it's health status changes.
				if oie.HealthStatus != nie.HealthStatus {
					needPush = true
				}
				newIstioEndpoints = append(newIstioEndpoints, nie)
			} else {
				// If the endpoint does not exist in shards that means it is a
				// new endpoint. Always send new healthy endpoints.
				// Also send new unhealthy endpoints when SendUnhealthyEndpoints is enabled.
				// This is OK since we disable panic threshold when SendUnhealthyEndpoints is enabled.
				if nie.HealthStatus != UnHealthy || features.SendUnhealthyEndpoints.Load() {
					needPush = true
				}
				newIstioEndpoints = append(newIstioEndpoints, nie)
			}
		}
		// Next, check for endpoints that were in old but no longer exist. If there are any, there is a
		// removal so we need to push an update.
		for _, oie := range oldIstioEndpoints {
			if _, f := nmap[oie.Address]; !f {
				needPush = true
			}
		}
	}
	if pushType != FullPush && !needPush {
		log.Debugf("No push, either old endpoint health status did not change or new endpoint came with unhealthy status, %v", hostname)
		pushType = NoPush
	}
	ep.Shards[shard] = newIstioEndpoints
	// Check if ServiceAccounts have changed. We should do a full push if they have changed.
	saUpdated := updateShardServiceAccount(ep, hostname)
	// For existing endpoints, we need to do full push if service accounts change.
	if saUpdated && pushType != FullPush {
		// Avoid extra logging if already a full push
		log.Infof("Full push, service accounts changed, %v", hostname)
		pushType = FullPush
	}
	// Clear the cache here. While it would likely be cleared later when we trigger a push, a race
	// condition is introduced where an XDS response may be generated before the update, but not
	// completed until after a response after the update. Essentially, we transition from v0 -> v1 ->
	// v0 -> invalidate -> v1. Reverting a change we pushed violates our contract of monotonically
	// moving forward in version. In practice, this is pretty rare and self corrects nearly
	// immediately. However, clearing the cache here has almost no impact on cache performance as we
	// would clear it shortly after anyways.
	e.clearCacheForService(hostname, namespace)
	return pushType
}
// updateShardServiceAccount recomputes the set of service accounts across all
// endpoint shards and stores it on the shards if it changed. Returns true when
// the stored set was updated (callers use this to trigger a full push).
// Note: it is not concurrent safe.
func updateShardServiceAccount(shards *EndpointShards, serviceName string) bool {
	current := sets.String{}
	for _, shardEndpoints := range shards.Shards {
		for _, endpoint := range shardEndpoints {
			if sa := endpoint.ServiceAccount; sa != "" {
				current.Insert(sa)
			}
		}
	}
	previous := shards.ServiceAccounts
	if previous.Equals(current) {
		return false
	}
	shards.ServiceAccounts = current
	log.Debugf("Updating service accounts now, svc %v, before service account %v, after %v",
		serviceName, previous, current)
	return true
}
// EndpointIndexUpdater is an XDSUpdater that keeps an EndpointIndex in sync.
// This is intended for tests only.
type EndpointIndexUpdater struct {
	Index *EndpointIndex
}

var _ XDSUpdater = &EndpointIndexUpdater{}

// NewEndpointIndexUpdater constructs an updater wrapping the given index.
func NewEndpointIndexUpdater(ei *EndpointIndex) *EndpointIndexUpdater {
	return &EndpointIndexUpdater{Index: ei}
}

// ConfigUpdate is a no-op; config pushes do not affect the endpoint index.
func (u *EndpointIndexUpdater) ConfigUpdate(*PushRequest) {}

// EDSUpdate applies the endpoints for the given service/shard to the index.
func (u *EndpointIndexUpdater) EDSUpdate(shard ShardKey, serviceName string, namespace string, eps []*IstioEndpoint) {
	u.Index.UpdateServiceEndpoints(shard, serviceName, namespace, eps)
}

// EDSCacheUpdate behaves identically to EDSUpdate for the index.
func (u *EndpointIndexUpdater) EDSCacheUpdate(shard ShardKey, serviceName string, namespace string, eps []*IstioEndpoint) {
	u.Index.UpdateServiceEndpoints(shard, serviceName, namespace, eps)
}

// SvcUpdate removes the service's shard from the index on delete events;
// all other events are ignored.
func (u *EndpointIndexUpdater) SvcUpdate(shard ShardKey, hostname string, namespace string, event Event) {
	if event == EventDelete {
		u.Index.DeleteServiceShard(shard, hostname, namespace, false)
	}
}

// ProxyUpdate is a no-op; proxy pushes do not affect the endpoint index.
func (u *EndpointIndexUpdater) ProxyUpdate(_ cluster.ID, _ string) {}

// RemoveShard drops all endpoints for the given shard from the index.
func (u *EndpointIndexUpdater) RemoveShard(shardKey ShardKey) {
	u.Index.DeleteShard(shardKey)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"regexp"
"strings"
"google.golang.org/protobuf/proto"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/xds"
"istio.io/istio/pkg/util/sets"
)
// EnvoyFilterWrapper is a wrapper for the EnvoyFilter api object with pre-processed data
type EnvoyFilterWrapper struct {
	Name      string
	Namespace string
	// workloadSelector holds the labels from the EnvoyFilter's workloadSelector, if set.
	workloadSelector labels.Instance
	// Patches groups the pre-processed config patches by their ApplyTo target.
	Patches map[networking.EnvoyFilter_ApplyTo][]*EnvoyFilterConfigPatchWrapper
}
// EnvoyFilterConfigPatchWrapper is a wrapper over the EnvoyFilter ConfigPatch api object
// fields are ordered such that this struct is aligned
type EnvoyFilterConfigPatchWrapper struct {
	// Value is the patch value decoded into the target xDS proto type.
	Value     proto.Message
	Match     *networking.EnvoyFilter_EnvoyConfigObjectMatch
	ApplyTo   networking.EnvoyFilter_ApplyTo
	Operation networking.EnvoyFilter_Patch_Operation
	// Pre-compile the regex from proxy version match in the match
	ProxyVersionRegex *regexp.Regexp
	// ProxyPrefixMatch provides a prefix match for the proxy version. The current API only allows
	// regex match, but as an optimization we can reduce this to a prefix match for common cases.
	// If this is set, ProxyVersionRegex is ignored.
	ProxyPrefixMatch string
	// Name and Namespace identify the owning EnvoyFilter; FullName is "Namespace/Name".
	Name      string
	Namespace string
	FullName  string
}
// wellKnownVersions defines a mapping of well known regex matches to prefix matches
// This is done only as an optimization; behavior should remain the same
// All versions specified by the default installation (Telemetry V2) should be added here.
var wellKnownVersions = map[string]string{
	`^1\.16.*`: "1.16",
	`^1\.17.*`: "1.17",
	`^1\.18.*`: "1.18",
	`^1\.19.*`: "1.19",
	`^1\.20.*`: "1.20",
	`^1\.21.*`: "1.21",
	`^1\.22.*`: "1.22",
	`^1\.23.*`: "1.23",
	// Hopefully we have a better API by the next release. If not, add the next version here.
}
// convertToEnvoyFilterWrapper converts from EnvoyFilter config to EnvoyFilterWrapper object,
// pre-decoding each patch value and pre-compiling (or prefix-reducing) the proxy
// version matcher. Patches that fail to decode or lack a patch body are skipped.
func convertToEnvoyFilterWrapper(local *config.Config) *EnvoyFilterWrapper {
	localEnvoyFilter := local.Spec.(*networking.EnvoyFilter)
	out := &EnvoyFilterWrapper{Name: local.Name, Namespace: local.Namespace}
	if localEnvoyFilter.WorkloadSelector != nil {
		out.workloadSelector = localEnvoyFilter.WorkloadSelector.Labels
	}
	out.Patches = make(map[networking.EnvoyFilter_ApplyTo][]*EnvoyFilterConfigPatchWrapper)
	for _, cp := range localEnvoyFilter.ConfigPatches {
		if cp.Patch == nil {
			// Should be caught by validation, but sometimes its disabled and we don't want to crash
			// as a result.
			log.Debugf("envoyfilter %v/%v discarded due to missing patch", local.Namespace, local.Name)
			continue
		}
		cpw := &EnvoyFilterConfigPatchWrapper{
			Name:      local.Name,
			Namespace: local.Namespace,
			FullName:  local.Namespace + "/" + local.Name,
			ApplyTo:   cp.ApplyTo,
			Match:     cp.Match,
			Operation: cp.Patch.Operation,
		}
		var err error
		// Use non-strict building to avoid issues where EnvoyFilter is valid but meant
		// for a different version of the API than we are built with
		cpw.Value, err = xds.BuildXDSObjectFromStruct(cp.ApplyTo, cp.Patch.Value, false)
		// There generally won't be an error here because validation catches mismatched types
		// Should only happen in tests or without validation
		if err != nil {
			log.Errorf("failed to build envoy filter value: %v", err)
			continue
		}
		if cp.Match == nil {
			// create a match all object
			cpw.Match = &networking.EnvoyFilter_EnvoyConfigObjectMatch{Context: networking.EnvoyFilter_ANY}
		} else if cp.Match.Proxy != nil && cp.Match.Proxy.ProxyVersion != "" {
			// Attempt to convert regex to a simple prefix match for the common case of matching
			// a standard Istio version. This field should likely be replaced with semver, but for now
			// we can workaround the performance impact of regex
			if prefix, f := wellKnownVersions[cp.Match.Proxy.ProxyVersion]; f {
				cpw.ProxyPrefixMatch = prefix
			} else {
				// pre-compile the regex for proxy version if it exists
				// ignore the error because validation catches invalid regular expressions.
				cpw.ProxyVersionRegex, _ = regexp.Compile(cp.Match.Proxy.ProxyVersion)
			}
		}
		if cpw.Operation == networking.EnvoyFilter_Patch_INSERT_AFTER ||
			cpw.Operation == networking.EnvoyFilter_Patch_INSERT_BEFORE ||
			cpw.Operation == networking.EnvoyFilter_Patch_INSERT_FIRST {
			// insert_before, after or first is applicable for listener filter, network filter,
			// http filter and http route, convert the rest to add
			if cpw.ApplyTo != networking.EnvoyFilter_HTTP_FILTER &&
				cpw.ApplyTo != networking.EnvoyFilter_NETWORK_FILTER &&
				cpw.ApplyTo != networking.EnvoyFilter_HTTP_ROUTE &&
				cpw.ApplyTo != networking.EnvoyFilter_LISTENER_FILTER {
				cpw.Operation = networking.EnvoyFilter_Patch_ADD
			}
		}
		// append on a nil slice allocates as needed, so no explicit
		// map-entry initialization is required for a first-seen ApplyTo.
		out.Patches[cp.ApplyTo] = append(out.Patches[cp.ApplyTo], cpw)
	}
	return out
}
// proxyMatch reports whether the patch's proxy match (version prefix, version
// regex, and metadata key/values) accepts the given proxy. A nil proxy match
// accepts everything.
func proxyMatch(proxy *Proxy, cp *EnvoyFilterConfigPatchWrapper) bool {
	pm := cp.Match.Proxy
	if pm == nil {
		return true
	}
	version := proxy.Metadata.IstioVersion
	if cp.ProxyPrefixMatch != "" && !strings.HasPrefix(version, cp.ProxyPrefixMatch) {
		return false
	}
	if cp.ProxyVersionRegex != nil {
		// An empty proxy version cannot satisfy a user-supplied regex.
		if version == "" || !cp.ProxyVersionRegex.MatchString(version) {
			return false
		}
	}
	for key, want := range pm.Metadata {
		if proxy.Metadata.Raw[key] != want {
			return false
		}
	}
	return true
}
// Keys returns the sorted keys of all the wrapped envoyfilter patches.
func (efw *EnvoyFilterWrapper) Keys() []string {
	if efw == nil {
		return nil
	}
	out := sets.String{}
	for _, patchList := range efw.Patches {
		for _, p := range patchList {
			out.Insert(p.Key())
		}
	}
	return sets.SortedList(out)
}
// KeysApplyingTo returns the sorted keys of the wrapped envoyfilter patches
// targeting any of the given ApplyTo values.
func (efw *EnvoyFilterWrapper) KeysApplyingTo(applyTo ...networking.EnvoyFilter_ApplyTo) []string {
	if efw == nil {
		return nil
	}
	out := sets.String{}
	for _, target := range applyTo {
		for _, p := range efw.Patches[target] {
			out.Insert(p.Key())
		}
	}
	return sets.SortedList(out)
}
// Key returns the "Namespace/Name" identifier of the owning EnvoyFilter,
// or the empty string for a nil wrapper.
func (cpw *EnvoyFilterConfigPatchWrapper) Key() string {
	if cpw != nil {
		return cpw.FullName
	}
	return ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"net/url"
"strings"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
httpwasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/wasm/v3"
networkwasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/wasm/v3"
wasmextensions "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3"
anypb "google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
extensions "istio.io/api/extensions/v1alpha1"
typeapi "istio.io/api/type/v1beta1"
"istio.io/istio/pilot/pkg/model/credentials"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/protomarshal"
)
const (
	// defaultRuntime is the Wasm VM runtime used for every plugin VM config.
	defaultRuntime = "envoy.wasm.runtime.v8"
	// fileScheme and ociScheme are the URL schemes recognized for plugin binaries.
	fileScheme = "file"
	ociScheme  = "oci"
	// name of environment variable at Wasm VM, which will carry the Wasm image pull secret.
	WasmSecretEnv = "ISTIO_META_WASM_IMAGE_PULL_SECRET"
	// name of environment variable at Wasm VM, which will carry the Wasm image pull policy.
	WasmPolicyEnv = "ISTIO_META_WASM_IMAGE_PULL_POLICY"
	// name of environment variable at Wasm VM, which will carry the resource version of WasmPlugin.
	WasmResourceVersionEnv = "ISTIO_META_WASM_PLUGIN_RESOURCE_VERSION"
	// WasmPluginResourceNamePrefix is the prefix of the resource name of WasmPlugin,
	// preventing the name collision with other resources.
	// NOTE(review): "extenstions" is misspelled, but the string is load-bearing —
	// generated resource names embed it, so it must not be "fixed".
	WasmPluginResourceNamePrefix = "extenstions.istio.io/wasmplugin/"
)
// WasmPluginType defines the type of wasm plugin
type WasmPluginType int

const (
	// WasmPluginTypeHTTP indicates an HTTP filter plugin.
	WasmPluginTypeHTTP WasmPluginType = iota
	// WasmPluginTypeNetwork indicates a network (L4) filter plugin.
	WasmPluginTypeNetwork
	// WasmPluginTypeAny matches both HTTP and network plugins.
	WasmPluginTypeAny
)
// fromPluginType maps an API PluginType onto the internal WasmPluginType.
// Everything except NETWORK (including UNSPECIFIED, for backward
// compatibility) resolves to HTTP.
func fromPluginType(pluginType extensions.PluginType) WasmPluginType {
	if pluginType == extensions.PluginType_NETWORK {
		return WasmPluginTypeNetwork
	}
	return WasmPluginTypeHTTP
}
// workloadModeForListenerClass maps a listener class to the WorkloadMode used
// by traffic selectors. Only inbound sidecar listeners act as servers; gateway,
// outbound, and undefined classes are all treated as clients.
func workloadModeForListenerClass(class istionetworking.ListenerClass) typeapi.WorkloadMode {
	if class == istionetworking.ListenerClassSidecarInbound {
		return typeapi.WorkloadMode_SERVER
	}
	return typeapi.WorkloadMode_CLIENT
}
// WasmPluginWrapper wraps the WasmPlugin API object together with pre-computed
// resource metadata used at xDS generation time.
type WasmPluginWrapper struct {
	*extensions.WasmPlugin
	Name      string
	Namespace string
	// ResourceName is WasmPluginResourceNamePrefix + "<Namespace>.<Name>".
	ResourceName string
	// ResourceVersion of the underlying config; passed to the Wasm VM via env var.
	ResourceVersion string
}
// MatchListener reports whether this plugin applies to the given workload and
// listener, combining policy targeting (direct vs. selector) with the plugin's
// traffic selectors.
func (p *WasmPluginWrapper) MatchListener(opts WorkloadSelectionOpts, li WasmPluginListenerInfo) bool {
	matcher := getPolicyMatcher(gvk.WasmPlugin, p.Name, opts, p)
	if matcher == policyMatchDirect {
		// Bound directly to this workload; only traffic selectors matter.
		return matchTrafficSelectors(p.Match, li)
	}
	if matcher == policyMatchSelector {
		// Bound via workload selector; require both label and traffic match.
		selected := p.Selector == nil || labels.Instance(p.Selector.MatchLabels).SubsetOf(opts.WorkloadLabels)
		return selected && matchTrafficSelectors(p.Match, li)
	}
	// Any other match result means the plugin is not bound to this workload.
	return false
}
// MatchType reports whether this plugin satisfies the requested plugin type.
func (p *WasmPluginWrapper) MatchType(pluginType WasmPluginType) bool {
	if pluginType == WasmPluginTypeAny {
		return true
	}
	return fromPluginType(p.WasmPlugin.Type) == pluginType
}
// BuildHTTPWasmFilter builds the HTTP Wasm filter for this plugin, or returns
// nil if the plugin is not an HTTP (or unspecified-type) plugin.
func (p *WasmPluginWrapper) BuildHTTPWasmFilter() *httpwasm.Wasm {
	if p.Type != extensions.PluginType_HTTP && p.Type != extensions.PluginType_UNSPECIFIED_PLUGIN_TYPE {
		return nil
	}
	return &httpwasm.Wasm{Config: p.buildPluginConfig()}
}
// BuildNetworkWasmFilter builds the network Wasm filter for this plugin, or
// returns nil if the plugin is not a network plugin.
func (p *WasmPluginWrapper) BuildNetworkWasmFilter() *networkwasm.Wasm {
	if p.Type == extensions.PluginType_NETWORK {
		return &networkwasm.Wasm{Config: p.buildPluginConfig()}
	}
	return nil
}
// buildPluginConfig assembles the Envoy PluginConfig for this WasmPlugin.
// Returns nil when the plugin configuration cannot be marshaled or the URL
// does not parse.
func (p *WasmPluginWrapper) buildPluginConfig() *wasmextensions.PluginConfig {
	plugin := p.WasmPlugin
	configuration := &anypb.Any{}
	// GetFields is nil-safe, so this covers both a nil PluginConfig and an empty one.
	if len(plugin.PluginConfig.GetFields()) > 0 {
		cfgJSON, err := protomarshal.ToJSON(plugin.PluginConfig)
		if err != nil {
			log.Warnf("wasmplugin %v/%v discarded due to json marshaling error: %s", p.Namespace, p.Name, err)
			return nil
		}
		configuration = protoconv.MessageToAny(&wrapperspb.StringValue{
			Value: cfgJSON,
		})
	}
	pluginURL, err := url.Parse(plugin.Url)
	if err != nil {
		log.Warnf("wasmplugin %v/%v discarded due to failure to parse URL: %s", p.Namespace, p.Name, err)
		return nil
	}
	// when no scheme is given, default to oci://
	if pluginURL.Scheme == "" {
		pluginURL.Scheme = ociScheme
	}
	return &wasmextensions.PluginConfig{
		Name:          p.Namespace + "." + p.Name,
		RootId:        plugin.PluginName,
		Configuration: configuration,
		Vm:            buildVMConfig(buildDataSource(pluginURL, plugin), p.ResourceVersion, plugin),
		FailOpen:      plugin.FailStrategy == extensions.FailStrategy_FAIL_OPEN,
	}
}
// WasmPluginListenerInfo describes the listener (port and class) that a
// plugin's traffic selectors are matched against.
type WasmPluginListenerInfo struct {
	Port  int
	Class istionetworking.ListenerClass
}

// If anyListener is used as a listener info,
// the listener is matched with any TrafficSelector.
var anyListener = WasmPluginListenerInfo{
	Port:  0,
	Class: istionetworking.ListenerClassUndefined,
}
// matchTrafficSelectors reports whether the listener matches any of the given
// traffic selectors. An empty selector list, or the wildcard listener
// (undefined class and port 0), always matches.
func matchTrafficSelectors(ts []*extensions.WasmPlugin_TrafficSelector, li WasmPluginListenerInfo) bool {
	if len(ts) == 0 || (li.Class == istionetworking.ListenerClassUndefined && li.Port == 0) {
		return true
	}
	for _, sel := range ts {
		if matchMode(sel.Mode, li.Class) && matchPorts(sel.Ports, li.Port) {
			return true
		}
	}
	return false
}
// matchMode reports whether a selector's workload mode accepts the listener
// class. CLIENT_AND_SERVER and UNDEFINED accept everything.
func matchMode(workloadMode typeapi.WorkloadMode, class istionetworking.ListenerClass) bool {
	if workloadMode == typeapi.WorkloadMode_CLIENT_AND_SERVER || workloadMode == typeapi.WorkloadMode_UNDEFINED {
		return true
	}
	return workloadMode == workloadModeForListenerClass(class)
}
// matchPorts reports whether the port is selected by any of the port
// selectors. An empty selector list matches all ports; a selector with
// number 0 never matches.
func matchPorts(portSelectors []*typeapi.PortSelector, port int) bool {
	if len(portSelectors) == 0 {
		// If there is no specified port, match with all the ports.
		return true
	}
	target := uint32(port)
	for _, sel := range portSelectors {
		if n := sel.GetNumber(); n != 0 && n == target {
			return true
		}
	}
	return false
}
// convertToWasmPluginWrapper validates a WasmPlugin config and wraps it with
// pre-computed resource metadata. Returns nil when the spec is not a
// WasmPlugin, the plugin config cannot be marshaled, or the URL is invalid.
func convertToWasmPluginWrapper(originPlugin config.Config) *WasmPluginWrapper {
	// Make a deep copy since we are going to mutate the resource later for secret env variable.
	// We do not want to mutate the underlying resource at informer cache.
	plugin := originPlugin.DeepCopy()
	wasmPlugin, ok := plugin.Spec.(*extensions.WasmPlugin)
	if !ok {
		return nil
	}
	// GetFields is nil-safe; this validates marshalability without keeping the result.
	if len(wasmPlugin.PluginConfig.GetFields()) > 0 {
		if _, err := protomarshal.ToJSON(wasmPlugin.PluginConfig); err != nil {
			log.Warnf("wasmplugin %v/%v discarded due to json marshaling error: %s", plugin.Namespace, plugin.Name, err)
			return nil
		}
	}
	u, err := url.Parse(wasmPlugin.Url)
	if err != nil {
		log.Warnf("wasmplugin %v/%v discarded due to failure to parse URL: %s", plugin.Namespace, plugin.Name, err)
		return nil
	}
	// when no scheme is given, default to oci://
	if u.Scheme == "" {
		u.Scheme = ociScheme
	}
	// Normalize the image pull secret to the full resource name.
	wasmPlugin.ImagePullSecret = toSecretResourceName(wasmPlugin.ImagePullSecret, plugin.Namespace)
	return &WasmPluginWrapper{
		Name:            plugin.Name,
		Namespace:       plugin.Namespace,
		ResourceName:    WasmPluginResourceNamePrefix + plugin.Namespace + "." + plugin.Name,
		WasmPlugin:      wasmPlugin,
		ResourceVersion: plugin.ResourceVersion,
	}
}
// toSecretResourceName converts an imagePullSecret to a resource name referenced at Wasm SDS.
// NOTE: the secret referenced by WasmPlugin has to be in the same namespace as the WasmPlugin,
// so this function makes sure that the secret resource name, which will be used to retrieve
// secrets at xds generation time, carries the WasmPlugin's own namespace.
func toSecretResourceName(name, pluginNamespace string) string {
	if name == "" {
		return ""
	}
	// Convert the user-provided secret name into a secret resource name, then parse it.
	parsed, err := credentials.ParseResourceName(credentials.ToResourceName(name), pluginNamespace, "", "")
	if err != nil {
		log.Debugf("Failed to parse wasm secret resource name %v", err)
		return ""
	}
	// Forcibly rewrite the secret namespace to the plugin namespace: the secret
	// referenced by a WasmPlugin must be co-located with it.
	parsed.Namespace = pluginNamespace
	return parsed.KubernetesResourceName()
}
// buildDataSource returns the AsyncDataSource for fetching the plugin binary:
// a local file source for file:// URLs, otherwise a remote fetch (performed by
// the agent, hence the placeholder cluster name).
func buildDataSource(u *url.URL, wasmPlugin *extensions.WasmPlugin) *core.AsyncDataSource {
	if u.Scheme != fileScheme {
		return &core.AsyncDataSource{
			Specifier: &core.AsyncDataSource_Remote{
				Remote: &core.RemoteDataSource{
					HttpUri: &core.HttpUri{
						Uri:     u.String(),
						Timeout: durationpb.New(30 * time.Second), // TODO: make this configurable?
						HttpUpstreamType: &core.HttpUri_Cluster{
							// the agent will fetch this anyway, so no need for a cluster
							Cluster: "_",
						},
					},
					Sha256: wasmPlugin.Sha256,
				},
			},
		}
	}
	return &core.AsyncDataSource{
		Specifier: &core.AsyncDataSource_Local{
			Local: &core.DataSource{
				Specifier: &core.DataSource_Filename{
					Filename: strings.TrimPrefix(wasmPlugin.Url, "file://"),
				},
			},
		},
	}
}
// buildVMConfig assembles the Wasm VM config: runtime, code data source, and
// environment variables (pull secret/policy, resource version, plus any
// user-declared inline or host-passthrough env vars).
func buildVMConfig(
	datasource *core.AsyncDataSource,
	resourceVersion string,
	wasmPlugin *extensions.WasmPlugin,
) *wasmextensions.PluginConfig_VmConfig {
	envs := map[string]string{}
	if wasmPlugin.ImagePullSecret != "" {
		envs[WasmSecretEnv] = wasmPlugin.ImagePullSecret
	}
	if wasmPlugin.ImagePullPolicy != extensions.PullPolicy_UNSPECIFIED_POLICY {
		envs[WasmPolicyEnv] = wasmPlugin.ImagePullPolicy.String()
	}
	envs[WasmResourceVersionEnv] = resourceVersion

	cfg := &wasmextensions.PluginConfig_VmConfig{
		VmConfig: &wasmextensions.VmConfig{
			Runtime: defaultRuntime,
			Code:    datasource,
			EnvironmentVariables: &wasmextensions.EnvironmentVariables{
				KeyValues: envs,
			},
		},
	}

	if vm := wasmPlugin.VmConfig; vm != nil && len(vm.Env) != 0 {
		hostEnvKeys := make([]string, 0, len(vm.Env))
		for _, e := range vm.Env {
			switch e.ValueFrom {
			case extensions.EnvValueSource_INLINE:
				envs[e.Name] = e.Value
			case extensions.EnvValueSource_HOST:
				hostEnvKeys = append(hostEnvKeys, e.Name)
			}
		}
		cfg.VmConfig.EnvironmentVariables.HostEnvKeys = hostEnvKeys
	}
	return cfg
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"errors"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/maps"
)
// FakeStore is a simple in-memory ConfigStore used for tests, keyed by
// GroupVersionKind -> namespace -> name.
type FakeStore struct {
	store map[config.GroupVersionKind]map[string]map[string]config.Config
}
// NewFakeStore builds an empty FakeStore.
func NewFakeStore() *FakeStore {
	return &FakeStore{
		store: map[config.GroupVersionKind]map[string]map[string]config.Config{},
	}
}

var _ ConfigStore = (*FakeStore)(nil)
// Schemas returns the full set of Pilot config schemas this store accepts.
func (s *FakeStore) Schemas() collection.Schemas {
	return collections.Pilot
}
// Get returns a copy of the config with the given type, name, and namespace,
// or nil if it does not exist.
func (s *FakeStore) Get(typ config.GroupVersionKind, name, namespace string) *config.Config {
	nsConfigs := s.store[typ]
	if nsConfigs == nil {
		return nil
	}
	configs := nsConfigs[namespace]
	if configs == nil {
		return nil
	}
	// Use "cfg" rather than "config" to avoid shadowing the imported package.
	// Taking the address of the loop-local copy keeps callers from mutating
	// the stored entry.
	if cfg, f := configs[name]; f {
		return &cfg
	}
	return nil
}
// List returns all configs of the given type in the namespace, or across all
// namespaces when namespace is NamespaceAll. Returns nil for unknown types.
func (s *FakeStore) List(typ config.GroupVersionKind, namespace string) []config.Config {
	nsConfigs := s.store[typ]
	if nsConfigs == nil {
		return nil
	}
	if namespace != NamespaceAll {
		return maps.Values(nsConfigs[namespace])
	}
	var res []config.Config
	for _, configs := range nsConfigs {
		for _, cfg := range configs {
			res = append(res, cfg)
		}
	}
	return res
}
// Create stores the config, creating the per-type and per-namespace maps on
// first use. An existing entry with the same key is overwritten.
func (s *FakeStore) Create(cfg config.Config) (revision string, err error) {
	nsConfigs := s.store[cfg.GroupVersionKind]
	if nsConfigs == nil {
		nsConfigs = map[string]map[string]config.Config{}
		s.store[cfg.GroupVersionKind] = nsConfigs
	}
	configs := nsConfigs[cfg.Namespace]
	if configs == nil {
		configs = map[string]config.Config{}
		nsConfigs[cfg.Namespace] = configs
	}
	configs[cfg.Name] = cfg
	return "", nil
}
// Update replaces an existing config; it errors when no entry with the same
// type, namespace, and name is already stored.
func (s *FakeStore) Update(cfg config.Config) (newRevision string, err error) {
	nsConfigs := s.store[cfg.GroupVersionKind]
	if nsConfigs == nil {
		return "", errors.New("config not found")
	}
	configs := nsConfigs[cfg.Namespace]
	if configs == nil {
		return "", errors.New("config not found")
	}
	if _, found := configs[cfg.Name]; !found {
		return "", errors.New("config not found")
	}
	configs[cfg.Name] = cfg
	return "", nil
}
// UpdateStatus is a no-op for FakeStore; status writes are not tracked.
func (*FakeStore) UpdateStatus(config config.Config) (string, error) { return "", nil }

// Patch is a no-op for FakeStore; the patch function is never applied.
func (*FakeStore) Patch(orig config.Config, patchFn config.PatchFunc) (string, error) {
	return "", nil
}
// Delete removes the named config; deleting a non-existent entry succeeds.
func (s *FakeStore) Delete(typ config.GroupVersionKind, name, namespace string, rv *string) error {
	// Indexing a nil map yields the zero value and delete on a nil map is a
	// no-op, so missing type/namespace entries fall through to success.
	delete(s.store[typ][namespace], name)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strconv"
"strings"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model/credentials"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/gateway"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/util/sets"
)
// ServerPort defines port for the gateway server.
type ServerPort struct {
	// A valid non-negative integer port number.
	Number uint32
	// The protocol exposed on the port.
	Protocol string
	// The bind server specified on this port.
	Bind string
}

// MergedServers describes set of servers defined in all gateways per port.
type MergedServers struct {
	Servers   []*networking.Server
	RouteName string // RouteName for http servers. For HTTPS, TLSServerInfo will hold the route name.
}

// TLSServerInfo contains additional information for TLS Servers.
type TLSServerInfo struct {
	RouteName string
	SNIHosts  []string
}

// MergedGateway describes a set of gateways for a workload merged into a single logical gateway.
type MergedGateway struct {
	// ServerPorts maintains a list of unique server ports, used for stable ordering.
	ServerPorts []ServerPort
	// MergedServers map from physical port to virtual servers
	// using TCP protocols (like HTTP1.1, H2, mysql, redis etc)
	MergedServers map[ServerPort]*MergedServers
	// MergedQUICTransportServers map from physical port to servers listening
	// on QUIC (like HTTP3). Currently the support is experimental and
	// is limited to HTTP3 only
	MergedQUICTransportServers map[ServerPort]*MergedServers
	// HTTP3AdvertisingRoutes represents the set of HTTP routes which advertise HTTP/3.
	// This mapping is used to generate alt-svc header that is needed for HTTP/3 server discovery.
	HTTP3AdvertisingRoutes sets.String
	// GatewayNameForServer maps from server to the owning gateway name.
	// Used to select the set of virtual services that apply to a port.
	GatewayNameForServer map[*networking.Server]string
	// ServersByRouteName maps from port names to virtual hosts
	// Used for RDS. No two port names share same port except for HTTPS
	// The typical length of the value is always 1, except for HTTP (not HTTPS),
	ServersByRouteName map[string][]*networking.Server
	// TLSServerInfo maps from server to a corresponding TLS information like TLS Routename and SNIHosts.
	TLSServerInfo map[*networking.Server]*TLSServerInfo
	// ContainsAutoPassthroughGateways determines if there are any type AUTO_PASSTHROUGH Gateways, requiring additional
	// clusters to be sent to the workload
	ContainsAutoPassthroughGateways bool
	// PortMap defines a mapping of targetPorts to the set of Service ports that reference them
	PortMap GatewayPortMap
	// VerifiedCertificateReferences contains a set of all credentialNames referenced by gateways *in the same namespace as the proxy*.
	// These are considered "verified", since there is mutual agreement from the pod, Secret, and Gateway, as all
	// reside in the same namespace and trust boundary.
	// Note: Secrets that are not referenced by any Gateway, but are in the same namespace as the pod, are explicitly *not*
	// included. This ensures we don't give permission to unexpected secrets, such as the citadel root key/cert.
	VerifiedCertificateReferences sets.String
}
var (
	// typeTag and nameTag label the rejected-config metric with the kind and
	// name of the offending config.
	typeTag = monitoring.CreateLabel("type")
	nameTag = monitoring.CreateLabel("name")
	// totalRejectedConfigs counts configs Pilot had to reject or ignore.
	totalRejectedConfigs = monitoring.NewSum(
		"pilot_total_rejected_configs",
		"Total number of configs that Pilot had to reject or ignore.",
	)
)

// RecordRejectedConfig increments the rejected-config counter for the given gateway.
func RecordRejectedConfig(gatewayName string) {
	totalRejectedConfigs.With(typeTag.Value("gateway"), nameTag.Value(gatewayName)).Increment()
}
// DisableGatewayPortTranslationLabel is a label on Service that declares that, for that particular
// service, we should not translate Gateway ports to target ports. For example, if I have a Service
// on port 80 with target port 8080, with the label. Gateways on port 80 would *not* match. Instead,
// only Gateways on port 8080 would be used. This prevents ambiguities when there are multiple
// Services on port 80 referring to different target ports. Long term, this will be replaced by
// Gateways directly referencing a Service, rather than label selectors. Warning: this label is
// intended solely as a workaround for Knative's Istio integration, and not intended for any
// other usage. It can, and will, be removed immediately after the new direct reference is ready for
// use.
const DisableGatewayPortTranslationLabel = "experimental.istio.io/disable-gateway-port-translation"
// MergeGateways combines multiple gateways targeting the same workload into a single logical Gateway.
// Note that today any Servers in the combined gateways listening on the same port must have the same protocol.
// If servers with different protocols attempt to listen on the same port, one of the protocols will be chosen at random.
func MergeGateways(gateways []gatewayWithInstances, proxy *Proxy, ps *PushContext) *MergedGateway {
gatewayPorts := sets.New[uint32]()
nonPlainTextGatewayPortsBindMap := map[uint32]sets.String{}
mergedServers := make(map[ServerPort]*MergedServers)
mergedQUICServers := make(map[ServerPort]*MergedServers)
serverPorts := make([]ServerPort, 0)
plainTextServers := make(map[uint32]ServerPort)
serversByRouteName := make(map[string][]*networking.Server)
tlsServerInfo := make(map[*networking.Server]*TLSServerInfo)
gatewayNameForServer := make(map[*networking.Server]string)
verifiedCertificateReferences := sets.New[string]()
http3AdvertisingRoutes := sets.New[string]()
tlsHostsByPort := map[uint32]map[string]string{} // port -> host/bind map
autoPassthrough := false
log.Debugf("MergeGateways: merging %d gateways", len(gateways))
for _, gwAndInstance := range gateways {
gatewayConfig := gwAndInstance.gateway
gatewayName := gatewayConfig.Namespace + "/" + gatewayConfig.Name // Format: %s/%s
gatewayCfg := gatewayConfig.Spec.(*networking.Gateway)
log.Debugf("MergeGateways: merging gateway %q :\n%v", gatewayName, gatewayCfg)
snames := sets.String{}
for _, s := range gatewayCfg.Servers {
if len(s.Name) > 0 {
if snames.InsertContains(s.Name) {
log.Warnf("Server name %s is not unique in gateway %s and may create possible issues like stat prefix collision ",
s.Name, gatewayName)
}
}
if s.Port == nil {
// Should be rejected in validation, this is an extra check
log.Debugf("invalid server without port: %q", gatewayName)
RecordRejectedConfig(gatewayName)
continue
}
sanitizeServerHostNamespace(s, gatewayConfig.Namespace)
gatewayNameForServer[s] = gatewayName
log.Debugf("MergeGateways: gateway %q processing server %s :%v", gatewayName, s.Name, s.Hosts)
cn := s.GetTls().GetCredentialName()
if cn != "" && proxy.VerifiedIdentity != nil {
rn := credentials.ToResourceName(cn)
parse, _ := credentials.ParseResourceName(rn, proxy.VerifiedIdentity.Namespace, "", "")
if gatewayConfig.Namespace == proxy.VerifiedIdentity.Namespace && parse.Namespace == proxy.VerifiedIdentity.Namespace {
// Same namespace is always allowed
verifiedCertificateReferences.Insert(rn)
if s.GetTls().GetMode() == networking.ServerTLSSettings_MUTUAL {
verifiedCertificateReferences.Insert(rn + credentials.SdsCaSuffix)
}
} else if ps.ReferenceAllowed(gvk.Secret, rn, proxy.VerifiedIdentity.Namespace) {
// Explicitly allowed by some policy
verifiedCertificateReferences.Insert(rn)
}
}
for _, resolvedPort := range resolvePorts(s.Port.Number, gwAndInstance.instances, gwAndInstance.legacyGatewaySelector) {
routeName := gatewayRDSRouteName(s, resolvedPort, gatewayConfig)
if s.Tls != nil {
// Envoy will reject config that has multiple filter chain matches with the same matching rules.
// To avoid this, we need to make sure we don't have duplicated hosts, which will become
// SNI filter chain matches.
// When there is Bind specified in the Gateway, the listener is built per IP instead of
// sharing one wildcard listener. So different Gateways can
// have same host as long as they have different Bind.
if tlsHostsByPort[resolvedPort] == nil {
tlsHostsByPort[resolvedPort] = map[string]string{}
}
if duplicateHosts := CheckDuplicates(s.Hosts, s.Bind, tlsHostsByPort[resolvedPort]); len(duplicateHosts) != 0 {
log.Warnf("skipping server on gateway %s, duplicate host names: %v", gatewayName, duplicateHosts)
RecordRejectedConfig(gatewayName)
continue
}
tlsServerInfo[s] = &TLSServerInfo{SNIHosts: GetSNIHostsForServer(s), RouteName: routeName}
if s.Tls.Mode == networking.ServerTLSSettings_AUTO_PASSTHROUGH {
autoPassthrough = true
}
}
serverPort := ServerPort{resolvedPort, s.Port.Protocol, s.Bind}
serverProtocol := protocol.Parse(serverPort.Protocol)
if gatewayPorts.Contains(resolvedPort) {
// We have two servers on the same port. Should we merge?
// 1. Yes if both servers are plain text and HTTP
// 2. Yes if both servers are using TLS
// if using HTTPS ensure that port name is distinct so that we can setup separate RDS
// for each server (as each server ends up as a separate http connection manager due to filter chain match)
// 3. No for everything else.
if current, exists := plainTextServers[resolvedPort]; exists {
if !canMergeProtocols(serverProtocol, protocol.Parse(current.Protocol)) && current.Bind == serverPort.Bind {
log.Infof("skipping server on gateway %s port %s.%d.%s: conflict with existing server %d.%s",
gatewayConfig.Name, s.Port.Name, resolvedPort, s.Port.Protocol, serverPort.Number, serverPort.Protocol)
RecordRejectedConfig(gatewayName)
continue
}
// For TCP gateway/route the route name is empty but if they are different binds, should continue to generate the listener
// i.e gateway 10.0.0.1:8000:TCP should not conflict with 10.0.0.2:8000:TCP
if routeName == "" && current.Bind == serverPort.Bind {
log.Debugf("skipping server on gateway %s port %s.%d.%s: could not build RDS name from server",
gatewayConfig.Name, s.Port.Name, resolvedPort, s.Port.Protocol)
RecordRejectedConfig(gatewayName)
continue
}
if current.Bind != serverPort.Bind {
// Merge it to servers with the same port and bind.
if mergedServers[serverPort] == nil {
mergedServers[serverPort] = &MergedServers{Servers: []*networking.Server{}}
serverPorts = append(serverPorts, serverPort)
}
ms := mergedServers[serverPort]
ms.RouteName = routeName
ms.Servers = append(ms.Servers, s)
} else {
// Merge this to current known port with same bind.
ms := mergedServers[current]
ms.Servers = append(ms.Servers, s)
}
serversByRouteName[routeName] = append(serversByRouteName[routeName], s)
} else {
// We have duplicate port. Its not in plaintext servers. So, this has to be a TLS server.
// Check if this is also a HTTP server and if so, ensure uniqueness of port name.
if gateway.IsHTTPServer(s) {
if routeName == "" {
log.Debugf("skipping server on gateway %s port %s.%d.%s: could not build RDS name from server",
gatewayConfig.Name, s.Port.Name, resolvedPort, s.Port.Protocol)
RecordRejectedConfig(gatewayName)
continue
}
// Both servers are HTTPS servers. Make sure the port names are different so that RDS can pick out individual servers.
// We cannot have two servers with same port name because we need the port name to distinguish one HTTPS server from another.
// We cannot merge two HTTPS servers even if their TLS settings have same path to the keys, because we don't know if the contents
// of the keys are same. So we treat them as effectively different TLS settings.
// This check is largely redundant now since we create rds names for https using gateway name, namespace
// and validation ensures that all port names within a single gateway config are unique.
if _, exists := serversByRouteName[routeName]; exists {
log.Infof("skipping server on gateway %s port %s.%d.%s: non unique port name for HTTPS port",
gatewayConfig.Name, s.Port.Name, resolvedPort, s.Port.Protocol)
RecordRejectedConfig(gatewayName)
continue
}
serversByRouteName[routeName] = []*networking.Server{s}
}
// build the port bind map for none plain text protocol, thus can avoid protocol conflict if it's different bind
var newBind bool
if bindsPortMap, ok := nonPlainTextGatewayPortsBindMap[resolvedPort]; ok {
newBind = !bindsPortMap.InsertContains(serverPort.Bind)
} else {
nonPlainTextGatewayPortsBindMap[resolvedPort] = sets.New(serverPort.Bind)
newBind = true
}
// If the bind/port combination is not being used as non-plaintext, they are different
// listeners and won't get conflicted even with same port different protocol
// i.e 0.0.0.0:443:GRPC/1.0.0.1:443:GRPC/1.0.0.2:443:HTTPS they are not conflicted, otherwise
// We have another TLS server on the same port. Can differentiate servers using SNI
if s.Tls == nil && !newBind {
log.Warnf("TLS server without TLS options %s %s", gatewayName, s.String())
RecordRejectedConfig(gatewayName)
continue
}
if mergedServers[serverPort] == nil {
mergedServers[serverPort] = &MergedServers{Servers: []*networking.Server{s}}
serverPorts = append(serverPorts, serverPort)
} else {
mergedServers[serverPort].Servers = append(mergedServers[serverPort].Servers, s)
}
// We have TLS settings defined and we have already taken care of unique route names
// if it is HTTPS. So we can construct a QUIC server on the same port. It is okay as
// QUIC listens on UDP port, not TCP
if features.EnableQUICListeners && gateway.IsEligibleForHTTP3Upgrade(s) &&
udpSupportedPort(s.GetPort().GetNumber(), gwAndInstance.instances) {
log.Debugf("Server at port %d eligible for HTTP3 upgrade. Add UDP listener for QUIC", serverPort.Number)
if mergedQUICServers[serverPort] == nil {
mergedQUICServers[serverPort] = &MergedServers{Servers: []*networking.Server{}}
}
mergedQUICServers[serverPort].Servers = append(mergedQUICServers[serverPort].Servers, s)
http3AdvertisingRoutes.Insert(routeName)
}
}
} else {
// This is a new gateway on this port. Create MergedServers for it.
gatewayPorts.Insert(resolvedPort)
if !gateway.IsTLSServer(s) {
plainTextServers[serverPort.Number] = serverPort
}
if gateway.IsHTTPServer(s) {
serversByRouteName[routeName] = []*networking.Server{s}
if features.EnableQUICListeners && gateway.IsEligibleForHTTP3Upgrade(s) &&
udpSupportedPort(s.GetPort().GetNumber(), gwAndInstance.instances) {
log.Debugf("Server at port %d eligible for HTTP3 upgrade. So QUIC listener will be added", serverPort.Number)
http3AdvertisingRoutes.Insert(routeName)
if mergedQUICServers[serverPort] == nil {
// This should be treated like non-passthrough HTTPS case. There will be multiple filter
// chains, multiple routes per server port. So just like in TLS server case we do not
// track route name here. Instead, TLS server info is used (it is fine for now because
// this would be a mirror of an existing non-passthrough HTTPS server)
mergedQUICServers[serverPort] = &MergedServers{Servers: []*networking.Server{s}}
}
}
}
mergedServers[serverPort] = &MergedServers{Servers: []*networking.Server{s}, RouteName: routeName}
serverPorts = append(serverPorts, serverPort)
}
log.Debugf("MergeGateways: gateway %q merged server %v", gatewayName, s.Hosts)
}
}
}
return &MergedGateway{
MergedServers: mergedServers,
MergedQUICTransportServers: mergedQUICServers,
ServerPorts: serverPorts,
GatewayNameForServer: gatewayNameForServer,
TLSServerInfo: tlsServerInfo,
ServersByRouteName: serversByRouteName,
HTTP3AdvertisingRoutes: http3AdvertisingRoutes,
ContainsAutoPassthroughGateways: autoPassthrough,
PortMap: getTargetPortMap(serversByRouteName),
VerifiedCertificateReferences: verifiedCertificateReferences,
}
}
// udpSupportedPort reports whether any of the given service targets exposes
// the provided port number over the UDP protocol.
func udpSupportedPort(number uint32, instances []ServiceTarget) bool {
	for i := range instances {
		p := instances[i].Port
		if p.Protocol == protocol.UDP && p.Port == int(number) {
			return true
		}
	}
	return false
}
// resolvePorts takes a Gateway port and resolves it to the port(s) that will
// actually be listened on.
// When legacyGatewaySelector=false, the gateway directly references a Service.
// In this case the translation is unambiguous — we just find the matching port
// and return the targetPort.
// When legacyGatewaySelector=true things are a bit more complex, as we support
// referencing a Service port (translating to the targetPort) in addition to
// directly referencing a port. In this case we make a best-effort guess by
// picking the first match.
func resolvePorts(number uint32, instances []ServiceTarget, legacyGatewaySelector bool) []uint32 {
	resolved := sets.New[uint32]()
	for _, inst := range instances {
		if _, optedOut := inst.Service.Attributes.Labels[DisableGatewayPortTranslationLabel]; optedOut && legacyGatewaySelector {
			// This Service opted out of port translation. Only relevant for the
			// legacy selector; the new gateway selection mechanism *only* allows
			// referencing the Service port, so references are unambiguous.
			continue
		}
		if inst.Port.Port != int(number) {
			continue
		}
		if legacyGatewaySelector {
			// With legacy gateway label selection we only resolve to a single port.
			// This has pros and cons; we don't allow merging of routes when it
			// would be desirable, but we also avoid accidentally merging routes we
			// didn't intend to. Picking the first match preserves backwards
			// compatibility.
			return []uint32{inst.Port.TargetPort}
		}
		resolved.Insert(inst.Port.TargetPort)
	}
	ret := resolved.UnsortedList()
	if len(ret) == 0 && legacyGatewaySelector {
		// With legacy gateway label selection, bind to the port as-is when there
		// is no matching ServiceInstance.
		return []uint32{number}
	}
	// When directly referencing a Service, the port *must* be in the Service, so
	// there is no fallback. If there was no match, the Gateway is a no-op.
	return ret
}
// canMergeProtocols reports whether a server speaking protocol p can be merged
// onto a port already serving `current`: the new server must be HTTP, and the
// existing one must either be HTTP as well or the exact same protocol.
func canMergeProtocols(current protocol.Instance, p protocol.Instance) bool {
	if !p.IsHTTP() {
		return false
	}
	return current.IsHTTP() || current == p
}
// GetSNIHostsForServer returns the sorted, de-duplicated SNI hosts for a TLS
// server. Hosts of the form "ns/host" are reduced to just "host". Returns nil
// when the server has no TLS settings.
func GetSNIHostsForServer(server *networking.Server) []string {
	if server.Tls == nil {
		return nil
	}
	// Sanitize the server hosts, as they may be namespace-scoped ("ns/host").
	unique := sets.String{}
	for _, host := range server.Hosts {
		if parts := strings.Split(host, "/"); len(parts) > 1 {
			host = parts[1]
		}
		// Set semantics drop hosts that have already been added.
		unique.Insert(host)
	}
	return sets.SortedList(unique)
}
// CheckDuplicates returns all of the hosts provided that are already known on
// the same bind. If there were no duplicates, all hosts are recorded in
// knownHosts (mapped to bind).
func CheckDuplicates(hosts []string, bind string, knownHosts map[string]string) []string {
	var duplicates []string
	for _, host := range hosts {
		// A host only conflicts when it was previously seen on the *same* bind;
		// distinct binds get distinct listeners and may share host names.
		if previousBind, seen := knownHosts[host]; seen && previousBind == bind {
			duplicates = append(duplicates, host)
		}
	}
	if len(duplicates) > 0 {
		return duplicates
	}
	// No duplicates found, so all of these hosts can be marked as known.
	for _, host := range hosts {
		knownHosts[host] = bind
	}
	return nil
}
// gatewayRDSRouteName generates the RDS route config name for gateway's servers.
// Unlike sidecars where the RDS route name is the listener port number, gateways have a different
// structure for RDS.
// HTTP servers have route name set to http.<portNumber>.
//
// Multiple HTTP servers can exist on the same port and the code will combine all of them into
// one single RDS payload for http.<portNumber>
//
// HTTPS servers with TLS termination (i.e. envoy decoding the content, and making outbound http calls to backends)
// will use route name https.<portNumber>.<portName>.<gatewayName>.<namespace>. HTTPS servers using SNI passthrough or
// non-HTTPS servers (e.g., TCP+TLS) with SNI passthrough will be setup as opaque TCP proxies without terminating
// the SSL connection. They would inspect the SNI header and forward to the appropriate upstream as opaque TCP.
//
// Within HTTPS servers terminating TLS, user could setup multiple servers in the gateway. each server could have
// one or more hosts but have different TLS certificates. In this case, we end up having separate filter chain
// for each server, with the filter chain match matching on the server specific TLS certs and SNI headers.
// We have two options here: either have all filter chains use the same RDS route name (e.g. "443") and expose
// all virtual hosts on that port to every filter chain uniformly or expose only the set of virtual hosts
// configured under the server for those certificates. We adopt the latter approach. In other words, each
// filter chain in the multi-filter-chain listener will have a distinct RDS route name
// (https.<portNumber>.<portName>.<gatewayName>.<namespace>) so that when a RDS request comes in, we serve the virtual
// hosts and associated routes for that server.
//
// Note that the common case is one where multiple servers are exposed under a single multi-SAN cert on a single port.
// In this case, we have a single https.<portNumber>.<portName>.<gatewayName>.<namespace> RDS for the HTTPS server.
// While we can use the same RDS route name for two servers (say HTTP and HTTPS) exposing the same set of hosts on
// different ports, the optimization (one RDS instead of two) could quickly become useless the moment the set of
// hosts on the two servers start differing -- necessitating the need for two different RDS routes.
func gatewayRDSRouteName(server *networking.Server, portNumber uint32, cfg config.Config) string {
	p := protocol.Parse(server.Port.Protocol)
	var suffix string
	if server.Bind != "" {
		suffix = "." + server.Bind
	}
	switch {
	case p.IsHTTP():
		return "http" + "." + strconv.Itoa(int(portNumber)) + suffix // Format: http.%d.%s
	case p == protocol.HTTPS && !gateway.IsPassThroughServer(server):
		// Note: terminated HTTPS uses the *configured* port number, not the resolved one.
		return "https" + "." + strconv.Itoa(int(server.Port.Number)) + "." +
			server.Port.Name + "." + cfg.Name + "." + cfg.Namespace + suffix // Format: https.%d.%s.%s.%s.%s
	default:
		// Passthrough TLS / opaque TCP servers have no RDS route.
		return ""
	}
}
// ParseGatewayRDSRouteName is used by the EnvoyFilter patching logic to match
// a specific route configuration to patch. It decodes route names produced by
// gatewayRDSRouteName; unrecognized names yield zero values.
func ParseGatewayRDSRouteName(name string) (portNumber int, portName, gatewayName string) {
	parts := strings.Split(name, ".")
	switch {
	case strings.HasPrefix(name, "http."):
		// HTTP gateway route: only the port number is encoded; the remaining
		// return values stay empty.
		if len(parts) >= 2 {
			portNumber, _ = strconv.Atoi(parts[1])
		}
	case strings.HasPrefix(name, "https."):
		// HTTPS route: https.<port>.<portName>.<gatewayName>.<namespace>[.<bind>]
		if len(parts) >= 5 {
			portNumber, _ = strconv.Atoi(parts[1])
			portName = parts[2]
			// gateway name is reported in ns/name form
			gatewayName = parts[4] + "/" + parts[3]
		}
	}
	return
}
// sanitizeServerHostNamespace normalizes namespace-qualified hosts in place:
//
//	"./host" becomes "<namespace>/host"
//	"*/host" becomes "host"
//	"*/*"    collapses the entire host list to just "*"
func sanitizeServerHostNamespace(server *networking.Server, namespace string) {
	for i, host := range server.Hosts {
		if !strings.Contains(host, "/") {
			continue
		}
		parts := strings.Split(host, "/")
		switch parts[0] {
		case ".":
			// "." refers to the gateway's own namespace.
			server.Hosts[i] = namespace + "/" + parts[1]
		case "*":
			if parts[1] == "*" {
				// Any-namespace wildcard host matches everything; the rest of the
				// list is irrelevant.
				server.Hosts = []string{"*"}
				return
			}
			server.Hosts[i] = parts[1]
		}
	}
}
// GatewayPortMap maps each RDS route's port number to the set of configured
// server port numbers that produced routes on it.
type GatewayPortMap map[int]sets.Set[int]

// getTargetPortMap builds a GatewayPortMap from the per-route-name server
// lists, decoding each route name to find its port.
func getTargetPortMap(serversByRouteName map[string][]*networking.Server) GatewayPortMap {
	pm := GatewayPortMap{}
	for routeName, servers := range serversByRouteName {
		portNumber, _, _ := ParseGatewayRDSRouteName(routeName)
		if _, ok := pm[portNumber]; !ok {
			pm[portNumber] = sets.New[int]()
		}
		for _, server := range servers {
			if server.Port == nil {
				// Servers without a port are rejected during merging; skip defensively.
				continue
			}
			pm[portNumber].Insert(int(server.Port.Number))
		}
	}
	return pm
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
io2 "github.com/AdamKorcz/bugdetectors/io"
envoy_jwt "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/monitoring"
)
// Tunables and defaults for JWKS fetching, caching, and refresh.
const (
	// https://openid.net/specs/openid-connect-discovery-1_0.html
	// OpenID Providers supporting Discovery MUST make a JSON document available at the path
	// formed by concatenating the string /.well-known/openid-configuration to the Issuer.
	openIDDiscoveryCfgURLSuffix = "/.well-known/openid-configuration"
	// OpenID Discovery web request timeout, in seconds.
	jwksHTTPTimeOutInSec = 5
	// JwtPubKeyEvictionDuration is the life duration for a cached item.
	// Cached item will be removed from the cache if it hasn't been used longer than JwtPubKeyEvictionDuration or if pilot
	// has failed to refresh it for more than JwtPubKeyEvictionDuration.
	JwtPubKeyEvictionDuration = 24 * 7 * time.Hour
	// JwtPubKeyRefreshIntervalOnFailure is the running interval of JWT pubKey refresh job on failure.
	JwtPubKeyRefreshIntervalOnFailure = time.Minute
	// JwtPubKeyRetryInterval is the retry interval between the attempt to retry getting the remote
	// content from network.
	JwtPubKeyRetryInterval = time.Second
	// JwtPubKeyRefreshIntervalOnFailureResetThreshold is the threshold (cap) for the
	// exponentially backed-off refresh interval on repeated failure.
	JwtPubKeyRefreshIntervalOnFailureResetThreshold = 60 * time.Minute
	// How many times should we retry the failed network fetch on main flow. The main flow
	// means it's called when Pilot is pushing configs. Do not retry to make sure not to block Pilot
	// too long.
	networkFetchRetryCountOnMainFlow = 0
	// How many times should we retry the failed network fetch on refresh flow. The refresh flow
	// means it's called when the periodically refresh job is triggered. We can retry more aggressively
	// as it's running separately from the main flow.
	networkFetchRetryCountOnRefreshFlow = 7
	// jwksExtraRootCABundlePath is the path to any additional CA certificates pilot should accept when resolving JWKS URIs.
	jwksExtraRootCABundlePath = "/cacerts/extra.pem"
)
var (
	// closeChan signals the background refresher goroutine to stop (see Close).
	closeChan = make(chan bool)
	// networkFetchSuccessCounter counts successful JWKS/OpenID network fetches.
	networkFetchSuccessCounter = monitoring.NewSum(
		"pilot_jwks_resolver_network_fetch_success_total",
		"Total number of successfully network fetch by pilot jwks resolver",
	)
	// networkFetchFailCounter counts failed JWKS/OpenID network fetches.
	networkFetchFailCounter = monitoring.NewSum(
		"pilot_jwks_resolver_network_fetch_fail_total",
		"Total number of failed network fetch by pilot jwks resolver",
	)
	// JwtPubKeyRefreshInterval is the running interval of JWT pubKey refresh job.
	JwtPubKeyRefreshInterval = features.PilotJwtPubKeyRefreshInterval
	// jwksuriChannel is a buffered channel for making jwksuri requests async:
	// GetPublicKey queues keys whose synchronous fetch failed so the background
	// refresher can retry them.
	jwksuriChannel = make(chan jwtKey, 5)
)
// jwtPubKeyEntry is a single cached entry for jwt public key.
// pubKey may be empty when the last fetch failed; such entries are retried by
// the background refresher (see GetPublicKey and refresh).
type jwtPubKeyEntry struct {
	pubKey string
	// The last success refreshed time of the pubKey.
	lastRefreshedTime time.Time
	// Cached item's last used time, which is set in GetPublicKey.
	lastUsedTime time.Time
}
// jwtKey is a key in the JwksResolver keyEntries map.
// jwksURI may be empty when only the issuer is known; it is then resolved
// through OpenID discovery (see resolveJwksURIUsingOpenID).
type jwtKey struct {
	jwksURI string
	issuer  string
}
// JwksResolver is resolver for jwksURI and jwt public key.
type JwksResolver struct {
	// Callback function to invoke when detecting jwt public key change.
	PushFunc func()

	// cache for JWT public key.
	// map key is jwtKey, map value is jwtPubKeyEntry.
	keyEntries sync.Map

	// secureHTTPClient is used for https JWKS endpoints; it is nil when no root
	// CA pool could be assembled (see newJwksResolverWithCABundlePaths).
	secureHTTPClient *http.Client
	// httpClient is used for plain http JWKS endpoints.
	httpClient *http.Client
	// refreshTicker drives the periodic refresh loop (created in refresher).
	refreshTicker *time.Ticker

	// Cached key will be removed from cache if (time.now - cachedItem.lastUsedTime >= evictionDuration), this prevents key cache growing indefinitely.
	evictionDuration time.Duration

	// Refresher job running interval.
	refreshInterval time.Duration

	// Refresher job running interval on failure.
	refreshIntervalOnFailure time.Duration

	// Refresher job default running interval without failure.
	refreshDefaultInterval time.Duration

	// retryInterval is the sleep between failed fetch attempts within a single
	// getRemoteContentWithRetry call.
	retryInterval time.Duration

	// How many times refresh job has detected JWT public key change happened, used in unit test.
	refreshJobKeyChangedCount uint64

	// How many times refresh job failed to fetch the public key from network, used in unit test.
	refreshJobFetchFailedCount uint64

	// Whenever istiod fails to fetch the pubkey from jwksuri in main flow this variable becomes true for background trigger.
	// NOTE(review): only written/read from the refresher goroutine's select loop — confirm before adding other writers.
	jwksUribackgroundChannel bool
}
// NewJwksResolver creates a new JwksResolver using the default extra root CA
// bundle path, and starts its background refresh job.
func NewJwksResolver(evictionDuration, refreshDefaultInterval, refreshIntervalOnFailure, retryInterval time.Duration) *JwksResolver {
	caBundlePaths := []string{jwksExtraRootCABundlePath}
	return newJwksResolverWithCABundlePaths(evictionDuration, refreshDefaultInterval, refreshIntervalOnFailure, retryInterval, caBundlePaths)
}
// newJwksResolverWithCABundlePaths builds a JwksResolver with the given timing
// parameters and extra root CA bundle paths, then starts the background
// refresher goroutine.
//
// The plain httpClient is always created; secureHTTPClient (needed for https
// JWKS endpoints) is only created when a usable root CA pool was assembled.
func newJwksResolverWithCABundlePaths(
	evictionDuration,
	refreshDefaultInterval,
	refreshIntervalOnFailure,
	retryInterval time.Duration,
	caBundlePaths []string,
) *JwksResolver {
	ret := &JwksResolver{
		evictionDuration:         evictionDuration,
		refreshInterval:          refreshDefaultInterval,
		refreshDefaultInterval:   refreshDefaultInterval,
		refreshIntervalOnFailure: refreshIntervalOnFailure,
		retryInterval:            retryInterval,
		httpClient: &http.Client{
			Timeout: jwksHTTPTimeOutInSec * time.Second,
			Transport: &http.Transport{
				Proxy:             http.ProxyFromEnvironment,
				DisableKeepAlives: true,
			},
		},
	}

	// Seed the TLS root pool from the system certs, then append any extra CA
	// bundles that exist on disk. Missing bundle files are tolerated.
	caCertPool, err := x509.SystemCertPool()
	caCertsFound := true
	if err != nil {
		caCertsFound = false
		log.Errorf("Failed to fetch Cert from SystemCertPool: %v", err)
	}

	if caCertPool != nil {
		for _, pemFile := range caBundlePaths {
			caCert, err := os.ReadFile(pemFile)
			if err == nil {
				// Any successfully appended PEM (or the system pool itself) counts
				// as "certs found".
				caCertsFound = caCertPool.AppendCertsFromPEM(caCert) || caCertsFound
			}
		}
	}

	if caCertsFound {
		ret.secureHTTPClient = &http.Client{
			Timeout: jwksHTTPTimeOutInSec * time.Second,
			Transport: &http.Transport{
				Proxy:             http.ProxyFromEnvironment,
				DisableKeepAlives: true,
				TLSClientConfig: &tls.Config{
					// nolint: gosec // user explicitly opted into insecure
					InsecureSkipVerify: features.JwksResolverInsecureSkipVerify,
					RootCAs:            caCertPool,
					MinVersion:         tls.VersionTLS12,
				},
			},
		}
	}

	// Explicitly zero the test-observable counters before the refresher
	// goroutine (which increments them) starts.
	atomic.StoreUint64(&ret.refreshJobKeyChangedCount, 0)
	atomic.StoreUint64(&ret.refreshJobFetchFailedCount, 0)
	go ret.refresher()

	return ret
}
// errEmptyPubKeyFoundInCache is returned by GetPublicKey when a cache entry
// exists for the key but holds an empty public key (a prior fetch failed and
// the background refresher has not yet succeeded).
var errEmptyPubKeyFoundInCache = errors.New("empty public key found in cache")
// GetPublicKey returns the JWT public key for the given issuer/jwksURI pair.
// On a cache hit the cached key is returned and its last-used time is bumped;
// an empty cached key yields errEmptyPubKeyFoundInCache. On a miss the key is
// fetched synchronously (resolving the jwksURI via OpenID discovery first if
// it is empty); on fetch failure an empty entry is cached and the key is
// queued on jwksuriChannel so the background refresher retries it.
func (r *JwksResolver) GetPublicKey(issuer string, jwksURI string) (string, error) {
	now := time.Now()
	key := jwtKey{issuer: issuer, jwksURI: jwksURI}
	if val, found := r.keyEntries.Load(key); found {
		e := val.(jwtPubKeyEntry)
		// Update cached key's last used time.
		// NOTE(review): this Load/Store pair is not atomic; concurrent callers
		// can interleave and lose a lastUsedTime update — appears benign since it
		// only feeds eviction, but confirm.
		e.lastUsedTime = now
		r.keyEntries.Store(key, e)
		if e.pubKey == "" {
			// A previous fetch failed; surface the sentinel so callers can fall back.
			return e.pubKey, errEmptyPubKeyFoundInCache
		}
		return e.pubKey, nil
	}

	var err error
	var pubKey string
	if jwksURI == "" {
		// Fetch the jwks URI if it is not hardcoded on config.
		jwksURI, err = r.resolveJwksURIUsingOpenID(issuer)
	}
	if err != nil {
		log.Errorf("Failed to jwks URI from %q: %v", issuer, err)
	} else {
		var resp []byte
		// No retries on the main flow to avoid blocking Pilot's config push.
		resp, err = r.getRemoteContentWithRetry(jwksURI, networkFetchRetryCountOnMainFlow)
		if err != nil {
			log.Errorf("Failed to fetch public key from %q: %v", jwksURI, err)
		}
		// On failure resp is nil, so an empty pubKey is cached below.
		pubKey = string(resp)
	}

	// Cache even on failure: the empty entry marks the key as known so the
	// background refresher keeps trying it.
	r.keyEntries.Store(key, jwtPubKeyEntry{
		pubKey:            pubKey,
		lastRefreshedTime: now,
		lastUsedTime:      now,
	})

	if err != nil {
		// fetching the public key in the background
		jwksuriChannel <- key
	}
	return pubKey, err
}
// BuildLocalJwks builds a LocalJwks config, fetching the JWT public key from
// jwksURI (or via OpenID discovery on jwtIssuer) when jwtPubKey is empty. If
// the key cannot be fetched yet, a static fake JWKS is used so requests
// carrying a JWT are rejected rather than accepted unverified.
func (r *JwksResolver) BuildLocalJwks(jwksURI, jwtIssuer, jwtPubKey string) *envoy_jwt.JwtProvider_LocalJwks {
	if jwtPubKey == "" {
		// jwtKeyResolver should never be nil since the function is only called in Discovery Server request processing
		// workflow, where the JWT key resolver should have already been initialized on server creation.
		key, err := r.GetPublicKey(jwtIssuer, jwksURI)
		if err != nil {
			log.Infof("The JWKS key is not yet fetched for issuer %s (%s), using a fake JWKS for now", jwtIssuer, jwksURI)
			// This is a temporary workaround to reject a request with JWT token by using a fake jwks when istiod failed to fetch it.
			// TODO(xulingqing): Find a better way to reject the request without using the fake jwks.
			key = FakeJwks
		}
		jwtPubKey = key
	}
	inline := &core.DataSource_InlineString{InlineString: jwtPubKey}
	return &envoy_jwt.JwtProvider_LocalJwks{
		LocalJwks: &core.DataSource{Specifier: inline},
	}
}
// FakeJwks is a fake jwks used to reject JWT-bearing requests while the real
// key is unavailable (see BuildLocalJwks). It was generated by the following code:
/*
	fakeJwksRSAKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	key, _ := jwk.FromRaw(fakeJwksRSAKey)
	rsaKey, _ := key.(jwk.RSAPrivateKey)
	res, _ := json.Marshal(rsaKey)
	fmt.Printf("{\"keys\":[ %s]}\n", string(res))
*/
// It must be static across different instances and versions so config stays stable.
// More details can be found: https://github.com/istio/istio/pull/47661.
// nolint: lll
const FakeJwks = `{
  "keys": [
    {
      "d": "T6cYL1_1mWHQLtOcbOgWV6HjhS0HVh3Apt4xEar5beaMBX3IYLFITz684DOHNy5dzaxTRqvGj-zHEgNrgy2T-Izoo2Z-xJ2Zse6wQ4R0xbwd0by8IbhiePcjgNWXXzildMHkBVrxNZhUICpb_r8efTHZfEwc6FPjJDVgJKtEc6WGCOiWnRYcGTTlsB5-QrQQlDFLmrU2Z6QDmqJU33aDJFr_qzmRiVNXeHuhlNca2JnKNPpxjRVsy7Kbc8PorxiPijnLzV8_pccsMyLvA8pWUl5FRtAJNSss7x_81HEcInlj7yA896zMiELSPps1rW68yVvpuKEuYulzGi4z74gz0Q",
      "dp": "YkH_MFMlgnGZntOCXLhib1LLW1JJCYmTzebn-JSluFJbG_qQgzuZkUu5s2cYBHmiZkDGmnTDOAYXrOaQSgVIBQMPxMqdUf8WjRIlEb88zvKpM_Curp59wuy6MhI7Ej3xKiixHX3bIq5Qujk3ZdsDbHUi3HH56-V7cdFKccqlg6E",
      "dq": "CXCwRpRgbtqzLcsfuy-5IUZosrvEDHCrFh0C-A6OYvKpHzn8PDwb62YGddhiHzSrgr1EUgykQxiIF2xG8dBaq8xXg9Bh4G1kkgIsqJmL5DG1lwyh_-Jt4nPyiLHZ--ERc48cjj515uRpGd-CWXdIf2EWYaJNsEkiNaYEClJQIA8",
      "e": "AQAB",
      "kty": "RSA",
      "n": "vqS7RN4b34i3_5YyhygtBe33gI6GK_0ldW8WMZaunS28T-WAzJOAoZ7E9Y0mHS8vcDES0eZIUpp6Ft9sRPhOlzQfo_7l-3DnaD9LxJVKdXjE1jugxfI9YX1qJpD9S9wRZxQIhPky9UzZDkpFh_KpL6pZUt4cbPtW0VCctjqvpI11yHNk4CEbzw-RRFLMJkLFJqgPa2JPzGZ-TqJdkSDQ7UtRiKzjRcWGnAdLsTq6WabDA1Fn1JVI9TWu-YDbLufDUDco46qyPgpxAqcRQG39cWZAQzMwNEZ-Yec_WiqDYqGTU6K8BBWeEIuMhiWfxGmtqX35rb9Qk_qeYDsqqT95Pw",
      "p": "7EK8xaN7qCdWCeQ1ptXWvuc6qotZc6oD-j1ecgel9FqmfkmaioVEbEAfP_N73QAjw-sU60sK3XK8LV4fkGUoJV-MDvmiCzy3wUPe-adSaTCxFykgOm6SPA9NKCqAh8lUm6GUm9RZkjwkv4xzZ8pJjng3d74WXx7zhTEH6yi4E00",
      "q": "zpJPbhAn79s_jPm4OhOvvPKT-ISN6EyLu_g6joh1Dzf-HCF149KKQfuLDtwDCsCNf1cE_BCb4qoHAVBLDjbqusQF019zNIFTHeUL8oMpbv-5of7km0K8oo-DQp5b8u05PKaEQu3OXmRZFwuO6dSTPvXO094X-8vm791FLcJ-4Ls",
      "qi": "SXz-JeBcTYMcO5lDBlrI9qd2eMQAYfVFDyq523L-RFhdravaxaYutT7dWk5f4Smzbh5KtvKifcFUMnV88On4HCiTrdBjLJJhIYqZQwzP8hYbXZlw4SvCtXKUrvLwLEUQaYg6bopp4VJ5c3XCZD5z3paHlZ45oCDsMeSEWxAD6lo"
    }
  ]
}`
// resolveJwksURIUsingOpenID resolves the jwks_uri for an issuer by fetching the
// OpenID discovery document at <issuer>/.well-known/openid-configuration.
func (r *JwksResolver) resolveJwksURIUsingOpenID(issuer string) (string, error) {
	// Try to get jwks_uri through OpenID Discovery.
	discoveryURL := strings.TrimSuffix(issuer, "/") + openIDDiscoveryCfgURLSuffix
	body, err := r.getRemoteContentWithRetry(discoveryURL, networkFetchRetryCountOnMainFlow)
	if err != nil {
		log.Errorf("Failed to fetch jwks_uri from %q: %v", discoveryURL, err)
		return "", err
	}

	var data map[string]any
	if err := json.Unmarshal(body, &data); err != nil {
		return "", err
	}
	uri, ok := data["jwks_uri"].(string)
	if !ok {
		return "", fmt.Errorf("invalid jwks_uri %v in openID discovery configuration", data["jwks_uri"])
	}
	return uri, nil
}
// getRemoteContentWithRetry GETs uri and returns the response body. It makes
// up to `retry` attempts (sleeping retryInterval between them) and then one
// final attempt whose result is returned as-is. Every attempt updates the
// network fetch success/failure counters. https URIs require the secure
// client; if it was not initialized (no usable CA bundle), an error is
// returned immediately.
func (r *JwksResolver) getRemoteContentWithRetry(uri string, retry int) ([]byte, error) {
	u, err := url.Parse(uri)
	if err != nil {
		log.Errorf("Failed to parse %q", uri)
		return nil, err
	}

	client := r.httpClient
	if strings.EqualFold(u.Scheme, "https") {
		// https client may be uninitialized because of root CA bundle missing.
		if r.secureHTTPClient == nil {
			return nil, fmt.Errorf("pilot does not support fetch public key through https endpoint %q", uri)
		}

		client = r.secureHTTPClient
	}

	getPublicKey := func() (b []byte, e error) {
		// Record a fetch metric for this attempt based on the named return e.
		defer func() {
			if e != nil {
				networkFetchFailCounter.Increment()
			} else {
				networkFetchSuccessCounter.Increment()
			}
		}()

		resp, err := client.Get(uri)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		// NOTE(review): io2 is a fuzzing "bug detector" wrapper
		// (github.com/AdamKorcz/bugdetectors) around io.ReadAll; upstream code
		// reads the body with io.ReadAll directly — confirm this instrumentation
		// is intentional before shipping.
		body, err := io2.ReadAll(resp.Body, "/src/istio/pilot/pkg/model/jwks_resolver.go:376:16 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
		if err != nil {
			return nil, err
		}

		if resp.StatusCode < 200 || resp.StatusCode >= 300 {
			// Include a quoted (and truncated, if long) form of the body in the
			// error to aid debugging.
			message := strconv.Quote(string(body))
			if len(message) > 100 {
				message = message[:100]
				return nil, fmt.Errorf("status %d, message %s(truncated)", resp.StatusCode, message)
			}
			return nil, fmt.Errorf("status %d, message %s", resp.StatusCode, message)
		}

		return body, nil
	}

	for i := 0; i < retry; i++ {
		body, err := getPublicKey()
		if err == nil {
			return body, nil
		}
		log.Warnf("Failed to GET from %q: %s. Retry in %v", uri, err, r.retryInterval)
		time.Sleep(r.retryInterval)
	}

	// Return the last fetch directly, reaching here means we have tried `retry` times, this will be
	// the last time for the retry.
	return getPublicKey()
}
// refresher is the background loop that keeps cached JWT public keys fresh.
// It refreshes on a ticker, services on-demand retry requests arriving via
// jwksuriChannel (queued by GetPublicKey after a failed fetch), and exits when
// closeChan is signaled.
func (r *JwksResolver) refresher() {
	// Wake up once in a while and refresh stale items.
	r.refreshTicker = time.NewTicker(r.refreshInterval)
	lastHasError := false
	for {
		select {
		case <-r.refreshTicker.C:
			// Skip the periodic pass while a background-triggered pass is running.
			if !r.jwksUribackgroundChannel {
				lastHasError = r.refreshCache(lastHasError)
			}
		case <-closeChan:
			r.refreshTicker.Stop()
			return
		case <-jwksuriChannel:
			// Triggered when the main flow failed to fetch a key: the flag makes
			// refresh() retry only the entries with empty keys.
			r.jwksUribackgroundChannel = true
			lastHasError = r.refreshCache(lastHasError)
			r.jwksUribackgroundChannel = false
		}
	}
}
// refreshCache runs one refresh pass and adjusts the refresh interval based on
// the outcome: exponential backoff (capped at the reset threshold) on repeated
// failure, the on-failure interval on a first failure, and the default
// interval on success. It returns whether this pass had errors so the caller
// can feed it back in on the next invocation.
func (r *JwksResolver) refreshCache(lastHasError bool) bool {
	currentHasError := r.refresh()
	switch {
	case currentHasError && lastHasError:
		// Consecutive failures: exponential backoff, capped at the threshold.
		r.refreshInterval *= 2
		if r.refreshInterval > JwtPubKeyRefreshIntervalOnFailureResetThreshold {
			r.refreshInterval = JwtPubKeyRefreshIntervalOnFailureResetThreshold
		}
	case currentHasError:
		// First failure: drop to the on-failure interval.
		r.refreshInterval = r.refreshIntervalOnFailure
	default:
		// Success: restore the default interval.
		r.refreshInterval = r.refreshDefaultInterval
	}
	r.refreshTicker.Reset(r.refreshInterval)
	return currentHasError
}
// refresh walks every cached key entry, evicting stale ones and re-fetching
// the rest concurrently (one goroutine per entry, joined before returning).
// It returns true if any fetch or parse errors occurred. When invoked from the
// jwksuriChannel path (jwksUribackgroundChannel set), only entries with an
// empty pubKey — i.e. prior failed fetches — are retried. If any key changed,
// PushFunc is invoked once at the end.
func (r *JwksResolver) refresh() bool {
	var wg sync.WaitGroup
	var hasChange, hasErrors atomic.Bool
	r.keyEntries.Range(func(key any, value any) bool {
		now := time.Now()
		k := key.(jwtKey)
		e := value.(jwtPubKeyEntry)
		// In the background-trigger pass, skip entries that already have a key.
		if e.pubKey != "" && r.jwksUribackgroundChannel {
			return true
		}
		// Remove cached item for either of the following 2 situations
		// 1) it hasn't been used for a while
		// 2) it hasn't been refreshed successfully for a while
		// This makes sure 2 things, we don't grow the cache infinitely and also we don't reuse a cached public key
		// with no success refresh for too much time.
		if now.Sub(e.lastUsedTime) >= r.evictionDuration || now.Sub(e.lastRefreshedTime) >= r.evictionDuration {
			log.Infof("Removed cached JWT public key (lastRefreshed: %s, lastUsed: %s) from %q",
				e.lastRefreshedTime, e.lastUsedTime, k.issuer)
			r.keyEntries.Delete(k)
			return true
		}

		oldPubKey := e.pubKey
		// Increment the WaitGroup counter.
		wg.Add(1)

		// k, e, now and oldPubKey are per-iteration locals of the Range callback,
		// so each goroutine captures its own copies.
		go func() {
			// Decrement the counter when the goroutine completes.
			defer wg.Done()
			jwksURI := k.jwksURI
			if jwksURI == "" {
				// The URI was never resolved; do OpenID discovery now and re-key
				// the entry under the resolved URI (old issuer-only entry removed).
				var err error
				jwksURI, err = r.resolveJwksURIUsingOpenID(k.issuer)
				if err != nil {
					hasErrors.Store(true)
					log.Errorf("Failed to resolve Jwks from issuer %q: %v", k.issuer, err)
					atomic.AddUint64(&r.refreshJobFetchFailedCount, 1)
					return
				}
				r.keyEntries.Delete(k)
				k.jwksURI = jwksURI
			}
			resp, err := r.getRemoteContentWithRetry(jwksURI, networkFetchRetryCountOnRefreshFlow)
			if err != nil {
				hasErrors.Store(true)
				log.Errorf("Failed to refresh JWT public key from %q: %v", jwksURI, err)
				atomic.AddUint64(&r.refreshJobFetchFailedCount, 1)
				// Drop entries that never had a key; keep serving a stale key otherwise.
				if oldPubKey == "" {
					r.keyEntries.Delete(k)
				}
				return
			}
			newPubKey := string(resp)

			r.keyEntries.Store(k, jwtPubKeyEntry{
				pubKey:            newPubKey,
				lastRefreshedTime: now,            // update the lastRefreshedTime if we get a success response from the network.
				lastUsedTime:      e.lastUsedTime, // keep original lastUsedTime.
			})

			isNewKey, err := compareJWKSResponse(oldPubKey, newPubKey)
			if err != nil {
				hasErrors.Store(true)
				log.Errorf("Failed to refresh JWT public key from %q: %v", jwksURI, err)
				return
			}
			if isNewKey {
				hasChange.Store(true)
				log.Infof("Updated cached JWT public key from %q", jwksURI)
			}
		}()

		return true
	})

	// Wait for all go routine to complete.
	wg.Wait()

	if hasChange.Load() {
		atomic.AddUint64(&r.refreshJobKeyChangedCount, 1)
		// Push public key changes to sidecars.
		if r.PushFunc != nil {
			r.PushFunc()
		}
	}
	return hasErrors.Load()
}
// Close will shut down the refresher job.
// It signals termination by sending on the package-level closeChan (declared elsewhere
// in this file); the send blocks until the refresher goroutine receives it.
// TODO: may need to figure out the right place to call this function.
// (right now calls it from initDiscoveryService in pkg/bootstrap/server.go).
func (r *JwksResolver) Close() {
	closeChan <- true
}
// Compare two JWKS responses, returning true if there is a difference and false otherwise.
// Returns an error only when the NEW key is not valid JSON (we never want to use such a key).
// An unparseable OLD key is treated as "changed" so the new valid key replaces it.
func compareJWKSResponse(oldKeyString string, newKeyString string) (bool, error) {
	// Fast path: identical strings cannot differ.
	if oldKeyString == newKeyString {
		return false, nil
	}

	var oldJWKs map[string]any
	var newJWKs map[string]any
	if err := json.Unmarshal([]byte(newKeyString), &newJWKs); err != nil {
		// If the new key is not parseable as JSON return an error since we will not want to use this key
		log.Warnf("New JWKs public key JSON is not parseable: %s", newKeyString)
		return false, err
	}
	if err := json.Unmarshal([]byte(oldKeyString), &oldJWKs); err != nil {
		// Old key is unusable; report a change so the (valid) new key takes over.
		log.Warnf("Previous JWKs public key JSON is not parseable: %s", oldKeyString)
		return true, nil
	}

	// Sort both sets of keys by "kid (key ID)" to be able to directly compare
	oldKeys, oldKeysExists := oldJWKs["keys"].([]any)
	newKeys, newKeysExists := newJWKs["keys"].([]any)
	if oldKeysExists && newKeysExists {
		sortJWKSKeysByKid(oldKeys)
		sortJWKSKeysByKid(newKeys)
		// Once sorted, return the result of deep comparison of the arrays of keys
		return !reflect.DeepEqual(oldKeys, newKeys), nil
	}

	// If we aren't able to compare using keys, we should return true
	// since we already checked exact equality of the responses
	return true, nil
}

// sortJWKSKeysByKid sorts a JWKS "keys" array in place by each key's "kid" (key ID),
// falling back to comparing field counts when a kid is missing or not a string.
// Extracted to avoid duplicating the comparator for the old and new key sets.
func sortJWKSKeysByKid(keys []any) {
	sort.Slice(keys, func(i, j int) bool {
		key1, ok1 := keys[i].(map[string]any)
		key2, ok2 := keys[j].(map[string]any)
		if ok1 && ok2 {
			key1ID, kid1Exists := key1["kid"]
			key2ID, kid2Exists := key2["kid"]
			if kid1Exists && kid2Exists {
				key1IDStr, isStr1 := key1ID.(string)
				key2IDStr, isStr2 := key2ID.(string)
				if isStr1 && isStr2 {
					return key1IDStr < key2IDStr
				}
			}
		}
		// Fallback ordering; a failed map assertion leaves a nil map (len 0).
		return len(key1) < len(key2)
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kstatus
import (
"reflect"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/slices"
)
const (
	// StatusTrue is the string form of a "true" condition status (see InvertStatus).
	StatusTrue = "True"
	// StatusFalse is the string form of a "false" condition status.
	StatusFalse = "False"
)
// InvertStatus returns the opposite of the provided status. If an invalid status is passed in, False is returned
func InvertStatus(status metav1.ConditionStatus) metav1.ConditionStatus {
	if status == StatusFalse {
		return StatusTrue
	}
	// StatusTrue and any unrecognized value both invert to False.
	return StatusFalse
}
// WrappedStatus provides a wrapper around a status message that keeps track of whether or not any
// changes have been made. This allows users to declaratively write status, without worrying about
// tracking changes. When ready to commit (typically to Kubernetes), any messages with Dirty=false can
// be discarded.
type WrappedStatus struct {
	// Status is the object that is wrapped.
	config.Status
	// Dirty indicates if this object has been modified at all.
	// Note: only changes wrapped in Mutate are tracked.
	Dirty bool
}
// Wrap returns a WrappedStatus holding a deep copy of s, with no changes recorded yet.
func Wrap(s config.Status) *WrappedStatus {
	return &WrappedStatus{
		Status: config.DeepCopy(s),
		Dirty:  false,
	}
}
// Mutate applies f to the wrapped status and records whether anything changed.
// A nil wrapped status is left untouched.
func (w *WrappedStatus) Mutate(f func(s config.Status) config.Status) {
	if w.Status == nil {
		return
	}
	snapshot := config.DeepCopy(w.Status)
	w.Status = f(w.Status)
	// TODO: change this to be more efficient. Likely we allow modifications via WrappedStatus that
	// modify specific things (ie conditions).
	w.Dirty = w.Dirty || !reflect.DeepEqual(snapshot, w.Status)
}
// Unwrap returns the underlying status object (which may be nil).
func (w *WrappedStatus) Unwrap() config.Status {
	return w.Status
}
// EmptyCondition is the zero value returned by GetCondition when no condition of the requested type exists.
var EmptyCondition = metav1.Condition{}
// GetCondition returns the first condition with the given type, or EmptyCondition if none matches.
func GetCondition(conditions []metav1.Condition, condition string) metav1.Condition {
	for i := range conditions {
		if conditions[i].Type == condition {
			return conditions[i]
		}
	}
	return EmptyCondition
}
// UpdateConditionIfChanged updates a condition if it has been changed.
// If nothing changed, the original slice is returned unmodified; otherwise a clone
// with the condition added or replaced is returned.
func UpdateConditionIfChanged(conditions []metav1.Condition, condition metav1.Condition) []metav1.Condition {
	updated := slices.Clone(conditions)
	prev := slices.FindFunc(updated, func(c metav1.Condition) bool {
		return c.Type == condition.Type
	})
	if prev == nil {
		// No existing condition of this type; append the new one.
		return append(updated, condition)
	}
	if prev.Status == condition.Status {
		if prev.Message == condition.Message &&
			prev.ObservedGeneration == condition.ObservedGeneration {
			// Skip update, no changes
			return conditions
		}
		// retain LastTransitionTime if status is not changed
		condition.LastTransitionTime = prev.LastTransitionTime
	}
	*prev = condition
	return updated
}
// CreateCondition sets a condition only if it has not already been set.
// An existing condition whose Reason equals unsetReason is considered a default
// and is overwritten; any other existing condition of the same type is left alone.
func CreateCondition(conditions []metav1.Condition, condition metav1.Condition, unsetReason string) []metav1.Condition {
	out := append([]metav1.Condition(nil), conditions...)
	for i := range out {
		if out[i].Type != condition.Type {
			continue
		}
		if out[i].Reason == unsetReason {
			// Condition is set, but its for unsetReason. This is needed because some conditions have defaults
			out[i] = condition
		}
		return out
	}
	// Not found! We should set it
	return append(out, condition)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import "istio.io/istio/pkg/monitoring"
// providerLookupClusterFailures counts failed cluster lookups; incremented (with a
// provider label) via IncLookupClusterFailures.
var providerLookupClusterFailures = monitoring.NewSum(
	"provider_lookup_cluster_failures",
	"Number of times a cluster lookup failed",
)
// IncLookupClusterFailures increments the cluster-lookup failure metric, labeled with
// the given provider name. (typeTag is a metric label declared elsewhere in this package.)
func IncLookupClusterFailures(provider string) {
	providerLookupClusterFailures.With(typeTag.Value(provider)).Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"cmp"
"fmt"
"net"
"sort"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"github.com/miekg/dns"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/istiomultierror"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
)
// NetworkGateway is the gateway of a network
type NetworkGateway struct {
	// Network is the ID of the network where this Gateway resides.
	Network network.ID
	// Cluster is the ID of the k8s cluster where this Gateway resides.
	Cluster cluster.ID
	// Addr is the gateway address: an IP, or a hostname to be resolved by the
	// control plane (see resolveHostnameGateways).
	Addr string
	// Port is the port the gateway is reachable on.
	Port uint32
}
// NetworkGatewaysWatcher is implemented by sources that supply network gateways and
// notify listeners when the set changes.
type NetworkGatewaysWatcher interface {
	// NetworkGateways returns the current set of gateways.
	NetworkGateways() []NetworkGateway
	// AppendNetworkGatewayHandler registers a callback invoked when gateways change.
	AppendNetworkGatewayHandler(h func())
}
// NetworkGatewaysHandler can be embedded to easily implement NetworkGatewaysWatcher.
type NetworkGatewaysHandler struct {
	// handlers holds callbacks, invoked synchronously in registration order.
	handlers []func()
}

// AppendNetworkGatewayHandler registers a callback to be run on gateway changes.
func (ngh *NetworkGatewaysHandler) AppendNetworkGatewayHandler(h func()) {
	ngh.handlers = append(ngh.handlers, h)
}

// NotifyGatewayHandlers invokes every registered handler.
func (ngh *NetworkGatewaysHandler) NotifyGatewayHandlers() {
	for i := range ngh.handlers {
		ngh.handlers[i]()
	}
}
// NetworkGateways indexes gateways for quick lookup by network and by network+cluster.
// All fields are guarded by mu, which is a pointer so it can be shared with the owning
// NetworkManager (see NewNetworkManager).
type NetworkGateways struct {
	mu *sync.RWMutex
	// least common multiple of gateway number of {per network, per cluster}
	lcm uint32
	byNetwork map[network.ID][]NetworkGateway
	byNetworkAndCluster map[networkAndCluster][]NetworkGateway
}
// NetworkManager provides gateway details for accessing remote networks.
type NetworkManager struct {
	env *Environment
	// exported for test
	NameCache *networkGatewayNameCache
	// xdsUpdater receives a full push request when the gateway set changes (may be nil).
	xdsUpdater XDSUpdater

	// just to ensure NetworkGateways and Unresolved are updated together
	mu sync.RWMutex
	// embedded NetworkGateways only includes gateways with IPs
	// hostnames are resolved in control plane (or filtered out if feature is disabled)
	*NetworkGateways
	// includes all gateways with no DNS resolution or filtering, regardless of feature flags
	Unresolved *NetworkGateways
}
// NewNetworkManager creates a new NetworkManager from the Environment by merging
// together the MeshNetworks and ServiceRegistry-specific gateways.
// It registers itself for mesh-network, registry, and DNS-cache change events and
// performs an initial load before returning.
func NewNetworkManager(env *Environment, xdsUpdater XDSUpdater) (*NetworkManager, error) {
	nameCache, err := newNetworkGatewayNameCache()
	if err != nil {
		return nil, err
	}
	mgr := &NetworkManager{
		env:             env,
		NameCache:       nameCache,
		xdsUpdater:      xdsUpdater,
		NetworkGateways: &NetworkGateways{},
		Unresolved:      &NetworkGateways{},
	}

	// share lock with root NetworkManager
	mgr.NetworkGateways.mu = &mgr.mu
	mgr.Unresolved.mu = &mgr.mu

	env.AddNetworksHandler(mgr.reloadGateways)
	// register to per registry, will be called when gateway service changed
	env.AppendNetworkGatewayHandler(mgr.reloadGateways)
	nameCache.AppendNetworkGatewayHandler(mgr.reloadGateways)
	// Populate the initial gateway snapshot; no push is triggered here.
	mgr.reload()
	return mgr, nil
}
// reloadGateways reloads NetworkGateways and triggers a push if they change.
func (mgr *NetworkManager) reloadGateways() {
	if !mgr.reload() {
		return
	}
	if mgr.xdsUpdater != nil {
		log.Infof("gateways changed, triggering push")
		mgr.xdsUpdater.ConfigUpdate(&PushRequest{Full: true, Reason: NewReasonStats(NetworksTrigger)})
	}
}
// reload rebuilds the gateway snapshot by merging MeshNetworks config with the
// service registries, then updates both the resolved (IP-only) and unresolved sets.
// It takes mgr.mu itself, so callers must not hold the lock.
// Returns true if either gateway set changed.
func (mgr *NetworkManager) reload() bool {
	mgr.mu.Lock()
	defer mgr.mu.Unlock()
	log.Infof("reloading network gateways")

	// Generate a snapshot of the state of gateways by merging the contents of
	// MeshNetworks and the ServiceRegistries.

	// Store all gateways in a set initially to eliminate duplicates.
	gatewaySet := make(NetworkGatewaySet)

	// First, load gateways from the static MeshNetworks config.
	meshNetworks := mgr.env.NetworksWatcher.Networks()
	if meshNetworks != nil {
		for nw, networkConf := range meshNetworks.Networks {
			for _, gw := range networkConf.Gateways {
				if gw.GetAddress() == "" {
					// registryServiceName addresses will be populated via kube service registry
					continue
				}
				gatewaySet.Insert(NetworkGateway{
					Cluster: "", /* TODO(nmittler): Add Cluster to the API */
					Network: network.ID(nw),
					Addr:    gw.GetAddress(),
					Port:    gw.Port,
				})
			}
		}
	}

	// Second, load registry-specific gateways.
	//  - the internal map of label gateways - these get deleted if the service is deleted, updated if the ip changes etc.
	//  - the computed map from meshNetworks (triggered by reloadNetworkLookup, the ported logic from getGatewayAddresses)
	gatewaySet.InsertAll(mgr.env.NetworkGateways()...)

	resolvedGatewaySet := mgr.resolveHostnameGateways(gatewaySet)

	// BUG FIX: evaluate BOTH updates before combining the results. The previous
	// `a.update(...) || b.update(...)` short-circuited, skipping the side-effecting
	// Unresolved.update whenever the resolved set changed and leaving Unresolved stale.
	resolvedChanged := mgr.NetworkGateways.update(resolvedGatewaySet)
	unresolvedChanged := mgr.Unresolved.update(gatewaySet)
	return resolvedChanged || unresolvedChanged
}
// update must be called with the lock held. It rebuilds the indexes from gatewaySet
// and reports whether anything changed.
func (gws *NetworkGateways) update(gatewaySet NetworkGatewaySet) bool {
	if gatewaySet.Equals(sets.New(gws.allGateways()...)) {
		return false
	}

	// index by network or network+cluster for quick lookup
	byNetwork := make(map[network.ID][]NetworkGateway)
	byNetworkAndCluster := make(map[networkAndCluster][]NetworkGateway)
	for gw := range gatewaySet {
		byNetwork[gw.Network] = append(byNetwork[gw.Network], gw)
		nc := networkAndClusterForGateway(&gw)
		byNetworkAndCluster[nc] = append(byNetworkAndCluster[nc], gw)
	}

	// Sort each index entry for stable ordering, and collect the per-key gateway
	// counts so the least common multiple can be computed below.
	var counts []int
	for k, list := range byNetwork {
		byNetwork[k] = SortGateways(list)
		counts = append(counts, len(list))
	}
	for k, list := range byNetworkAndCluster {
		byNetworkAndCluster[k] = SortGateways(list)
		counts = append(counts, len(list))
	}

	// calculate lcm across all counts
	lcmVal := 1
	for _, c := range counts {
		lcmVal = lcm(lcmVal, c)
	}

	gws.lcm = uint32(lcmVal)
	gws.byNetwork = byNetwork
	gws.byNetworkAndCluster = byNetworkAndCluster

	return true
}
// resolveHostnameGateways either resolves or removes gateways that use a non-IP address.
// Gateways with IP addresses pass through unchanged. Hostname gateways are expanded into
// one gateway per resolved address, or dropped (with a warning) when the
// RESOLVE_HOSTNAME_GATEWAYS feature is disabled.
func (mgr *NetworkManager) resolveHostnameGateways(gatewaySet NetworkGatewaySet) NetworkGatewaySet {
	resolvedGatewaySet := make(NetworkGatewaySet, len(gatewaySet))
	// filter the list of gateways to resolve
	hostnameGateways := map[string][]NetworkGateway{}
	names := sets.New[string]()
	for gw := range gatewaySet {
		if netutil.IsValidIPAddress(gw.Addr) {
			// Already an IP; nothing to resolve.
			resolvedGatewaySet.Insert(gw)
			continue
		}
		if !features.ResolveHostnameGateways {
			log.Warnf("Failed parsing gateway address %s from Service Registry. "+
				"Set RESOLVE_HOSTNAME_GATEWAYS on istiod to enable resolving hostnames in the control plane.",
				gw.Addr)
			continue
		}
		hostnameGateways[gw.Addr] = append(hostnameGateways[gw.Addr], gw)
		names.Insert(gw.Addr)
	}

	if !features.ResolveHostnameGateways {
		return resolvedGatewaySet
	}

	// resolve each hostname
	for host, addrs := range mgr.NameCache.Resolve(names) {
		gwsForHost := hostnameGateways[host]
		if len(addrs) == 0 {
			log.Warnf("could not resolve hostname %q for %d gateways", host, len(gwsForHost))
		}

		// expand each resolved address into a NetworkGateway
		for _, gw := range gwsForHost {
			for _, resolved := range addrs {
				// copy the base gateway to preserve the port/network, but update with the resolved IP
				resolvedGw := gw
				resolvedGw.Addr = resolved
				resolvedGatewaySet.Insert(resolvedGw)
			}
		}
	}
	return resolvedGatewaySet
}
// IsMultiNetworkEnabled reports whether any network gateways are known.
// Safe to call on a nil receiver (returns false).
func (gws *NetworkGateways) IsMultiNetworkEnabled() bool {
	if gws == nil {
		return false
	}
	gws.mu.RLock()
	defer gws.mu.RUnlock()
	return len(gws.byNetwork) != 0
}
// GetLBWeightScaleFactor returns the least common multiple of the number of gateways per network.
// The value is computed in update(); safe for concurrent use.
func (gws *NetworkGateways) GetLBWeightScaleFactor() uint32 {
	gws.mu.RLock()
	defer gws.mu.RUnlock()
	return gws.lcm
}
// AllGateways returns a sorted snapshot of all known gateways. Safe for concurrent use.
func (gws *NetworkGateways) AllGateways() []NetworkGateway {
	gws.mu.RLock()
	defer gws.mu.RUnlock()
	return gws.allGateways()
}
// allGateways flattens byNetwork into a single sorted slice.
// Callers must hold gws.mu (read or write).
func (gws *NetworkGateways) allGateways() []NetworkGateway {
	if gws.byNetwork == nil {
		return nil
	}
	// Pre-size the output to avoid repeated slice growth while appending.
	total := 0
	for _, gateways := range gws.byNetwork {
		total += len(gateways)
	}
	out := make([]NetworkGateway, 0, total)
	for _, gateways := range gws.byNetwork {
		out = append(out, gateways...)
	}
	return SortGateways(out)
}
// GatewaysForNetwork returns the (sorted) gateways for the given network, or nil if none.
func (gws *NetworkGateways) GatewaysForNetwork(nw network.ID) []NetworkGateway {
	gws.mu.RLock()
	defer gws.mu.RUnlock()
	if gws.byNetwork == nil {
		return nil
	}
	return gws.byNetwork[nw]
}
// GatewaysForNetworkAndCluster returns the (sorted) gateways for the given network and
// cluster combination, or nil if none.
func (gws *NetworkGateways) GatewaysForNetworkAndCluster(nw network.ID, c cluster.ID) []NetworkGateway {
	gws.mu.RLock()
	defer gws.mu.RUnlock()
	if gws.byNetworkAndCluster == nil {
		return nil
	}
	return gws.byNetworkAndCluster[networkAndClusterFor(nw, c)]
}
// networkAndCluster is a composite map key combining a network ID and a cluster ID.
type networkAndCluster struct {
	network network.ID
	cluster cluster.ID
}
// networkAndClusterForGateway builds the composite index key for a gateway.
func networkAndClusterForGateway(g *NetworkGateway) networkAndCluster {
	return networkAndClusterFor(g.Network, g.Cluster)
}
// networkAndClusterFor constructs a composite key from the given network and cluster IDs.
func networkAndClusterFor(nw network.ID, c cluster.ID) networkAndCluster {
	return networkAndCluster{network: nw, cluster: c}
}
// SortGateways sorts the array so that it's stable.
// Gateways are ordered by address first, then by port.
func SortGateways(gws []NetworkGateway) []NetworkGateway {
	return slices.SortFunc(gws, func(a, b NetworkGateway) int {
		if r := cmp.Compare(a.Addr, b.Addr); r != 0 {
			return r
		}
		return cmp.Compare(a.Port, b.Port)
	})
}
// gcd returns the greatest common divisor of x and y via the Euclidean algorithm.
// Precondition: y != 0 (y == 0 panics with a division-by-zero, as the original did).
func gcd(x, y int) int {
	for {
		r := x % y
		if r == 0 {
			return y
		}
		x, y = y, r
	}
}

// lcm returns the least common multiple of x and y.
// BUG FIX: compute x/gcd(x,y)*y instead of x*y/gcd(x,y) — dividing first avoids
// overflowing the intermediate product for large inputs. The division is exact
// because gcd(x, y) divides x.
func lcm(x, y int) int {
	return x / gcd(x, y) * y
}
// NetworkGatewaySet is a helper to manage a set of NetworkGateway instances.
// It is an alias, so sets.Set methods (Insert, InsertAll, Equals, ...) apply directly.
type NetworkGatewaySet = sets.Set[NetworkGateway]
var (
	// MinGatewayTTL is exported for testing
	// DNS TTLs below this are clamped up to avoid excessive refreshes/pushes.
	MinGatewayTTL = 30 * time.Second

	// MaxGatewayTTL caps how long a resolution is cached, mirroring CoreDNS:
	// https://github.com/coredns/coredns/blob/v1.10.1/plugin/pkg/dnsutil/ttl.go#L51
	MaxGatewayTTL = 1 * time.Hour
)
// networkGatewayNameCache caches DNS resolutions for hostname-addressed gateways,
// refreshes them in the background as TTLs expire, and notifies registered handlers
// when the resolved addresses change.
type networkGatewayNameCache struct {
	NetworkGatewaysHandler
	client *dnsClient
	// Mutex guards cache.
	sync.Mutex
	cache map[string]nameCacheEntry
}
// nameCacheEntry is a single cached DNS resolution.
type nameCacheEntry struct {
	// value holds the resolved addresses.
	value []string
	// expiry is when this resolution becomes stale (based on the record TTL).
	expiry time.Time
	// timer fires at TTL expiry to refresh the entry in the background.
	timer *time.Timer
}
// newNetworkGatewayNameCache builds a name cache backed by a DNS client configured
// from the local resolver settings.
func newNetworkGatewayNameCache() (*networkGatewayNameCache, error) {
	client, err := newClient()
	if err != nil {
		return nil, err
	}
	return newNetworkGatewayNameCacheWithClient(client), nil
}
// newNetworkGatewayNameCacheWithClient exported for test
// It builds a cache around the provided DNS client with an empty cache map.
func newNetworkGatewayNameCacheWithClient(c *dnsClient) *networkGatewayNameCache {
	return &networkGatewayNameCache{client: c, cache: map[string]nameCacheEntry{}}
}
// Resolve takes a list of hostnames and returns a map of names to addresses.
// Entries for names no longer requested are evicted as a side effect.
func (n *networkGatewayNameCache) Resolve(names sets.String) map[string][]string {
	n.Lock()
	defer n.Unlock()

	n.cleanupWatches(names)

	resolved := make(map[string][]string, len(names))
	for name := range names {
		resolved[name] = n.resolveFromCache(name)
	}
	return resolved
}
// cleanupWatches cancels any scheduled re-resolve for names we no longer care about.
// Callers must hold n.Mutex.
func (n *networkGatewayNameCache) cleanupWatches(names sets.String) {
	for name, entry := range n.cache {
		if !names.Contains(name) {
			entry.timer.Stop()
			delete(n.cache, name)
		}
	}
}
// resolveFromCache returns the cached addresses for name when still fresh,
// falling back to a synchronous resolution otherwise.
func (n *networkGatewayNameCache) resolveFromCache(name string) []string {
	entry, found := n.cache[name]
	if found && entry.expiry.After(time.Now()) {
		return entry.value
	}
	// ideally this will not happen more than once for each name and the cache auto-updates in the background
	// even if it does, this happens on the SotW ingestion path (kube or meshnetworks changes) and not xds push path.
	return n.resolveAndCache(name)
}
// resolveAndCache performs a fresh DNS resolution for name, stores the result, and
// schedules a background refresh when the TTL expires.
// Callers must hold n.Mutex.
func (n *networkGatewayNameCache) resolveAndCache(name string) []string {
	// Stop any pending refresh for the entry being replaced.
	entry, ok := n.cache[name]
	if ok {
		entry.timer.Stop()
	}
	delete(n.cache, name)
	addrs, ttl, err := n.resolve(name)
	// avoid excessive pushes due to small TTL
	if ttl < MinGatewayTTL {
		ttl = MinGatewayTTL
	}
	expiry := time.Now().Add(ttl)
	if err != nil {
		// gracefully retain old addresses in case the DNS server is unavailable
		// (entry.value is nil when there was no previous entry)
		addrs = entry.value
	}
	n.cache[name] = nameCacheEntry{
		value:  addrs,
		expiry: expiry,
		// TTL expires, try to refresh TODO should this be < ttl?
		timer: time.AfterFunc(ttl, n.refreshAndNotify(name)),
	}

	return addrs
}
// refreshAndNotify is triggered via time.AfterFunc and will recursively schedule itself that way until timer is cleaned
// up via cleanupWatches.
// The returned closure takes the cache lock itself, compares old and new addresses,
// and notifies gateway handlers only when the resolution actually changed.
func (n *networkGatewayNameCache) refreshAndNotify(name string) func() {
	return func() {
		log.Debugf("network gateways: refreshing DNS for %s", name)
		n.Lock()
		old := n.cache[name]
		// resolveAndCache re-schedules the next refresh via a new timer.
		addrs := n.resolveAndCache(name)
		n.Unlock()

		if !slices.Equal(old.value, addrs) {
			log.Debugf("network gateways: DNS for %s changed: %v -> %v", name, old.value, addrs)
			n.NotifyGatewayHandlers()
		}
	}
}
// resolve gets all the A and AAAA records for the given name, querying both record
// types concurrently. It returns the combined, sorted addresses, the smallest TTL
// observed across the responses, and an error only when BOTH queries fail (in which
// case the TTL is clamped to MinGatewayTTL for a quick retry).
func (n *networkGatewayNameCache) resolve(name string) ([]string, time.Duration, error) {
	ttl := MaxGatewayTTL
	var out []string
	errs := istiomultierror.New()

	// mu guards out, ttl and errs, which both lookup goroutines write.
	var mu sync.Mutex
	var wg sync.WaitGroup
	doResolve := func(dnsType uint16) {
		defer wg.Done()

		res := n.client.Query(new(dns.Msg).SetQuestion(dns.Fqdn(name), dnsType))

		mu.Lock()
		defer mu.Unlock()
		if res.Rcode == dns.RcodeServerFailure {
			errs = multierror.Append(errs, fmt.Errorf("upstream dns failure, qtype: %v", dnsType))
			return
		}
		for _, rr := range res.Answer {
			switch record := rr.(type) {
			case *dns.A:
				out = append(out, record.A.String())
			case *dns.AAAA:
				out = append(out, record.AAAA.String())
			}
		}
		// Track the smallest TTL across both responses.
		if nextTTL := minimalTTL(res); nextTTL < ttl {
			ttl = nextTTL
		}
	}

	wg.Add(2)
	go doResolve(dns.TypeA)
	go doResolve(dns.TypeAAAA)
	wg.Wait()
	// Sort for deterministic output so callers can meaningfully compare refreshes.
	sort.Strings(out)

	if errs.Len() == 2 {
		// return error only if all requests are failed
		return out, MinGatewayTTL, errs
	}

	return out, ttl, nil
}
// minimalTTL returns the smallest TTL found in the message's Answer, Ns and Extra
// sections (OPT pseudo-records in Extra are skipped, since they repurpose the TTL
// field for extended rcode and flags). Adapted from
// https://github.com/coredns/coredns/blob/v1.10.1/plugin/pkg/dnsutil/ttl.go
func minimalTTL(m *dns.Msg) time.Duration {
	// No records or OPT is the only record, return a short ttl as a fail safe.
	if len(m.Answer)+len(m.Ns) == 0 &&
		(len(m.Extra) == 0 || (len(m.Extra) == 1 && m.Extra[0].Header().Rrtype == dns.TypeOPT)) {
		return MinGatewayTTL
	}

	minTTL := MaxGatewayTTL
	lower := func(rrs []dns.RR, skipOPT bool) {
		for _, r := range rrs {
			if skipOPT && r.Header().Rrtype == dns.TypeOPT {
				// OPT records use TTL field for extended rcode and flags
				continue
			}
			if ttl := time.Duration(r.Header().Ttl) * time.Second; ttl < minTTL {
				minTTL = ttl
			}
		}
	}
	lower(m.Answer, false)
	lower(m.Ns, false)
	lower(m.Extra, true)
	return minTTL
}
// TODO share code with pkg/dns
// dnsClient wraps miekg/dns with an ordered list of upstream resolvers, read from
// /etc/resolv.conf (or NetworkGatewayTestDNSServers in tests).
type dnsClient struct {
	*dns.Client
	resolvConfServers []string
}
// NetworkGatewayTestDNSServers if set will ignore resolv.conf and use the given DNS servers for tests.
// Each entry should be a host:port address (see newClient).
var NetworkGatewayTestDNSServers []string
// newClient builds a dnsClient with 5s dial/read/write timeouts. Upstream servers
// come from NetworkGatewayTestDNSServers when set (tests), otherwise from
// /etc/resolv.conf.
func newClient() (*dnsClient, error) {
	servers := NetworkGatewayTestDNSServers
	if len(servers) == 0 {
		dnsConfig, err := dns.ClientConfigFromFile("/etc/resolv.conf")
		if err != nil {
			return nil, err
		}
		if dnsConfig != nil {
			for _, server := range dnsConfig.Servers {
				servers = append(servers, net.JoinHostPort(server, dnsConfig.Port))
			}
		}
		// TODO take search namespaces into account
		// TODO what about /etc/hosts?
	}

	timeout := 5 * time.Second
	return &dnsClient{
		Client: &dns.Client{
			DialTimeout:  timeout,
			ReadTimeout:  timeout,
			WriteTimeout: timeout,
		},
		// Copy so the client does not alias the test/server slice.
		resolvConfServers: append([]string(nil), servers...),
	}, nil
}
// getReqNames extracts the question names from a DNS request,
// for more informative logging of dns errors.
func getReqNames(req *dns.Msg) []string {
	// Pre-size to the number of questions to avoid re-allocation
	// (the original always reserved capacity 1).
	names := make([]string, 0, len(req.Question))
	for _, qq := range req.Question {
		names = append(names, qq.Name)
	}
	return names
}
// Query tries each configured upstream resolver in order until one serves the request.
// A transport error or SERVFAIL moves on to the next upstream; any other response is
// kept, stopping early on success. If every upstream fails, a synthetic SERVFAIL
// reply is returned (never nil).
func (c *dnsClient) Query(req *dns.Msg) *dns.Msg {
	var response *dns.Msg
	for _, upstream := range c.resolvConfServers {
		cResponse, _, err := c.Exchange(req, upstream)
		rcode := dns.RcodeServerFailure
		if err == nil && cResponse != nil {
			rcode = cResponse.Rcode
		}
		if rcode == dns.RcodeServerFailure {
			// RcodeServerFailure means the upstream cannot serve the request
			// https://github.com/coredns/coredns/blob/v1.10.1/plugin/forward/forward.go#L193
			log.Infof("upstream dns failure: %v: %v: %v", upstream, getReqNames(req), err)
			continue
		}
		// Keep this (possibly non-success) response; only retry further upstreams
		// for non-success codes.
		response = cResponse
		if rcode == dns.RcodeSuccess {
			break
		}
		codeString := dns.RcodeToString[rcode]
		log.Debugf("upstream dns error: %v: %v: %v", upstream, getReqNames(req), codeString)
	}
	if response == nil {
		// All upstreams failed; synthesize a SERVFAIL reply.
		response = new(dns.Msg)
		response.SetReply(req)
		response.Rcode = dns.RcodeServerFailure
	}
	return response
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"istio.io/api/type/v1beta1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
)
// policyTargetGetter is satisfied by policy messages that can be attached either
// directly via a targetRef or broadly via a workload selector.
type policyTargetGetter interface {
	GetTargetRef() *v1beta1.PolicyTargetReference
	GetSelector() *v1beta1.WorkloadSelector
}
// WorkloadSelectionOpts describes the workload a policy is being matched against.
type WorkloadSelectionOpts struct {
	// RootNamespace is the mesh root (config) namespace.
	RootNamespace string
	// Namespace is the workload's namespace.
	Namespace string
	// WorkloadLabels are the labels on the workload being matched.
	WorkloadLabels labels.Instance
	// IsWaypoint indicates the workload is a waypoint proxy.
	IsWaypoint bool
}
// policyMatch describes how (or whether) a policy applies to a workload.
type policyMatch string

const (
	// policyMatchSelector is the default behavior. If the workload matches the policy's selector, the policy is applied
	policyMatchSelector policyMatch = "selector"
	// policyMatchDirect is used when the policy has a targetRef, and the workload matches the targetRef.
	// Note that the actual targetRef matching is done within `getPolicyMatcher`
	policyMatchDirect policyMatch = "direct"
	// policyMatchIgnore indicates that there is no match between the workload and the policy, and the policy should be ignored
	policyMatchIgnore policyMatch = "ignore"
)
// KubernetesGatewayNameAndExists returns the gateway name from the workload labels,
// preferring the current label and falling back to the deprecated one.
func KubernetesGatewayNameAndExists(l labels.Instance) (string, bool) {
	if name, ok := l[constants.GatewayNameLabel]; ok {
		return name, true
	}
	// TODO: Remove deprecated gateway name label (1.22 or 1.23)
	name, ok := l[constants.DeprecatedGatewayNameLabel]
	return name, ok
}
// getPolicyMatcher decides how a policy applies to a workload:
//   - selector-based policies are ignored for Gateway API gateways when the workload is a
//     waypoint or selector-based gateway policy is disabled;
//   - targetRef-based policies are ignored for non-gateway workloads;
//   - for gateways with a targetRef, match directly when the ref points at this gateway;
//   - otherwise fall back to selector matching.
func getPolicyMatcher(kind config.GroupVersionKind, policyName string, opts WorkloadSelectionOpts, policy policyTargetGetter) policyMatch {
	gatewayName, isGatewayAPI := KubernetesGatewayNameAndExists(opts.WorkloadLabels)
	targetRef := policy.GetTargetRef()
	if isGatewayAPI && targetRef == nil && policy.GetSelector() != nil {
		if opts.IsWaypoint || !features.EnableSelectorBasedK8sGatewayPolicy {
			log.Debugf("Ignoring workload-scoped %s/%s %s.%s for gateway %s because it has no targetRef", kind.Group, kind.Kind, opts.Namespace, policyName, gatewayName)
			return policyMatchIgnore
		}
	}

	if !isGatewayAPI && targetRef != nil {
		// targetRef policies never apply to plain (non-gateway) workloads.
		return policyMatchIgnore
	}

	if isGatewayAPI && targetRef != nil {
		// There's a targetRef specified for this RA, and the proxy is a Gateway API Gateway. Use targetRef instead of workload selector
		// TODO: Account for `kind`s that are not `KubernetesGateway`
		if targetRef.GetGroup() == gvk.KubernetesGateway.Group &&
			targetRef.GetName() == gatewayName &&
			(targetRef.GetNamespace() == "" || targetRef.GetNamespace() == opts.Namespace) &&
			targetRef.GetKind() == gvk.KubernetesGateway.Kind {
			return policyMatchDirect
		}

		// This config doesn't match this workload. Ignore
		return policyMatchIgnore
	}

	// Default case
	return policyMatchSelector
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/networking/v1beta1"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/protomarshal"
)
// ProxyConfigs organizes ProxyConfig configuration by namespace.
type ProxyConfigs struct {
	// namespaceToProxyConfigs maps a namespace to its ProxyConfig resources,
	// ordered by creation time (see GetProxyConfigs).
	namespaceToProxyConfigs map[string][]*v1beta1.ProxyConfig

	// root namespace (the mesh config namespace; its ProxyConfigs apply mesh-wide)
	rootNamespace string
}
// EffectiveProxyConfig generates the correct merged ProxyConfig for a given ProxyConfigTarget.
// Precedence, highest first: proxy.istio.io/config annotation and workload-selector
// ProxyConfigs, then namespace-scoped ProxyConfigs, then root-namespace (global)
// ProxyConfigs, then the mesh default config. Returns nil when p or meta is nil.
func (p *ProxyConfigs) EffectiveProxyConfig(meta *NodeMetadata, mc *meshconfig.MeshConfig) *meshconfig.ProxyConfig {
	if p == nil || meta == nil {
		return nil
	}

	effectiveProxyConfig := mesh.DefaultProxyConfig()

	// Merge the proxy config from default config.
	effectiveProxyConfig = mergeWithPrecedence(mc.GetDefaultConfig(), effectiveProxyConfig)
	if p.rootNamespace != "" {
		effectiveProxyConfig = mergeWithPrecedence(p.mergedGlobalConfig(), effectiveProxyConfig)
	}

	if meta.Namespace != p.rootNamespace {
		namespacedConfig := p.mergedNamespaceConfig(meta.Namespace)
		effectiveProxyConfig = mergeWithPrecedence(namespacedConfig, effectiveProxyConfig)
	}

	workloadConfig := p.mergedWorkloadConfig(meta.Namespace, meta.Labels)

	// Check for proxy.istio.io/config annotation and merge it with lower priority than the
	// workload-matching ProxyConfig CRs.
	if v, ok := meta.Annotations[annotation.ProxyConfig.Name]; ok {
		pca, err := proxyConfigFromAnnotation(v)
		if err == nil {
			workloadConfig = mergeWithPrecedence(workloadConfig, pca)
		}
	}
	effectiveProxyConfig = mergeWithPrecedence(workloadConfig, effectiveProxyConfig)

	return effectiveProxyConfig
}
// GetProxyConfigs loads all ProxyConfig resources from the store and indexes them
// by namespace, ordered by creation time.
func GetProxyConfigs(store ConfigStore, mc *meshconfig.MeshConfig) *ProxyConfigs {
	pcs := &ProxyConfigs{
		namespaceToProxyConfigs: map[string][]*v1beta1.ProxyConfig{},
		rootNamespace:           mc.GetRootNamespace(),
	}

	resources := store.List(gvk.ProxyConfig, NamespaceAll)
	sortConfigByCreationTime(resources)

	byNamespace := pcs.namespaceToProxyConfigs
	for _, res := range resources {
		byNamespace[res.Namespace] = append(byNamespace[res.Namespace], res.Spec.(*v1beta1.ProxyConfig))
	}

	return pcs
}
// mergedGlobalConfig returns the mesh-wide ProxyConfig, i.e. the first selector-less
// ProxyConfig in the root namespace (nil if none).
func (p *ProxyConfigs) mergedGlobalConfig() *meshconfig.ProxyConfig {
	return p.mergedNamespaceConfig(p.rootNamespace)
}
// mergedNamespaceConfig merges ProxyConfig resources matching the given namespace.
// Only selector-less resources are considered; the first (oldest) match wins.
func (p *ProxyConfigs) mergedNamespaceConfig(namespace string) *meshconfig.ProxyConfig {
	for _, pc := range p.namespaceToProxyConfigs[namespace] {
		if pc.GetSelector() != nil {
			continue
		}
		// return the first match. this is consistent since
		// we sort the resources by creation time beforehand.
		return toMeshConfigProxyConfig(pc)
	}
	return nil
}
// mergedWorkloadConfig merges ProxyConfig resources matching the given namespace and labels.
// Only resources with a non-empty selector are considered; the first (oldest) whose
// selector is a subset of the workload labels wins.
func (p *ProxyConfigs) mergedWorkloadConfig(namespace string, l map[string]string) *meshconfig.ProxyConfig {
	for _, pc := range p.namespaceToProxyConfigs[namespace] {
		matchLabels := pc.GetSelector().GetMatchLabels()
		if len(matchLabels) == 0 {
			continue
		}
		if labels.Instance(matchLabels).SubsetOf(l) {
			// return the first match. this is consistent since
			// we sort the resources by creation time beforehand.
			return toMeshConfigProxyConfig(pc)
		}
	}
	return nil
}
// mergeWithPrecedence merges the ProxyConfigs together with earlier items having
// the highest priority. nil entries and entries that fail to serialize or merge
// are skipped.
func mergeWithPrecedence(pcs ...*meshconfig.ProxyConfig) *meshconfig.ProxyConfig {
	merged := &meshconfig.ProxyConfig{}
	// Iterate lowest-priority first so later (higher-priority) configs overwrite.
	for i := len(pcs) - 1; i >= 0; i-- {
		if pcs[i] == nil {
			continue
		}
		proxyConfigYaml, err := protomarshal.ToYAML(pcs[i])
		if err != nil {
			continue
		}
		mergedConfig, err := mesh.MergeProxyConfig(proxyConfigYaml, merged)
		// BUG FIX: the original checked `err == nil` and skipped, keeping the merge
		// result only on FAILURE — i.e. every successful merge was discarded.
		// Keep the result only when the merge succeeded.
		if err != nil {
			continue
		}
		merged = mergedConfig
	}
	return merged
}
// toMeshConfigProxyConfig converts a ProxyConfig CR spec into the meshconfig form,
// copying only the fields that are set.
func toMeshConfigProxyConfig(pc *v1beta1.ProxyConfig) *meshconfig.ProxyConfig {
	out := &meshconfig.ProxyConfig{}
	if c := pc.Concurrency; c != nil {
		out.Concurrency = c
	}
	if env := pc.EnvironmentVariables; env != nil {
		out.ProxyMetadata = env
	}
	if img := pc.Image; img != nil {
		out.Image = img
	}
	return out
}
// proxyConfigFromAnnotation parses the proxy.istio.io/config annotation value (YAML)
// into a ProxyConfig.
func proxyConfigFromAnnotation(pcAnnotation string) (*meshconfig.ProxyConfig, error) {
	out := &meshconfig.ProxyConfig{}
	err := protomarshal.ApplyYAML(pcAnnotation, out)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
"istio.io/istio/pkg/util/identifier"
"istio.io/istio/pkg/util/sets"
)
// ProxyView provides a restricted view of mesh endpoints for a Proxy.
type ProxyView interface {
	fmt.Stringer
	// IsVisible reports whether the given endpoint is visible to the proxy.
	IsVisible(ep *IstioEndpoint) bool
}

// ProxyViewAll is a ProxyView where all endpoints are visible.
var ProxyViewAll ProxyView = proxyViewAll{}

// proxyViewAll is the trivial ProxyView that never filters any endpoint.
type proxyViewAll struct{}

// IsVisible always returns true: every endpoint is visible.
func (v proxyViewAll) IsVisible(*IstioEndpoint) bool {
	return true
}

// String returns the empty string since no restriction applies.
func (v proxyViewAll) String() string {
	return ""
}
// newProxyView builds a ProxyView for the given proxy. A nil proxy, nil
// metadata, or an empty RequestedNetworkView yields the unrestricted view.
func newProxyView(node *Proxy) ProxyView {
	if node == nil || node.Metadata == nil || len(node.Metadata.RequestedNetworkView) == 0 {
		return ProxyViewAll
	}
	// Endpoints on a requested network — or with no network set (Undefined) —
	// are visible; everything else is filtered out.
	visible := sets.New(node.Metadata.RequestedNetworkView...).Insert(identifier.Undefined)
	return &proxyViewImpl{
		visible: visible,
		getValue: func(ep *IstioEndpoint) string {
			return ep.Network.String()
		},
	}
}
// proxyViewImpl filters endpoints by comparing a derived value (via getValue)
// against an allow-set.
type proxyViewImpl struct {
	// visible is the set of values (networks) the proxy may see.
	visible sets.String
	// getValue extracts the comparison value from an endpoint.
	getValue func(ep *IstioEndpoint) string
}

// IsVisible reports whether the endpoint's derived value is in the allow-set.
func (v *proxyViewImpl) IsVisible(ep *IstioEndpoint) bool {
	return v.visible.Contains(v.getValue(ep))
}

// String returns the sorted, comma-joined allow-set for logging/debugging.
func (v *proxyViewImpl) String() string {
	return strings.Join(sets.SortedList(v.visible), ",")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"cmp"
"encoding/json"
"fmt"
"math"
"net/netip"
"sort"
"strings"
"sync"
"time"
"go.uber.org/atomic"
"k8s.io/apimachinery/pkg/types"
extensions "istio.io/api/extensions/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/api/security/v1beta1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/config/security"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi"
)
// Metrics is an interface for capturing metrics on a per-node basis.
type Metrics interface {
	// AddMetric will add an event to the metric for the given node.
	AddMetric(metric monitoring.Metric, key string, proxyID, msg string)
}

// Compile-time check that PushContext implements Metrics.
var _ Metrics = &PushContext{}
// serviceIndex is an index of all services by various fields for easy access during push.
type serviceIndex struct {
	// privateByNamespace are services that are reachable within the same namespace, with exportTo "."
	privateByNamespace map[string][]*Service
	// public are services reachable within the mesh with exportTo "*"
	public []*Service
	// exportedToNamespace are services that were made visible to this namespace
	// by an exportTo explicitly specifying this namespace.
	exportedToNamespace map[string][]*Service

	// HostnameAndNamespace has all services, indexed by hostname then namespace.
	HostnameAndNamespace map[host.Name]map[string]*Service `json:"-"`

	// instancesByPort contains a map of service key and instances by port. It is stored here
	// to avoid recomputations during push. This caches instanceByPort calls with empty labels.
	// Call InstancesByPort directly when instances need to be filtered by actual labels.
	instancesByPort map[string]map[int][]*IstioEndpoint
}
// newServiceIndex returns an empty serviceIndex with every map and slice
// initialized, so callers never need nil checks.
func newServiceIndex() serviceIndex {
	idx := serviceIndex{}
	idx.public = []*Service{}
	idx.privateByNamespace = map[string][]*Service{}
	idx.exportedToNamespace = map[string][]*Service{}
	idx.HostnameAndNamespace = map[host.Name]map[string]*Service{}
	idx.instancesByPort = map[string]map[int][]*IstioEndpoint{}
	return idx
}
// exportToDefaults contains the default exportTo values, synthesized from mesh config.
type exportToDefaults struct {
	// service is the default visibility for services.
	service sets.Set[visibility.Instance]
	// virtualService is the default visibility for virtual services.
	virtualService sets.Set[visibility.Instance]
	// destinationRule is the default visibility for destination rules.
	destinationRule sets.Set[visibility.Instance]
}

// virtualServiceIndex is the index of virtual services by various fields.
type virtualServiceIndex struct {
	// exportedToNamespaceByGateway holds virtual services explicitly exported to a namespace, keyed by namespace,gateway.
	exportedToNamespaceByGateway map[types.NamespacedName][]config.Config
	// this contains all the virtual services with exportTo "." and current namespace. The keys are namespace,gateway.
	privateByNamespaceAndGateway map[types.NamespacedName][]config.Config
	// This contains all virtual services whose exportTo is "*", keyed by gateway
	publicByGateway map[string][]config.Config
	// root vs namespace/name ->delegate vs virtualservice gvk/namespace/name
	delegates map[ConfigKey][]ConfigKey

	// This contains destination hosts of virtual services, keyed by gateway's namespace/name,
	// only used when PILOT_FILTER_GATEWAY_CLUSTER_CONFIG is enabled
	destinationsByGateway map[string]sets.String

	// Map of VS hostname -> referenced hostnames
	referencedDestinations map[string]sets.String
}
// newVirtualServiceIndex returns an empty virtualServiceIndex. The
// destinationsByGateway map is only allocated when gateway cluster config
// filtering is enabled, mirroring how it is consumed.
func newVirtualServiceIndex() virtualServiceIndex {
	idx := virtualServiceIndex{
		publicByGateway:              map[string][]config.Config{},
		privateByNamespaceAndGateway: map[types.NamespacedName][]config.Config{},
		exportedToNamespaceByGateway: map[types.NamespacedName][]config.Config{},
		delegates:                    map[ConfigKey][]ConfigKey{},
		referencedDestinations:       map[string]sets.String{},
	}
	if features.FilterGatewayClusterConfig {
		idx.destinationsByGateway = map[string]sets.String{}
	}
	return idx
}
// destinationRuleIndex is the index of destination rules by various fields.
type destinationRuleIndex struct {
	// namespaceLocal contains all public/private dest rules pertaining to a service defined in a given namespace.
	namespaceLocal map[string]*consolidatedDestRules
	// exportedByNamespace contains all dest rules pertaining to a service exported by a namespace.
	exportedByNamespace map[string]*consolidatedDestRules
	// rootNamespaceLocal holds the dest rules defined in the root (config) namespace.
	rootNamespaceLocal *consolidatedDestRules
}
// newDestinationRuleIndex returns an empty destinationRuleIndex with the
// namespace maps initialized (rootNamespaceLocal stays nil until populated).
func newDestinationRuleIndex() destinationRuleIndex {
	idx := destinationRuleIndex{}
	idx.namespaceLocal = map[string]*consolidatedDestRules{}
	idx.exportedByNamespace = map[string]*consolidatedDestRules{}
	return idx
}
// sidecarIndex is the index of sidecar rules
type sidecarIndex struct {
	// user configured sidecars for each namespace if available.
	sidecarsByNamespace map[string][]*SidecarScope
	// the Sidecar for the root namespace (if present). This applies to any namespace without its own Sidecar.
	meshRootSidecarConfig *config.Config
	// meshRootSidecarsByNamespace contains the default sidecar for namespaces that do not have a sidecar.
	// These are converted from root namespace sidecar if it exists.
	// These are lazy-loaded. Access protected by derivedSidecarMutex.
	meshRootSidecarsByNamespace map[string]*SidecarScope
	// defaultSidecarsByNamespace contains the default sidecar for namespaces that do not have a sidecar,
	// These are *always* computed from DefaultSidecarScopeForNamespace i.e. a sidecar that has listeners
	// for all services in the mesh. This will be used if there is no sidecar specified in root namespace.
	// These are lazy-loaded. Access protected by derivedSidecarMutex.
	defaultSidecarsByNamespace map[string]*SidecarScope
	// mutex to protect derived sidecars i.e. not specified by user.
	derivedSidecarMutex *sync.RWMutex
}
// newSidecarIndex returns an empty sidecarIndex with all maps and the
// derived-sidecar mutex initialized.
func newSidecarIndex() sidecarIndex {
	idx := sidecarIndex{}
	idx.sidecarsByNamespace = map[string][]*SidecarScope{}
	idx.meshRootSidecarsByNamespace = map[string]*SidecarScope{}
	idx.defaultSidecarsByNamespace = map[string]*SidecarScope{}
	idx.derivedSidecarMutex = &sync.RWMutex{}
	return idx
}
// gatewayIndex is the index of gateways by various fields.
type gatewayIndex struct {
	// namespace contains gateways by namespace.
	namespace map[string][]config.Config
	// all contains all gateways.
	all []config.Config
}
// newGatewayIndex returns an empty gatewayIndex with its map and slice initialized.
func newGatewayIndex() gatewayIndex {
	idx := gatewayIndex{}
	idx.namespace = map[string][]config.Config{}
	idx.all = []config.Config{}
	return idx
}
// serviceAccountKey keys the serviceAccounts cache by service hostname and namespace.
type serviceAccountKey struct {
	hostname  host.Name
	namespace string
}
// PushContext tracks the status of a push - metrics and errors.
// Metrics are reset after a push - at the beginning all
// values are zero, and when push completes the status is reset.
// The struct is exposed in a debug endpoint - fields public to allow
// easy serialization as json.
type PushContext struct {
	// proxyStatusMutex guards ProxyStatus.
	proxyStatusMutex sync.RWMutex
	// ProxyStatus is keyed by the error code, and holds a map keyed
	// by the ID.
	ProxyStatus map[string]map[string]ProxyPushStatus

	// Synthesized from env.Mesh
	exportToDefaults exportToDefaults

	// ServiceIndex is the index of services by various fields.
	ServiceIndex serviceIndex

	// serviceAccounts contains a map of hostname and port to service accounts.
	serviceAccounts map[serviceAccountKey][]string

	// virtualServiceIndex is the index of virtual services by various fields.
	virtualServiceIndex virtualServiceIndex

	// destinationRuleIndex is the index of destination rules by various fields.
	destinationRuleIndex destinationRuleIndex

	// gatewayIndex is the index of gateways.
	gatewayIndex gatewayIndex

	// clusterLocalHosts extracted from the MeshConfig
	clusterLocalHosts ClusterLocalHosts

	// sidecarIndex stores sidecar resources
	sidecarIndex sidecarIndex

	// envoy filters for each namespace including global config namespace
	envoyFiltersByNamespace map[string][]*EnvoyFilterWrapper

	// wasm plugins for each namespace including global config namespace
	wasmPluginsByNamespace map[string][]*WasmPluginWrapper

	// AuthnPolicies contains Authn policies by namespace.
	AuthnPolicies *AuthenticationPolicies `json:"-"`

	// AuthzPolicies stores the existing authorization policies in the cluster. Could be nil if there
	// are no authorization policies in the cluster.
	AuthzPolicies *AuthorizationPolicies `json:"-"`

	// Telemetry stores the existing Telemetry resources for the cluster.
	Telemetry *Telemetries `json:"-"`

	// ProxyConfig stores the existing ProxyConfig resources for the cluster.
	ProxyConfigs *ProxyConfigs `json:"-"`

	// The following data is either a global index or used in the inbound path.
	// Namespace specific views do not apply here.

	// Mesh configuration for the mesh.
	Mesh *meshconfig.MeshConfig `json:"-"`

	// PushVersion describes the push version this push context was computed for
	PushVersion string

	// LedgerVersion is the version of the configuration ledger
	LedgerVersion string

	// JwtKeyResolver holds a reference to the JWT key resolver instance.
	JwtKeyResolver *JwksResolver

	// GatewayAPIController holds a reference to the gateway API controller.
	GatewayAPIController GatewayController

	// cache gateways addresses for each network
	// this is mainly used for kubernetes multi-cluster scenario
	networkMgr *NetworkManager

	// Networks holds the mesh networks configuration.
	Networks *meshconfig.MeshNetworks

	// InitDone is set once initialization has completed.
	InitDone atomic.Bool
	// initializeMutex serializes initialization of the push context.
	initializeMutex sync.Mutex
	// ambientIndex provides ambient-mesh lookups.
	ambientIndex AmbientIndexes
}
// consolidatedDestRules holds merged destination rules for a scope (namespace
// or exported set), split by whether the host is a wildcard.
type consolidatedDestRules struct {
	// Map of dest rule host to the list of namespaces to which this destination rule has been exported to
	exportTo map[host.Name]sets.Set[visibility.Instance]
	// Map of dest rule host and the merged destination rules for that host.
	// Only stores specific non-wildcard destination rules
	specificDestRules map[host.Name][]*ConsolidatedDestRule
	// Map of dest rule host and the merged destination rules for that host.
	// Only stores wildcard destination rules
	wildcardDestRules map[host.Name][]*ConsolidatedDestRule
}

// ConsolidatedDestRule represents a dr and from which it is consolidated.
type ConsolidatedDestRule struct {
	// rule is merged from the following destinationRules.
	rule *config.Config
	// the original dest rules from which above rule is merged.
	from []types.NamespacedName
}
// XDSUpdater is used for direct updates of the xDS model and incremental push.
// Pilot uses multiple registries - for example each K8S cluster is a registry
// instance. Each registry is responsible for tracking a set
// of endpoints associated with mesh services, and calling the EDSUpdate on changes.
// A registry may group endpoints for a service in smaller subsets - for example by
// deployment, or to deal with very large number of endpoints for a service. We want
// to avoid passing around large objects - like full list of endpoints for a registry,
// or the full list of endpoints for a service across registries, since it limits
// scalability.
//
// Future optimizations will include grouping the endpoints by labels, gateway or region to
// reduce the time when subsetting or split-horizon is used. This design assumes pilot
// tracks all endpoints in the mesh and they fit in RAM - so limit is few M endpoints.
// It is possible to split the endpoint tracking in future.
type XDSUpdater interface {
	// EDSUpdate is called when the list of endpoints or labels in a Service is changed.
	// For each cluster and hostname, the full list of active endpoints (including empty list)
	// must be sent. The shard name is used as a key - current implementation is using the
	// registry name.
	EDSUpdate(shard ShardKey, hostname string, namespace string, entry []*IstioEndpoint)

	// EDSCacheUpdate is called when the list of endpoints or labels in a Service is changed.
	// For each cluster and hostname, the full list of active endpoints (including empty list)
	// must be sent. The shard name is used as a key - current implementation is using the
	// registry name.
	// Note: the difference with `EDSUpdate` is that it only updates the cache rather than requesting a push
	EDSCacheUpdate(shard ShardKey, hostname string, namespace string, entry []*IstioEndpoint)

	// SvcUpdate is called when a service definition is updated/deleted.
	SvcUpdate(shard ShardKey, hostname string, namespace string, event Event)

	// ConfigUpdate is called to notify the XDS server of config updates and request a push.
	// The requests may be collapsed and throttled.
	ConfigUpdate(req *PushRequest)

	// ProxyUpdate is called to notify the XDS server to send a push to the specified proxy.
	// The requests may be collapsed and throttled.
	ProxyUpdate(clusterID cluster.ID, ip string)

	// RemoveShard removes all endpoints for the given shard key
	RemoveShard(shardKey ShardKey)
}
// PushRequest defines a request to push to proxies
// It is used to send updates to the config update debouncer and pass to the PushQueue.
type PushRequest struct {
	// Full determines whether a full push is required or not. If false, an incremental update will be sent.
	// Incremental pushes:
	// * Do not recompute the push context
	// * Do not recompute proxy state (such as ServiceInstances)
	// * Are not reported in standard metrics such as push time
	// As a result, configuration updates should never be incremental. Generally, only EDS will set this, but
	// in the future SDS will as well.
	Full bool

	// ConfigsUpdated keeps track of configs that have changed.
	// This is used as an optimization to avoid unnecessary pushes to proxies that are scoped with a Sidecar.
	// If this is empty, then all proxies will get an update.
	// Otherwise only proxies that depend on these configs will get an update.
	// The kind of resources are defined in pkg/config/schemas.
	ConfigsUpdated sets.Set[ConfigKey]

	// Push stores the push context to use for the update. This may initially be nil, as we will
	// debounce changes before a PushContext is eventually created.
	Push *PushContext

	// Start represents the time a push was started. This represents the time of adding to the PushQueue.
	// Note that this does not include time spent debouncing.
	Start time.Time

	// Reason represents the reason for requesting a push. This should only be a fixed set of values,
	// to avoid unbounded cardinality in metrics. If this is not set, it may be automatically filled in later.
	// There should only be multiple reasons if the push request is the result of two distinct triggers, rather than
	// classifying a single trigger as having multiple reasons.
	Reason ReasonStats

	// Delta defines the resources that were added or removed as part of this push request.
	// This is set only on requests from the client which change the set of resources they (un)subscribe from.
	Delta ResourceDelta
}
// ResourceDelta records the difference in requested resources by an XDS client
type ResourceDelta struct {
	// Subscribed indicates the client requested these additional resources
	Subscribed sets.String
	// Unsubscribed indicates the client no longer requires these resources
	Unsubscribed sets.String
}
// IsEmpty reports whether the delta carries no subscription changes at all.
func (rd ResourceDelta) IsEmpty() bool {
	if len(rd.Subscribed) > 0 {
		return false
	}
	return len(rd.Unsubscribed) == 0
}
// ReasonStats tracks the number of push triggers recorded per trigger reason.
type ReasonStats map[TriggerReason]int
// NewReasonStats builds a ReasonStats counting each of the given reasons once.
func NewReasonStats(reasons ...TriggerReason) ReasonStats {
	stats := make(ReasonStats, len(reasons))
	for _, reason := range reasons {
		stats.Add(reason)
	}
	return stats
}
// Add records one occurrence of the given trigger reason.
func (r ReasonStats) Add(reason TriggerReason) {
	r[reason] = r[reason] + 1
}
// Merge adds all of other's counts into r, mutating r in place.
func (r ReasonStats) Merge(other ReasonStats) {
	for reason := range other {
		r[reason] += other[reason]
	}
}
// CopyMerge returns the combined counts of r and other without mutating
// either. When one side is empty, the other is returned directly (no copy).
func (r ReasonStats) CopyMerge(other ReasonStats) ReasonStats {
	switch {
	case len(r) == 0:
		return other
	case len(other) == 0:
		return r
	}
	out := make(ReasonStats, len(r)+len(other))
	out.Merge(r)
	out.Merge(other)
	return out
}
// Count returns the total number of recorded triggers across all reasons.
func (r ReasonStats) Count() int {
	total := 0
	for _, c := range r {
		total += c
	}
	return total
}
// Has reports whether at least one trigger of the given reason was recorded.
func (r ReasonStats) Has(reason TriggerReason) bool {
	count := r[reason]
	return count > 0
}
// TriggerReason identifies what caused a push to be requested.
type TriggerReason string

// If adding a new reason, update xds/monitoring.go:triggerMetric
const (
	// EndpointUpdate describes a push triggered by an Endpoint change
	EndpointUpdate TriggerReason = "endpoint"
	// HeadlessEndpointUpdate describes a push triggered by an Endpoint change for headless service
	HeadlessEndpointUpdate TriggerReason = "headlessendpoint"
	// ConfigUpdate describes a push triggered by a config (generally an Istio CRD) change.
	ConfigUpdate TriggerReason = "config"
	// ServiceUpdate describes a push triggered by a Service change
	ServiceUpdate TriggerReason = "service"
	// ProxyUpdate describes a push triggered by a change to an individual proxy (such as label change)
	ProxyUpdate TriggerReason = "proxy"
	// GlobalUpdate describes a push triggered by a change to global config, such as mesh config
	GlobalUpdate TriggerReason = "global"
	// AmbientUpdate describes a push triggered by a change to ambient mesh config
	AmbientUpdate TriggerReason = "ambient"
	// UnknownTrigger describes a push triggered by an unknown reason
	UnknownTrigger TriggerReason = "unknown"
	// DebugTrigger describes a push triggered for debugging
	DebugTrigger TriggerReason = "debug"
	// SecretTrigger describes a push triggered for a Secret change
	SecretTrigger TriggerReason = "secret"
	// NetworksTrigger describes a push triggered for Networks change
	NetworksTrigger TriggerReason = "networks"
	// ProxyRequest describes a push triggered based on proxy request
	ProxyRequest TriggerReason = "proxyrequest"
	// NamespaceUpdate describes a push triggered by a Namespace change
	NamespaceUpdate TriggerReason = "namespace"
	// ClusterUpdate describes a push triggered by a Cluster change
	ClusterUpdate TriggerReason = "cluster"
)
// Merge two update requests together
// Merge behaves similarly to a list append; usage should in the form `a = a.merge(b)`.
// Importantly, Merge may decide to allocate a new PushRequest object or reuse the existing one - both
// inputs should not be used after completion.
func (pr *PushRequest) Merge(other *PushRequest) *PushRequest {
	if pr == nil {
		return other
	}
	if other == nil {
		return pr
	}

	// Keep the first (older) start time

	// Merge the two reasons. Note that we shouldn't deduplicate here, or we would under count
	if len(other.Reason) > 0 {
		if pr.Reason == nil {
			// Consistency fix: use the ReasonStats named type (identical
			// underlying type) as everywhere else in this file.
			pr.Reason = make(ReasonStats)
		}
		pr.Reason.Merge(other.Reason)
	}

	// If either is full we need a full push
	pr.Full = pr.Full || other.Full

	// The other push context is presumed to be later and more up to date
	if other.Push != nil {
		pr.Push = other.Push
	}

	// Do not merge ConfigsUpdated when either side is empty: an empty set means
	// "push everything", and that must survive the merge.
	if len(pr.ConfigsUpdated) == 0 || len(other.ConfigsUpdated) == 0 {
		pr.ConfigsUpdated = nil
	} else {
		// Use sets.Set.Merge for consistency with CopyMerge below.
		pr.ConfigsUpdated.Merge(other.ConfigsUpdated)
	}

	return pr
}
// CopyMerge two update requests together. Unlike Merge, this will not mutate either input.
// This should be used when we are modifying a shared PushRequest (typically any time it's in the context
// of a single proxy)
func (pr *PushRequest) CopyMerge(other *PushRequest) *PushRequest {
	if pr == nil {
		return other
	}
	if other == nil {
		return pr
	}

	var reason ReasonStats
	// Only allocate when there is at least one reason on either side.
	if len(pr.Reason)+len(other.Reason) > 0 {
		reason = make(ReasonStats)
		reason.Merge(pr.Reason)
		reason.Merge(other.Reason)
	}
	merged := &PushRequest{
		// Keep the first (older) start time
		Start: pr.Start,

		// If either is full we need a full push
		Full: pr.Full || other.Full,

		// The other push context is presumed to be later and more up to date
		Push: other.Push,

		// Merge the two reasons. Note that we shouldn't deduplicate here, or we would under count
		Reason: reason,
	}

	// Do not merge when any one is empty: an empty ConfigsUpdated means
	// "push everything", which must be preserved in the result.
	if len(pr.ConfigsUpdated) > 0 && len(other.ConfigsUpdated) > 0 {
		merged.ConfigsUpdated = make(sets.Set[ConfigKey], len(pr.ConfigsUpdated)+len(other.ConfigsUpdated))
		merged.ConfigsUpdated.Merge(pr.ConfigsUpdated)
		merged.ConfigsUpdated.Merge(other.ConfigsUpdated)
	}

	return merged
}
// IsRequest reports whether this push was triggered solely by a proxy request.
func (pr *PushRequest) IsRequest() bool {
	if len(pr.Reason) != 1 {
		return false
	}
	return pr.Reason.Has(ProxyRequest)
}
// IsProxyUpdate reports whether a proxy update is among the push reasons.
func (pr *PushRequest) IsProxyUpdate() bool {
	return pr.Reason.Has(ProxyUpdate)
}
// PushReason returns " request" when the push was proxy-requested, and the
// empty string otherwise (used to build log messages).
func (pr *PushRequest) PushReason() string {
	if !pr.IsRequest() {
		return ""
	}
	return " request"
}
// ProxyPushStatus represents an event captured during config push to proxies.
// It may contain additional message and the affected proxy.
type ProxyPushStatus struct {
	// Proxy is the ID of the proxy the event applies to.
	Proxy string `json:"proxy,omitempty"`
	// Message carries additional detail about the event.
	Message string `json:"message,omitempty"`
}
// AddMetric will add an event to the metric for the given key/proxy. It is
// safe to call on a nil PushContext, in which case the event is only logged.
func (ps *PushContext) AddMetric(metric monitoring.Metric, key string, proxyID, msg string) {
	if ps == nil {
		log.Infof("Metric without context %s %v %s", key, proxyID, msg)
		return
	}
	ps.proxyStatusMutex.Lock()
	defer ps.proxyStatusMutex.Unlock()

	name := metric.Name()
	metricMap, found := ps.ProxyStatus[name]
	if !found {
		metricMap = map[string]ProxyPushStatus{}
		ps.ProxyStatus[name] = metricMap
	}
	metricMap[key] = ProxyPushStatus{Message: msg, Proxy: proxyID}
}
var (
	// EndpointNoPod tracks endpoints without an associated pod. This is an error condition, since
	// we can't figure out the labels. It may be a transient problem, if endpoint is processed before
	// pod.
	EndpointNoPod = monitoring.NewGauge(
		"endpoint_no_pod",
		"Endpoints without an associated pod.",
	)

	// ProxyStatusNoService represents proxies not selected by any service
	// This can be normal - for workloads that act only as client, or are not covered by a Service.
	// It can also be an error, for example in cases the Endpoint list of a service was not updated by the time
	// the sidecar calls.
	// Updated by GetProxyServiceTargets
	ProxyStatusNoService = monitoring.NewGauge(
		"pilot_no_ip",
		"Pods not found in the endpoint table, possibly invalid.",
	)

	// ProxyStatusEndpointNotReady represents proxies found not be ready.
	// Updated by GetProxyServiceTargets. Normal condition when starting
	// an app with readiness, error if it doesn't change to 0.
	ProxyStatusEndpointNotReady = monitoring.NewGauge(
		"pilot_endpoint_not_ready",
		"Endpoint found in unready state.",
	)

	// ProxyStatusConflictOutboundListenerTCPOverTCP metric tracks number of
	// TCP listeners that conflicted with existing TCP listeners on same port
	ProxyStatusConflictOutboundListenerTCPOverTCP = monitoring.NewGauge(
		"pilot_conflict_outbound_listener_tcp_over_current_tcp",
		"Number of conflicting tcp listeners with current tcp listener.",
	)

	// ProxyStatusConflictInboundListener tracks cases of multiple inbound
	// listeners - 2 services selecting the same port of the pod.
	ProxyStatusConflictInboundListener = monitoring.NewGauge(
		"pilot_conflict_inbound_listener",
		"Number of conflicting inbound listeners.",
	)

	// DuplicatedClusters tracks duplicate clusters seen while computing CDS
	DuplicatedClusters = monitoring.NewGauge(
		"pilot_duplicate_envoy_clusters",
		"Duplicate envoy clusters caused by service entries with same hostname",
	)

	// DNSNoEndpointClusters tracks dns clusters without endpoints
	DNSNoEndpointClusters = monitoring.NewGauge(
		"pilot_dns_cluster_without_endpoints",
		"DNS clusters without endpoints caused by the endpoint field in "+
			"STRICT_DNS type cluster is not set or the corresponding subset cannot select any endpoint",
	)

	// ProxyStatusClusterNoInstances tracks clusters (services) without workloads.
	ProxyStatusClusterNoInstances = monitoring.NewGauge(
		"pilot_eds_no_instances",
		"Number of clusters without instances.",
	)

	// DuplicatedDomains tracks rejected VirtualServices due to duplicated hostname.
	DuplicatedDomains = monitoring.NewGauge(
		"pilot_vservice_dup_domain",
		"Virtual services with dup domains.",
	)

	// DuplicatedSubsets tracks duplicate subsets that we rejected while merging multiple destination rules for same host
	DuplicatedSubsets = monitoring.NewGauge(
		"pilot_destrule_subsets",
		"Duplicate subsets across destination rules for same host",
	)

	// totalVirtualServices tracks the total number of virtual service
	totalVirtualServices = monitoring.NewGauge(
		"pilot_virt_services",
		"Total virtual services known to pilot.",
	)

	// LastPushStatus preserves the metrics and data collected during lasts global push.
	// It can be used by debugging tools to inspect the push event. It will be reset after each push with the
	// new version.
	LastPushStatus *PushContext
	// LastPushMutex will protect the LastPushStatus
	LastPushMutex sync.Mutex

	// All metrics we registered. Iterated by UpdateMetrics to export the
	// per-metric counts in ProxyStatus.
	metrics = []monitoring.Metric{
		DNSNoEndpointClusters,
		EndpointNoPod,
		ProxyStatusNoService,
		ProxyStatusEndpointNotReady,
		ProxyStatusConflictOutboundListenerTCPOverTCP,
		ProxyStatusConflictInboundListener,
		DuplicatedClusters,
		ProxyStatusClusterNoInstances,
		DuplicatedDomains,
		DuplicatedSubsets,
	}
)
// NewPushContext creates a new PushContext structure to track push status,
// with all of its indexes and maps pre-initialized.
func NewPushContext() *PushContext {
	ctx := &PushContext{}
	ctx.ServiceIndex = newServiceIndex()
	ctx.virtualServiceIndex = newVirtualServiceIndex()
	ctx.destinationRuleIndex = newDestinationRuleIndex()
	ctx.sidecarIndex = newSidecarIndex()
	ctx.envoyFiltersByNamespace = map[string][]*EnvoyFilterWrapper{}
	ctx.gatewayIndex = newGatewayIndex()
	ctx.ProxyStatus = map[string]map[string]ProxyPushStatus{}
	ctx.serviceAccounts = map[serviceAccountKey][]string{}
	return ctx
}
// AddPublicServices adds the services to context public services - mainly used in tests.
func (ps *PushContext) AddPublicServices(services []*Service) {
	existing := ps.ServiceIndex.public
	ps.ServiceIndex.public = append(existing, services...)
}
// AddServiceInstances adds instances to the context service instances - mainly used in tests.
// Instances for each port are appended to any already registered for the service.
func (ps *PushContext) AddServiceInstances(service *Service, instances map[int][]*IstioEndpoint) {
	// Preserve original behavior: no map entry is created when there is
	// nothing to add.
	if len(instances) == 0 {
		return
	}
	svcKey := service.Key()
	// The per-service map depends only on svcKey, so ensure it exists once
	// instead of re-checking inside every loop iteration.
	byPort, exists := ps.ServiceIndex.instancesByPort[svcKey]
	if !exists {
		byPort = make(map[int][]*IstioEndpoint)
		ps.ServiceIndex.instancesByPort[svcKey] = byPort
	}
	for port, inst := range instances {
		byPort[port] = append(byPort[port], inst...)
	}
}
// StatusJSON implements json.Marshaller, with a lock.
// A nil PushContext marshals to the empty JSON object.
func (ps *PushContext) StatusJSON() ([]byte, error) {
	if ps == nil {
		return []byte("{}"), nil
	}
	ps.proxyStatusMutex.RLock()
	defer ps.proxyStatusMutex.RUnlock()
	return json.MarshalIndent(ps.ProxyStatus, "", " ")
}
// OnConfigChange is called when a config change is detected. It records this
// context as the last push status and then refreshes the exported metrics.
func (ps *PushContext) OnConfigChange() {
	// Unlock before UpdateMetrics (which takes its own lock) — do not use defer here.
	LastPushMutex.Lock()
	LastPushStatus = ps
	LastPushMutex.Unlock()
	ps.UpdateMetrics()
}
// UpdateMetrics will update the prometheus metrics based on the
// current status of the push.
func (ps *PushContext) UpdateMetrics() {
	ps.proxyStatusMutex.RLock()
	defer ps.proxyStatusMutex.RUnlock()

	for _, pm := range metrics {
		// Each gauge exports the number of recorded events for that metric.
		count := len(ps.ProxyStatus[pm.Name()])
		pm.Record(float64(count))
	}
}
// virtualServiceDestinations returns the destination hosts referenced by a
// virtual service, mapped to the set of port numbers used for each host.
// It is called after virtual service short host name is resolved to FQDN
func virtualServiceDestinations(v *networking.VirtualService) map[string]sets.Set[int] {
	if v == nil {
		return nil
	}

	out := make(map[string]sets.Set[int])

	record := func(host string, port *networking.PortSelector) {
		// Use the value 0 as a sentinel indicating that one of the destinations
		// in the Virtual Service does not specify a port for this host.
		portNum := 0
		if port != nil {
			portNum = int(port.Number)
		}
		sets.InsertOrNew(out, host, portNum)
	}
	recordDestination := func(d *networking.Destination) {
		if d != nil {
			record(d.Host, d.GetPort())
		}
	}

	for _, h := range v.Http {
		for _, r := range h.Route {
			recordDestination(r.Destination)
		}
		if h.Mirror != nil {
			record(h.Mirror.Host, h.Mirror.GetPort())
		}
		for _, m := range h.Mirrors {
			recordDestination(m.Destination)
		}
	}
	for _, t := range v.Tcp {
		for _, r := range t.Route {
			recordDestination(r.Destination)
		}
	}
	for _, t := range v.Tls {
		for _, r := range t.Route {
			recordDestination(r.Destination)
		}
	}

	return out
}
// GatewayServices returns the set of services which are referred from the proxy gateways.
func (ps *PushContext) GatewayServices(proxy *Proxy) []*Service {
	svcs := proxy.SidecarScope.services

	// MergedGateway will be nil when there are no configs in the
	// system during initial installation.
	if proxy.MergedGateway == nil {
		return nil
	}

	// host set: hosts exposed by mesh-config providers / JWKS plus any
	// virtual-service destinations attached to the proxy's gateways.
	hostsFromGateways := ps.extraGatewayServices(proxy)
	for _, gw := range proxy.MergedGateway.GatewayNameForServer {
		hostsFromGateways.Merge(ps.virtualServiceIndex.destinationsByGateway[gw])
	}
	log.Debugf("GatewayServices: gateway %v is exposing these hosts:%v", proxy.ID, hostsFromGateways)

	// Keep only the sidecar-scope services whose hostname is exposed.
	gwSvcs := make([]*Service, 0, len(svcs))
	for _, s := range svcs {
		svcHost := string(s.Hostname)
		if _, ok := hostsFromGateways[svcHost]; ok {
			gwSvcs = append(gwSvcs, s)
		}
	}

	log.Debugf("GatewayServices: gateways len(services)=%d, len(filtered)=%d", len(svcs), len(gwSvcs))
	return gwSvcs
}
// ServicesAttachedToMesh returns the map of virtual-service hostname to the
// set of destination hostnames it references.
func (ps *PushContext) ServicesAttachedToMesh() map[string]sets.String {
	return ps.virtualServiceIndex.referencedDestinations
}
// ServiceAttachedToGateway reports whether the given hostname is exposed by
// any of the proxy's gateways — via an auto-passthrough gateway, a
// virtual-service destination on one of its gateways, or an extra gateway
// service (mesh-config providers / JWKS URIs).
func (ps *PushContext) ServiceAttachedToGateway(hostname string, proxy *Proxy) bool {
	gw := proxy.MergedGateway
	// MergedGateway will be nil when there are no configs in the
	// system during initial installation.
	if gw == nil {
		return false
	}
	// Auto-passthrough gateways expose every service.
	if gw.ContainsAutoPassthroughGateways {
		return true
	}
	for _, gwName := range gw.GatewayNameForServer {
		hosts, ok := ps.virtualServiceIndex.destinationsByGateway[gwName]
		if ok && hosts.Contains(hostname) {
			return true
		}
	}
	return ps.extraGatewayServices(proxy).Contains(hostname)
}
// wellknownProviders is a list of all known providers.
// This exists so that code handling providers (see AssertProvidersHandled and
// extraGatewayServices) can assert it covers every known provider.
var wellknownProviders = sets.New(
	"envoy_ext_authz_http",
	"envoy_ext_authz_grpc",
	"zipkin",
	"lightstep",
	"datadog",
	"opencensus",
	"stackdriver",
	"prometheus",
	"skywalking",
	"envoy_http_als",
	"envoy_tcp_als",
	"envoy_otel_als",
	"opentelemetry",
	"envoy_file_access_log",
)
// AssertProvidersHandled panics unless expected matches the number of known
// providers, forcing handler code to be updated whenever a provider is added.
func AssertProvidersHandled(expected int) {
	actual := len(wellknownProviders)
	if expected == actual {
		return
	}
	panic(fmt.Sprintf("Not all providers handled; This function handles %v but there are %v known providers", expected, actual))
}
// addHostsFromMeshConfigProvidersHandled contains the number of providers we handle below.
// This is to ensure this stays in sync as new handlers are added
// STOP. DO NOT UPDATE THIS WITHOUT UPDATING extraGatewayServices.
const addHostsFromMeshConfigProvidersHandled = 14
// extraGatewayServices returns a subset of services referred from the proxy gateways, including:
// 1. MeshConfig.ExtensionProviders
// 2. RequestAuthentication.JwtRules.JwksUri
// TODO: include cluster from EnvoyFilter such as global ratelimit [demo](https://istio.io/latest/docs/tasks/policy-enforcement/rate-limit/#global-rate-limit)
func (ps *PushContext) extraGatewayServices(proxy *Proxy) sets.String {
	out := sets.String{}
	// Collect the backing service of every MeshConfig extension provider that has one.
	for _, prov := range ps.Mesh.ExtensionProviders {
		switch p := prov.Provider.(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzHttp:
			out.Insert(p.EnvoyExtAuthzHttp.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzGrpc:
			out.Insert(p.EnvoyExtAuthzGrpc.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_Zipkin:
			out.Insert(p.Zipkin.Service)
		//nolint: staticcheck // Lightstep deprecated
		case *meshconfig.MeshConfig_ExtensionProvider_Lightstep:
			out.Insert(p.Lightstep.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_Datadog:
			out.Insert(p.Datadog.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_Skywalking:
			out.Insert(p.Skywalking.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_Opencensus:
			//nolint: staticcheck
			out.Insert(p.Opencensus.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_Opentelemetry:
			out.Insert(p.Opentelemetry.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpAls:
			out.Insert(p.EnvoyHttpAls.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpAls:
			out.Insert(p.EnvoyTcpAls.Service)
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyOtelAls:
			out.Insert(p.EnvoyOtelAls.Service)
		// Providers without a backing service.
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLog,
			*meshconfig.MeshConfig_ExtensionProvider_Prometheus,
			*meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
		}
	}
	// add services from RequestAuthentication.JwtRules.JwksUri, unless istiod
	// fetches the JWKS itself.
	if features.JwksFetchMode != jwt.Istiod {
		jwtPolicies := ps.AuthnPolicies.GetJwtPoliciesForWorkload(proxy.Metadata.Namespace, proxy.Labels, false)
		for _, cfg := range jwtPolicies {
			for _, rule := range cfg.Spec.(*v1beta1.RequestAuthentication).JwtRules {
				uri := rule.GetJwksUri()
				if uri == "" {
					continue
				}
				// Silently skip unparsable URIs; only well-formed hosts are added.
				if info, err := security.ParseJwksURI(uri); err == nil {
					out.Insert(info.Hostname.String())
				}
			}
		}
	}
	return out
}
// servicesExportedToNamespace returns the list of services that are visible to a namespace.
// namespace "" indicates all namespaces
func (ps *PushContext) servicesExportedToNamespace(ns string) []*Service {
	var out []*Service
	if ns != NamespaceAll {
		// Private services in the namespace, services explicitly exported to it,
		// and (appended below) all public services.
		out = make([]*Service, 0, len(ps.ServiceIndex.privateByNamespace[ns])+
			len(ps.ServiceIndex.exportedToNamespace[ns])+len(ps.ServiceIndex.public))
		out = append(out, ps.ServiceIndex.privateByNamespace[ns]...)
		out = append(out, ps.ServiceIndex.exportedToNamespace[ns]...)
	} else {
		// All namespaces: every private service everywhere plus public services.
		out = make([]*Service, 0, len(ps.ServiceIndex.privateByNamespace)+len(ps.ServiceIndex.public))
		for _, privateServices := range ps.ServiceIndex.privateByNamespace {
			out = append(out, privateServices...)
		}
	}
	// Public services are visible everywhere.
	return append(out, ps.ServiceIndex.public...)
}
// GetAllServices returns the total services within the mesh.
// It is equivalent to servicesExportedToNamespace with the all-namespaces wildcard.
// Note: per proxy services should use SidecarScope.Services.
func (ps *PushContext) GetAllServices() []*Service {
	return ps.servicesExportedToNamespace(NamespaceAll)
}
// ServiceForHostname returns the service associated with a given hostname following SidecarScope
func (ps *PushContext) ServiceForHostname(proxy *Proxy, hostname host.Name) *Service {
	if proxy != nil && proxy.SidecarScope != nil {
		return proxy.SidecarScope.servicesByHostname[hostname]
	}
	// SidecarScope shouldn't be null here. If it is, we can't disambiguate the
	// hostname to use for a namespace, so the selection must be undefined: pick
	// an arbitrary entry from the per-namespace map, or nil when none exists.
	for _, svc := range ps.ServiceIndex.HostnameAndNamespace[hostname] {
		return svc
	}
	return nil
}
// IsServiceVisible returns true if the input service is visible to the given namespace.
func (ps *PushContext) IsServiceVisible(service *Service, namespace string) bool {
	if service == nil {
		return false
	}
	svcNamespace := service.Attributes.Namespace
	exportTo := service.Attributes.ExportTo
	// No explicit exportTo: fall back to the mesh-wide default export policy.
	if exportTo.IsEmpty() {
		if ps.exportToDefaults.service.Contains(visibility.Private) {
			return svcNamespace == namespace
		}
		if ps.exportToDefaults.service.Contains(visibility.Public) {
			return true
		}
	}
	// Explicit exportTo: "*" means everyone, "." means own namespace,
	// otherwise look for the namespace itself.
	if exportTo.Contains(visibility.Public) {
		return true
	}
	if exportTo.Contains(visibility.Private) && svcNamespace == namespace {
		return true
	}
	return exportTo.Contains(visibility.Instance(namespace))
}
// VirtualServicesForGateway lists all virtual services bound to the specified gateways
// This replaces store.VirtualServices. Used only by the gateways
// Sidecars use the egressListener.VirtualServices().
//
// Note that for generating the imported virtual services of sidecar egress
// listener, we don't call this function to copy configs for performance issues.
// Instead, we pass the virtualServiceIndex directly into SelectVirtualServices
// function.
func (ps *PushContext) VirtualServicesForGateway(proxyNamespace, gateway string) []config.Config {
	key := types.NamespacedName{
		Namespace: proxyNamespace,
		Name:      gateway,
	}
	private := ps.virtualServiceIndex.privateByNamespaceAndGateway[key]
	exported := ps.virtualServiceIndex.exportedToNamespaceByGateway[key]
	public := ps.virtualServiceIndex.publicByGateway[gateway]
	res := make([]config.Config, 0, len(private)+len(exported)+len(public))
	res = append(res, private...)
	res = append(res, exported...)
	// Favor same-namespace Gateway routes, to give the "consumer override" preference.
	// We do 2 iterations here to avoid extra allocations.
	for _, vs := range public {
		if UseGatewaySemantics(vs) && vs.Namespace == proxyNamespace {
			res = append(res, vs)
		}
	}
	for _, vs := range public {
		if !UseGatewaySemantics(vs) || vs.Namespace != proxyNamespace {
			res = append(res, vs)
		}
	}
	return res
}
// DelegateVirtualServices lists all the delegate virtual services configkeys associated with the provided virtual services
func (ps *PushContext) DelegateVirtualServices(vses []config.Config) []ConfigHash {
	var hashes []ConfigHash
	for _, vs := range vses {
		key := ConfigKey{Kind: kind.VirtualService, Namespace: vs.Namespace, Name: vs.Name}
		for _, delegate := range ps.virtualServiceIndex.delegates[key] {
			hashes = append(hashes, delegate.HashCode())
		}
	}
	return hashes
}
// getSidecarScope returns a SidecarScope object associated with the
// proxy. The SidecarScope object is a semi-processed view of the service
// registry, and config state associated with the sidecar crd. The scope contains
// a set of inbound and outbound listeners, services/configs per listener,
// etc. The sidecar scopes are precomputed in the initSidecarContext
// function based on the Sidecar API objects in each namespace. If there is
// no sidecar api object, a default sidecarscope is assigned to the
// namespace which enables connectivity to all services in the mesh.
//
// Callers can check if the sidecarScope is from user generated object or not
// by checking the sidecarScope.Config field, that contains the user provided config
func (ps *PushContext) getSidecarScope(proxy *Proxy, workloadLabels labels.Instance) *SidecarScope {
	// TODO: logic to merge multiple sidecar resources
	// Currently we assume that there will be only one sidecar config for a namespace.
	sidecars, hasSidecar := ps.sidecarIndex.sidecarsByNamespace[proxy.ConfigNamespace]
	switch proxy.Type {
	case Router, Waypoint:
		// The derived per-namespace caches below are filled lazily, so guard them.
		ps.sidecarIndex.derivedSidecarMutex.Lock()
		defer ps.sidecarIndex.derivedSidecarMutex.Unlock()
		// Gateways always use default sidecar scope.
		if sc, f := ps.sidecarIndex.defaultSidecarsByNamespace[proxy.ConfigNamespace]; f {
			// Already computed for this namespace; return the cached scope.
			return sc
		}
		// We need to compute this namespace
		computed := DefaultSidecarScopeForNamespace(ps, proxy.ConfigNamespace)
		ps.sidecarIndex.defaultSidecarsByNamespace[proxy.ConfigNamespace] = computed
		return computed
	case SidecarProxy:
		if hasSidecar {
			for _, wrapper := range sidecars {
				if wrapper.Sidecar != nil {
					sidecar := wrapper.Sidecar
					// if there is no workload selector, the config applies to all workloads
					// if there is a workload selector, check for matching workload labels
					if sidecar.GetWorkloadSelector() != nil {
						workloadSelector := labels.Instance(sidecar.GetWorkloadSelector().GetLabels())
						// exclude workload selector that not match
						if !workloadSelector.SubsetOf(workloadLabels) {
							continue
						}
					}
					// it is guaranteed sidecars with selectors are put in front
					// and the sidecars are sorted by creation timestamp,
					// return exact/wildcard matching one directly
					return wrapper
				}
				// this happens at last, it is the default sidecar scope
				return wrapper
			}
		}
		// No user-supplied Sidecar matched; fall back to derived scopes, which are
		// computed lazily per namespace under the mutex.
		ps.sidecarIndex.derivedSidecarMutex.Lock()
		defer ps.sidecarIndex.derivedSidecarMutex.Unlock()
		if ps.sidecarIndex.meshRootSidecarConfig != nil {
			// A root-namespace Sidecar config exists; derive the namespace scope from it.
			if sc, exists := ps.sidecarIndex.meshRootSidecarsByNamespace[proxy.ConfigNamespace]; exists {
				// We have already computed the scope for this namespace, just return it.
				return sc
			}
			// We need to compute this namespace
			computed := convertToSidecarScope(ps, ps.sidecarIndex.meshRootSidecarConfig, proxy.ConfigNamespace)
			ps.sidecarIndex.meshRootSidecarsByNamespace[proxy.ConfigNamespace] = computed
			return computed
		}
		if sc, exists := ps.sidecarIndex.defaultSidecarsByNamespace[proxy.ConfigNamespace]; exists {
			// We have already computed the scope for this namespace, just return it.
			return sc
		}
		// We need to compute this namespace
		computed := convertToSidecarScope(ps, ps.sidecarIndex.meshRootSidecarConfig, proxy.ConfigNamespace)
		ps.sidecarIndex.defaultSidecarsByNamespace[proxy.ConfigNamespace] = computed
		return computed
	}
	// Other proxy types get no sidecar scope.
	return nil
}
// destinationRule returns a destination rule for a service name in a given namespace.
// Lookup order: (1) the calling proxy's namespace (or the root namespace's
// namespace-local rules when the proxy is in the root namespace), (2) rules
// exported from the service's own namespace, (3) rules exported from the root
// config namespace. Returns nil when nothing matches.
func (ps *PushContext) destinationRule(proxyNameSpace string, service *Service) []*ConsolidatedDestRule {
	if service == nil {
		return nil
	}
	// If the proxy config namespace is same as the root config namespace
	// look for dest rules in the service's namespace first. This hack is needed
	// because sometimes, istio-system tends to become the root config namespace.
	// Destination rules are defined here for global purposes. We do not want these
	// catch all destination rules to be the only dest rule, when processing CDS for
	// proxies like the istio-ingressgateway or istio-egressgateway.
	// If there are no service specific dest rules, we will end up picking up the same
	// rules anyway, later in the code
	// 1. select destination rule from proxy config namespace
	if proxyNameSpace != ps.Mesh.RootNamespace {
		// search through the DestinationRules in proxy's namespace first
		if ps.destinationRuleIndex.namespaceLocal[proxyNameSpace] != nil {
			if _, drs, ok := MostSpecificHostMatch(service.Hostname,
				ps.destinationRuleIndex.namespaceLocal[proxyNameSpace].specificDestRules,
				ps.destinationRuleIndex.namespaceLocal[proxyNameSpace].wildcardDestRules,
			); ok {
				return drs
			}
		}
	} else {
		// If this is a namespace local DR in the same namespace, this must be meant for this proxy, so we do not
		// need to worry about overriding other DRs with *.local type rules here. If we ignore this, then exportTo=. in
		// root namespace would always be ignored
		if _, drs, ok := MostSpecificHostMatch(service.Hostname,
			ps.destinationRuleIndex.rootNamespaceLocal.specificDestRules,
			ps.destinationRuleIndex.rootNamespaceLocal.wildcardDestRules,
		); ok {
			return drs
		}
	}
	// 2. select destination rule from service namespace
	svcNs := service.Attributes.Namespace
	// This can happen when finding the subset labels for a proxy in root namespace.
	// Because based on a pure cluster's fqdn, we do not know the service and
	// construct a fake service without setting Attributes at all.
	if svcNs == "" {
		// Recover the namespace by matching the hostname against services visible
		// to the proxy's namespace.
		for _, svc := range ps.servicesExportedToNamespace(proxyNameSpace) {
			if service.Hostname == svc.Hostname && svc.Attributes.Namespace != "" {
				svcNs = svc.Attributes.Namespace
				break
			}
		}
	}
	// 3. if no private/public rule matched in the calling proxy's namespace,
	// check the target service's namespace for exported rules
	if svcNs != "" {
		if out := ps.getExportedDestinationRuleFromNamespace(svcNs, service.Hostname, proxyNameSpace); out != nil {
			return out
		}
	}
	// 4. if no public/private rule in calling proxy's namespace matched, and no public rule in the
	// target service's namespace matched, search for any exported destination rule in the config root namespace
	if out := ps.getExportedDestinationRuleFromNamespace(ps.Mesh.RootNamespace, service.Hostname, proxyNameSpace); out != nil {
		return out
	}
	return nil
}
// getExportedDestinationRuleFromNamespace returns the destination rules exported from
// owningNamespace that match hostname and are visible to clientNamespace, or nil
// when no rule matches or the matching rule is not exported to the client.
func (ps *PushContext) getExportedDestinationRuleFromNamespace(owningNamespace string, hostname host.Name, clientNamespace string) []*ConsolidatedDestRule {
	// Hoist the per-namespace index: the original looked this map entry up three times.
	exported := ps.destinationRuleIndex.exportedByNamespace[owningNamespace]
	if exported == nil {
		return nil
	}
	specificHostname, drs, ok := MostSpecificHostMatch(hostname, exported.specificDestRules, exported.wildcardDestRules)
	if !ok {
		return nil
	}
	// Check if the dest rule for this host is actually exported to the proxy's (client) namespace
	exportToSet := exported.exportTo[specificHostname]
	if exportToSet.IsEmpty() || exportToSet.Contains(visibility.Public) || exportToSet.Contains(visibility.Instance(clientNamespace)) {
		return drs
	}
	return nil
}
// IsClusterLocal indicates whether the endpoints for the service should only be accessible to clients
// within the cluster.
func (ps *PushContext) IsClusterLocal(service *Service) bool {
	// A missing context or service is treated as not cluster-local.
	if ps == nil {
		return false
	}
	if service == nil {
		return false
	}
	return ps.clusterLocalHosts.IsClusterLocal(service.Hostname)
}
// InitContext will initialize the data structures used for code generation.
// This should be called before starting the push, from the thread creating
// the push context.
func (ps *PushContext) InitContext(env *Environment, oldPushContext *PushContext, pushReq *PushRequest) error {
	// Acquire a lock to ensure we don't concurrently initialize the same PushContext.
	// If this does happen, one thread will block then exit early from InitDone=true
	ps.initializeMutex.Lock()
	defer ps.initializeMutex.Unlock()
	if ps.InitDone.Load() {
		return nil
	}
	ps.Mesh = env.Mesh()
	ps.Networks = env.MeshNetworks()
	ps.LedgerVersion = env.Version()
	// Must be initialized first as initServiceRegistry/VirtualServices/Destrules
	// use the default export map.
	ps.initDefaultExportMaps()
	// create new or incremental update
	// A full rebuild is required when there is no usable previous context or no
	// specific set of changed configs to compute an incremental update from.
	if pushReq == nil || oldPushContext == nil || !oldPushContext.InitDone.Load() || len(pushReq.ConfigsUpdated) == 0 {
		if err := ps.createNewContext(env); err != nil {
			return err
		}
	} else {
		if err := ps.updateContext(env, oldPushContext, pushReq); err != nil {
			return err
		}
	}
	ps.networkMgr = env.NetworkManager
	ps.clusterLocalHosts = env.ClusterLocal().GetClusterLocalHosts()
	// Mark done only after every index is built; see the InitDone check above.
	ps.InitDone.Store(true)
	return nil
}
// createNewContext rebuilds every index in the PushContext from scratch.
// The call order matters: the service registry feeds the gateway/virtual-service
// indexes, and sidecar scopes depend on nearly everything else.
func (ps *PushContext) createNewContext(env *Environment) error {
	ps.initServiceRegistry(env, nil)
	// initKubernetesGateways is the only fallible step here.
	if err := ps.initKubernetesGateways(env); err != nil {
		return err
	}
	ps.initVirtualServices(env)
	ps.initDestinationRules(env)
	ps.initAuthnPolicies(env)
	ps.initAuthorizationPolicies(env)
	ps.initTelemetry(env)
	ps.initProxyConfigs(env)
	ps.initWasmPlugins(env)
	ps.initEnvoyFilters(env, nil, nil)
	ps.initGateways(env)
	ps.initAmbient(env)
	// Must be initialized in the end
	ps.initSidecarScopes(env)
	return nil
}
// updateContext performs an incremental rebuild: it inspects the changed configs
// in pushReq, recomputes only the affected indexes, and copies every untouched
// index from oldPushContext.
func (ps *PushContext) updateContext(
	env *Environment,
	oldPushContext *PushContext,
	pushReq *PushRequest,
) error {
	var servicesChanged, virtualServicesChanged, destinationRulesChanged, gatewayChanged,
		authnChanged, authzChanged, envoyFiltersChanged, sidecarsChanged, telemetryChanged, gatewayAPIChanged,
		wasmPluginsChanged, proxyConfigsChanged bool
	changedEnvoyFilters := sets.New[ConfigKey]()
	// First pass: classify each updated config into the index(es) it invalidates.
	for conf := range pushReq.ConfigsUpdated {
		switch conf.Kind {
		case kind.ServiceEntry:
			servicesChanged = true
		case kind.DestinationRule:
			destinationRulesChanged = true
		case kind.VirtualService:
			virtualServicesChanged = true
		case kind.Gateway:
			gatewayChanged = true
		case kind.Sidecar:
			sidecarsChanged = true
		case kind.WasmPlugin:
			wasmPluginsChanged = true
		case kind.EnvoyFilter:
			envoyFiltersChanged = true
			if features.OptimizedConfigRebuild {
				// Record exactly which filters changed so initEnvoyFilters can
				// patch the old index instead of rebuilding it.
				changedEnvoyFilters.Insert(conf)
			}
		case kind.AuthorizationPolicy:
			authzChanged = true
		case kind.RequestAuthentication,
			kind.PeerAuthentication:
			authnChanged = true
		case kind.HTTPRoute, kind.TCPRoute, kind.GatewayClass, kind.KubernetesGateway, kind.TLSRoute, kind.ReferenceGrant:
			gatewayAPIChanged = true
			// VS and GW are derived from gatewayAPI, so if it changed we need to update those as well
			virtualServicesChanged = true
			gatewayChanged = true
		case kind.Telemetry:
			telemetryChanged = true
		case kind.ProxyConfig:
			proxyConfigsChanged = true
		}
	}
	if servicesChanged {
		// Services have changed. initialize service registry
		ps.initServiceRegistry(env, pushReq.ConfigsUpdated)
	} else {
		// make sure we copy over things that would be generated in initServiceRegistry
		ps.ServiceIndex = oldPushContext.ServiceIndex
		ps.serviceAccounts = oldPushContext.serviceAccounts
	}
	if servicesChanged || gatewayAPIChanged {
		// Gateway status depends on services, so recompute if they change as well
		if err := ps.initKubernetesGateways(env); err != nil {
			return err
		}
	}
	if virtualServicesChanged {
		ps.initVirtualServices(env)
	} else {
		ps.virtualServiceIndex = oldPushContext.virtualServiceIndex
	}
	if destinationRulesChanged {
		ps.initDestinationRules(env)
	} else {
		ps.destinationRuleIndex = oldPushContext.destinationRuleIndex
	}
	if authnChanged {
		ps.initAuthnPolicies(env)
	} else {
		ps.AuthnPolicies = oldPushContext.AuthnPolicies
	}
	if authzChanged {
		ps.initAuthorizationPolicies(env)
	} else {
		ps.AuthzPolicies = oldPushContext.AuthzPolicies
	}
	if telemetryChanged {
		ps.initTelemetry(env)
	} else {
		ps.Telemetry = oldPushContext.Telemetry
	}
	if proxyConfigsChanged {
		ps.initProxyConfigs(env)
	} else {
		ps.ProxyConfigs = oldPushContext.ProxyConfigs
	}
	if wasmPluginsChanged {
		ps.initWasmPlugins(env)
	} else {
		ps.wasmPluginsByNamespace = oldPushContext.wasmPluginsByNamespace
	}
	if envoyFiltersChanged {
		// The old index plus the changed-filter set allow an incremental rebuild.
		ps.initEnvoyFilters(env, changedEnvoyFilters, oldPushContext.envoyFiltersByNamespace)
	} else {
		ps.envoyFiltersByNamespace = oldPushContext.envoyFiltersByNamespace
	}
	if gatewayChanged {
		ps.initGateways(env)
	} else {
		ps.gatewayIndex = oldPushContext.gatewayIndex
	}
	ps.initAmbient(env)
	// Must be initialized in the end
	// Sidecars need to be updated if services, virtual services, destination rules, or the sidecar configs change
	if servicesChanged || virtualServicesChanged || destinationRulesChanged || sidecarsChanged {
		ps.initSidecarScopes(env)
	} else {
		// new ADS connection may insert new entry to computedSidecarsByNamespace/gatewayDefaultSidecarsByNamespace.
		oldPushContext.sidecarIndex.derivedSidecarMutex.RLock()
		ps.sidecarIndex = oldPushContext.sidecarIndex
		oldPushContext.sidecarIndex.derivedSidecarMutex.RUnlock()
	}
	return nil
}
// Caches list of services in the registry, and creates a map
// of hostname to service. Also builds the per-port endpoint index and the
// private/exported/public visibility indexes from each service's exportTo.
func (ps *PushContext) initServiceRegistry(env *Environment, configsUpdate sets.Set[ConfigKey]) {
	// Sort the services in order of creation.
	allServices := SortServicesByCreationTime(env.Services())
	if features.EnableExternalNameAlias {
		resolveServiceAliases(allServices, configsUpdate)
	}
	for _, s := range allServices {
		// Map port name -> port number, used to translate endpoint shards below.
		portMap := map[string]int{}
		for _, port := range s.Ports {
			portMap[port.Name] = port.Port
		}
		svcKey := s.Key()
		if _, ok := ps.ServiceIndex.instancesByPort[svcKey]; !ok {
			ps.ServiceIndex.instancesByPort[svcKey] = make(map[int][]*IstioEndpoint)
		}
		shards, ok := env.EndpointIndex.ShardsForService(string(s.Hostname), s.Attributes.Namespace)
		if ok {
			ps.ServiceIndex.instancesByPort[svcKey] = shards.CopyEndpoints(portMap)
		}
		if _, f := ps.ServiceIndex.HostnameAndNamespace[s.Hostname]; !f {
			ps.ServiceIndex.HostnameAndNamespace[s.Hostname] = map[string]*Service{}
		}
		// In some scenarios, there may be multiple Services defined for the same hostname due to ServiceEntry allowing
		// arbitrary hostnames. In these cases, we want to pick the first Service, which is the oldest. This ensures
		// newly created Services cannot take ownership unexpectedly.
		// However, the Service is from Kubernetes it should take precedence over ones not. This prevents someone from
		// "domain squatting" on the hostname before a Kubernetes Service is created.
		if existing := ps.ServiceIndex.HostnameAndNamespace[s.Hostname][s.Attributes.Namespace]; existing != nil &&
			!(existing.Attributes.ServiceRegistry != provider.Kubernetes && s.Attributes.ServiceRegistry == provider.Kubernetes) {
			log.Debugf("Service %s/%s from registry %s ignored by %s/%s/%s", s.Attributes.Namespace, s.Hostname, s.Attributes.ServiceRegistry,
				existing.Attributes.ServiceRegistry, existing.Attributes.Namespace, existing.Hostname)
		} else {
			ps.ServiceIndex.HostnameAndNamespace[s.Hostname][s.Attributes.Namespace] = s
		}
		ns := s.Attributes.Namespace
		if s.Attributes.ExportTo.IsEmpty() {
			// No explicit exportTo: apply the mesh-wide default export policy.
			if ps.exportToDefaults.service.Contains(visibility.Private) {
				ps.ServiceIndex.privateByNamespace[ns] = append(ps.ServiceIndex.privateByNamespace[ns], s)
			} else if ps.exportToDefaults.service.Contains(visibility.Public) {
				ps.ServiceIndex.public = append(ps.ServiceIndex.public, s)
			}
		} else {
			// if service has exportTo *, make it public and ignore all other exportTos.
			// if service does not have exportTo *, but has exportTo ~ - i.e. not visible to anyone, ignore all exportTos.
			// if service has exportTo ., replace with current namespace.
			if s.Attributes.ExportTo.Contains(visibility.Public) {
				ps.ServiceIndex.public = append(ps.ServiceIndex.public, s)
				continue
			} else if s.Attributes.ExportTo.Contains(visibility.None) {
				continue
			}
			// . or other namespaces
			for exportTo := range s.Attributes.ExportTo {
				if exportTo == visibility.Private || string(exportTo) == ns {
					// exportTo with same namespace is effectively private
					ps.ServiceIndex.privateByNamespace[ns] = append(ps.ServiceIndex.privateByNamespace[ns], s)
				} else {
					// exportTo is a specific target namespace
					ps.ServiceIndex.exportedToNamespace[string(exportTo)] = append(ps.ServiceIndex.exportedToNamespace[string(exportTo)], s)
				}
			}
		}
	}
	ps.initServiceAccounts(env, allServices)
}
// resolveServiceAliases sets the Aliases attributes on all services. The incoming Service's will just have AliasFor set,
// but in our usage we often need the opposite: for a given service, what are all the aliases?
// resolveServiceAliases walks this 'graph' of services and updates the Alias field in-place.
func resolveServiceAliases(allServices []*Service, configsUpdated sets.Set[ConfigKey]) {
	// rawAlias builds a map of Service -> AliasFor. So this will be ExternalName -> Service.
	// In an edge case, we can have ExternalName -> ExternalName; we resolve that below.
	rawAlias := map[NamespacedHostname]host.Name{}
	for _, s := range allServices {
		if s.Resolution != Alias {
			continue
		}
		nh := NamespacedHostname{
			Hostname:  s.Hostname,
			Namespace: s.Attributes.Namespace,
		}
		rawAlias[nh] = host.Name(s.Attributes.K8sAttributes.ExternalName)
	}
	// unnamespacedRawAlias is like rawAlias but without namespaces.
	// This is because an `ExternalName` isn't namespaced. If there is a conflict, the behavior is undefined.
	// This is split from above as a minor optimization to right-size the map
	unnamespacedRawAlias := make(map[host.Name]host.Name, len(rawAlias))
	for k, v := range rawAlias {
		unnamespacedRawAlias[k.Hostname] = v
	}
	// resolvedAliases builds a map of Alias -> Concrete, fully resolving through multiple hops.
	// Ex: Alias1 -> Alias2 -> Concrete will flatten to Alias1 -> Concrete.
	resolvedAliases := make(map[NamespacedHostname]host.Name, len(rawAlias))
	for alias, referencedService := range rawAlias {
		// referencedService may be another alias or a concrete service.
		if _, f := unnamespacedRawAlias[referencedService]; !f {
			// Common case: alias pointing to a concrete service
			resolvedAliases[alias] = referencedService
			continue
		}
		// Otherwise, we need to traverse the alias "graph".
		// In an obscure edge case, a user could make a loop, so we will need to handle that.
		seen := sets.New(alias.Hostname, referencedService)
		for {
			n, f := unnamespacedRawAlias[referencedService]
			if !f {
				// The destination we are pointing to is not an alias, so this is the terminal step
				resolvedAliases[alias] = referencedService
				break
			}
			if seen.InsertContains(n) {
				// We did a loop!
				// Kubernetes will make these NXDomain, so we can just treat it like it doesn't exist at all
				break
			}
			referencedService = n
		}
	}
	// aliasesForService builds a map of Concrete -> []Aliases
	// This basically reverses our resolvedAliased map, which is Alias -> Concrete,
	aliasesForService := map[host.Name][]NamespacedHostname{}
	for alias, concrete := range resolvedAliases {
		aliasesForService[concrete] = append(aliasesForService[concrete], alias)
		// We also need to update configsUpdated, such that any "alias" updated also marks the concrete service as updated.
		aliasKey := ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      alias.Hostname.String(),
			Namespace: alias.Namespace,
		}
		// Alias. We should mark all the concrete services as updated as well.
		if configsUpdated.Contains(aliasKey) {
			// We only have the hostname, but we need the namespace...
			// A hostname can appear in several namespaces, so mark every match.
			for _, svc := range allServices {
				if svc.Hostname == concrete {
					configsUpdated.Insert(ConfigKey{
						Kind:      kind.ServiceEntry,
						Name:      concrete.String(),
						Namespace: svc.Attributes.Namespace,
					})
				}
			}
		}
	}
	// Sort aliases so order is deterministic.
	for _, v := range aliasesForService {
		slices.SortFunc(v, func(a, b NamespacedHostname) int {
			if r := cmp.Compare(a.Namespace, b.Namespace); r != 0 {
				return r
			}
			return cmp.Compare(a.Hostname, b.Hostname)
		})
	}
	// Finally, we can traverse all services and update the ones that have aliases
	for i, s := range allServices {
		if aliases, f := aliasesForService[s.Hostname]; f {
			// This service has an alias; set it. We need to make a copy since the underlying Service is shared
			s = s.DeepCopy()
			s.Attributes.Aliases = aliases
			allServices[i] = s
		}
	}
}
// SortServicesByCreationTime sorts the list of services in ascending order by their creation time (if available).
func SortServicesByCreationTime(services []*Service) []*Service {
	sort.SliceStable(services, func(i, j int) bool {
		a, b := services[i], services[j]
		if !a.CreationTime.Equal(b.CreationTime) {
			return a.CreationTime.Before(b.CreationTime)
		}
		// If creation time is the same, then behavior is nondeterministic. In this case, we can
		// pick an arbitrary but consistent ordering based on name and namespace, which is unique.
		// CreationTimestamp is stored in seconds, so this is not uncommon.
		return a.Attributes.Name+"."+a.Attributes.Namespace < b.Attributes.Name+"."+b.Attributes.Namespace
	})
	return services
}
// Caches list of service accounts in the registry
func (ps *PushContext) initServiceAccounts(env *Environment, services []*Service) {
	for _, svc := range services {
		var accounts sets.String
		// First get endpoint level service accounts
		if shard, found := env.EndpointIndex.ShardsForService(string(svc.Hostname), svc.Attributes.Namespace); found {
			shard.RLock()
			// copy here to reduce the lock time
			// endpoints could update frequently, so the longer it locks, the more likely it will block other threads.
			accounts = shard.ServiceAccounts.Copy()
			shard.RUnlock()
		}
		// Merge in the service-level accounts.
		if len(svc.ServiceAccounts) > 0 {
			if accounts == nil {
				accounts = sets.New(svc.ServiceAccounts...)
			} else {
				accounts = accounts.InsertAll(svc.ServiceAccounts...)
			}
		}
		ps.serviceAccounts[serviceAccountKey{
			hostname:  svc.Hostname,
			namespace: svc.Attributes.Namespace,
		}] = sets.SortedList(spiffe.ExpandWithTrustDomains(accounts, ps.Mesh.TrustDomainAliases))
	}
}
// Caches list of authentication policies.
// All work is delegated to initAuthenticationPolicies; the result is stored on ps.AuthnPolicies.
func (ps *PushContext) initAuthnPolicies(env *Environment) {
	ps.AuthnPolicies = initAuthenticationPolicies(env)
}
// Caches list of virtual services.
// Builds the private/exported/public per-gateway indexes from each virtual
// service's exportTo, plus the gateway-destination and mesh-destination indexes.
func (ps *PushContext) initVirtualServices(env *Environment) {
	ps.virtualServiceIndex.exportedToNamespaceByGateway = map[types.NamespacedName][]config.Config{}
	ps.virtualServiceIndex.privateByNamespaceAndGateway = map[types.NamespacedName][]config.Config{}
	ps.virtualServiceIndex.publicByGateway = map[string][]config.Config{}
	ps.virtualServiceIndex.referencedDestinations = map[string]sets.String{}
	if features.FilterGatewayClusterConfig {
		ps.virtualServiceIndex.destinationsByGateway = make(map[string]sets.String)
	}
	virtualServices := env.List(gvk.VirtualService, NamespaceAll)
	// values returned from ConfigStore.List are immutable.
	// Therefore, we make a copy
	vservices := make([]config.Config, len(virtualServices))
	for i := range vservices {
		vservices[i] = virtualServices[i].DeepCopy()
	}
	totalVirtualServices.Record(float64(len(virtualServices)))
	// TODO(rshriram): parse each virtual service and maintain a map of the
	// virtualservice name, the list of registry hosts in the VS and non
	// registry DNS names in the VS. This should cut down processing in
	// the RDS code. See separateVSHostsAndServices in route/route.go
	sortConfigByCreationTime(vservices)
	// convert all shortnames in virtual services into FQDNs
	for _, r := range vservices {
		resolveVirtualServiceShortnames(r.Spec.(*networking.VirtualService), r.Meta)
	}
	vservices, ps.virtualServiceIndex.delegates = mergeVirtualServicesIfNeeded(vservices, ps.exportToDefaults.virtualService)
	for _, virtualService := range vservices {
		ns := virtualService.Namespace
		rule := virtualService.Spec.(*networking.VirtualService)
		gwNames := getGatewayNames(rule)
		if len(rule.ExportTo) == 0 {
			// No exportTo in virtualService. Use the global default
			// We only honor ., *
			if ps.exportToDefaults.virtualService.Contains(visibility.Private) {
				// add to local namespace only
				private := ps.virtualServiceIndex.privateByNamespaceAndGateway
				for _, gw := range gwNames {
					n := types.NamespacedName{Namespace: ns, Name: gw}
					private[n] = append(private[n], virtualService)
				}
			} else if ps.exportToDefaults.virtualService.Contains(visibility.Public) {
				for _, gw := range gwNames {
					ps.virtualServiceIndex.publicByGateway[gw] = append(ps.virtualServiceIndex.publicByGateway[gw], virtualService)
				}
			}
		} else {
			exportToSet := sets.NewWithLength[visibility.Instance](len(rule.ExportTo))
			for _, e := range rule.ExportTo {
				exportToSet.Insert(visibility.Instance(e))
			}
			// if vs has exportTo ~ - i.e. not visible to anyone, ignore all exportTos
			// if vs has exportTo *, make public and ignore all other exportTos
			// if vs has exportTo ., replace with current namespace
			if exportToSet.Contains(visibility.Public) {
				for _, gw := range gwNames {
					ps.virtualServiceIndex.publicByGateway[gw] = append(ps.virtualServiceIndex.publicByGateway[gw], virtualService)
				}
			} else if !exportToSet.Contains(visibility.None) {
				// . or other namespaces
				for exportTo := range exportToSet {
					if exportTo == visibility.Private || string(exportTo) == ns {
						// add to local namespace only
						for _, gw := range gwNames {
							n := types.NamespacedName{Namespace: ns, Name: gw}
							ps.virtualServiceIndex.privateByNamespaceAndGateway[n] = append(ps.virtualServiceIndex.privateByNamespaceAndGateway[n], virtualService)
						}
					} else {
						exported := ps.virtualServiceIndex.exportedToNamespaceByGateway
						// add to the target namespace's exported index
						for _, gw := range gwNames {
							n := types.NamespacedName{Namespace: string(exportTo), Name: gw}
							exported[n] = append(exported[n], virtualService)
						}
					}
				}
			}
		}
		if features.FilterGatewayClusterConfig {
			// Record which destination hosts each non-mesh gateway references.
			for _, gw := range gwNames {
				if gw == constants.IstioMeshGateway {
					continue
				}
				for host := range virtualServiceDestinations(rule) {
					sets.InsertOrNew(ps.virtualServiceIndex.destinationsByGateway, gw, host)
				}
			}
		}
		// For mesh virtual services, build a map of host -> referenced destinations
		if features.EnableAmbientControllers && (len(rule.Gateways) == 0 || slices.Contains(rule.Gateways, constants.IstioMeshGateway)) {
			for host := range virtualServiceDestinations(rule) {
				for _, rhost := range rule.Hosts {
					if _, f := ps.virtualServiceIndex.referencedDestinations[rhost]; !f {
						ps.virtualServiceIndex.referencedDestinations[rhost] = sets.New[string]()
					}
					ps.virtualServiceIndex.referencedDestinations[rhost].Insert(host)
				}
			}
		}
	}
}
// meshGateways is the implicit gateway binding used when a VirtualService
// does not name any gateways explicitly.
var meshGateways = []string{constants.IstioMeshGateway}

// getGatewayNames returns the gateways a VirtualService is bound to. When no
// gateways are listed, it defaults to the mesh gateway; otherwise it returns
// a copy of the configured gateway list.
func getGatewayNames(vs *networking.VirtualService) []string {
	if len(vs.Gateways) == 0 {
		return meshGateways
	}
	out := make([]string, len(vs.Gateways))
	copy(out, vs.Gateways)
	return out
}
// initDefaultExportMaps computes the mesh-wide default exportTo visibility
// sets for destination rules, services, and virtual services. Each kind falls
// back to public ("*") visibility when no mesh-level default is configured.
func (ps *PushContext) initDefaultExportMaps() {
	// buildDefault converts a mesh-config exportTo list into a visibility set,
	// defaulting to * when the list is absent (nil).
	buildDefault := func(exportTo []string) sets.Set[visibility.Instance] {
		out := sets.New[visibility.Instance]()
		if exportTo == nil {
			// default to *
			out.Insert(visibility.Public)
			return out
		}
		for _, e := range exportTo {
			out.Insert(visibility.Instance(e))
		}
		return out
	}
	ps.exportToDefaults.destinationRule = buildDefault(ps.Mesh.DefaultDestinationRuleExportTo)
	ps.exportToDefaults.service = buildDefault(ps.Mesh.DefaultServiceExportTo)
	ps.exportToDefaults.virtualService = buildDefault(ps.Mesh.DefaultVirtualServiceExportTo)
}
// initSidecarScopes synthesizes Sidecar CRDs into objects called
// SidecarScope. The SidecarScope object is a semi-processed view of the
// service registry, and config state associated with the sidecar CRD. The
// scope contains a set of inbound and outbound listeners, services/configs
// per listener, etc. The sidecar scopes are precomputed based on the
// Sidecar API objects in each namespace. If there is no sidecar api object
// for a namespace, a default sidecarscope is assigned to the namespace
// which enables connectivity to all services in the mesh.
//
// When proxies connect to Pilot, we identify the sidecar scope associated
// with the proxy and derive listeners/routes/clusters based on the sidecar
// scope.
func (ps *PushContext) initSidecarScopes(env *Environment) {
	rawSidecarConfigs := env.List(gvk.Sidecar, NamespaceAll)
	sortConfigByCreationTime(rawSidecarConfigs)

	// Partition in a single pass: sidecars with a workloadSelector take
	// preference, so they are placed before the namespace-wide ones while
	// the creation-time order within each group is preserved.
	sidecarConfigs := make([]config.Config, 0, len(rawSidecarConfigs))
	var withoutSelector []config.Config
	for _, cfg := range rawSidecarConfigs {
		if cfg.Spec.(*networking.Sidecar).WorkloadSelector != nil {
			sidecarConfigs = append(sidecarConfigs, cfg)
		} else {
			withoutSelector = append(withoutSelector, cfg)
		}
	}
	sidecarConfigs = append(sidecarConfigs, withoutSelector...)

	// Hold a reference to the root namespace's sidecar config.
	// The root namespace can have only one sidecar config object and
	// currently we expect that it has no workloadSelector.
	var rootNSConfig *config.Config
	for i := range sidecarConfigs {
		cfg := &sidecarConfigs[i]
		if cfg.Namespace == ps.Mesh.RootNamespace &&
			cfg.Spec.(*networking.Sidecar).WorkloadSelector == nil {
			rootNSConfig = cfg
			break
		}
	}
	ps.sidecarIndex.meshRootSidecarConfig = rootNSConfig
	ps.sidecarIndex.sidecarsByNamespace = make(map[string][]*SidecarScope)
	ps.convertSidecarScopes(sidecarConfigs)
}
// convertSidecarScopes builds SidecarScope objects from the given configs and
// indexes them by namespace, fanning the conversion out across workers when
// concurrency is enabled via features.ConvertSidecarScopeConcurrency.
func (ps *PushContext) convertSidecarScopes(sidecarConfigs []config.Config) {
	if len(sidecarConfigs) == 0 {
		return
	}
	if features.ConvertSidecarScopeConcurrency > 1 {
		ps.concurrentConvertToSidecarScope(sidecarConfigs)
		return
	}
	byNamespace := ps.sidecarIndex.sidecarsByNamespace
	for _, cfg := range sidecarConfigs {
		scope := convertToSidecarScope(ps, &cfg, cfg.Namespace)
		byNamespace[cfg.Namespace] = append(byNamespace[cfg.Namespace], scope)
	}
}
// concurrentConvertToSidecarScope converts sidecar configs to SidecarScopes
// using a bounded pool of worker goroutines, then indexes the results by
// namespace. Results are written into a slot-per-input slice so that the
// final ordering matches sidecarConfigs exactly — the order indicates
// priority, see getSidecarScope.
func (ps *PushContext) concurrentConvertToSidecarScope(sidecarConfigs []config.Config) {
	type task struct {
		idx int
		cfg config.Config
	}
	tasks := make(chan task)
	results := make([]*SidecarScope, len(sidecarConfigs))
	var wg sync.WaitGroup
	for i := 0; i < features.ConvertSidecarScopeConcurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Drain tasks until the channel is closed by the producer below.
			for t := range tasks {
				results[t.idx] = convertToSidecarScope(ps, &t.cfg, t.cfg.Namespace)
			}
		}()
	}
	for idx, cfg := range sidecarConfigs {
		tasks <- task{idx: idx, cfg: cfg}
	}
	close(tasks)
	wg.Wait()
	for _, sc := range results {
		ps.sidecarIndex.sidecarsByNamespace[sc.Namespace] = append(ps.sidecarIndex.sidecarsByNamespace[sc.Namespace], sc)
	}
}
// initDestinationRules loads all DestinationRules and builds the destination
// rule indexes. Split out of DestinationRule expensive conversions — runs
// once per push.
func (ps *PushContext) initDestinationRules(env *Environment) {
	configs := env.List(gvk.DestinationRule, NamespaceAll)
	// Values returned from ConfigStore.List are immutable, so operate on a copy.
	destRules := make([]config.Config, len(configs))
	copy(destRules, configs)
	ps.setDestinationRules(destRules)
}
// newConsolidatedDestRules returns an empty consolidatedDestRules with all
// of its host-keyed indexes initialized and ready for merging.
func newConsolidatedDestRules() *consolidatedDestRules {
	cdr := &consolidatedDestRules{}
	cdr.exportTo = make(map[host.Name]sets.Set[visibility.Instance])
	cdr.specificDestRules = make(map[host.Name][]*ConsolidatedDestRule)
	cdr.wildcardDestRules = make(map[host.Name][]*ConsolidatedDestRule)
	return cdr
}
// SetDestinationRulesForTesting is a testing-only hook that lets tests inject
// destination rule configs directly, without going through a mock config store.
func (ps *PushContext) SetDestinationRulesForTesting(configs []config.Config) {
	ps.setDestinationRules(configs)
}
// setDestinationRules updates internal structures using a set of configs.
// Split out of DestinationRule expensive conversions, computed once per push.
// This will not work properly for Sidecars, which will precompute their
// destination rules on init.
//
// It builds three indexes:
//   - namespaceLocal: rules visible within their own namespace (any visibility)
//   - exportedByNamespace: rules exported beyond their own namespace
//   - rootNamespaceLocal: private rules in the mesh root namespace
func (ps *PushContext) setDestinationRules(configs []config.Config) {
	// Sort by time first. So if two destination rule have top level traffic policies
	// we take the first one.
	sortConfigByCreationTime(configs)
	namespaceLocalDestRules := make(map[string]*consolidatedDestRules)
	exportedDestRulesByNamespace := make(map[string]*consolidatedDestRules)
	rootNamespaceLocalDestRules := newConsolidatedDestRules()
	for i := range configs {
		rule := configs[i].Spec.(*networking.DestinationRule)
		// Resolve short host names to FQDNs relative to the rule's namespace.
		rule.Host = string(ResolveShortnameToFQDN(rule.Host, configs[i].Meta))
		var exportToSet sets.Set[visibility.Instance]
		// destination rules with workloadSelector should not be exported to other namespaces
		if rule.GetWorkloadSelector() == nil {
			exportToSet = sets.NewWithLength[visibility.Instance](len(rule.ExportTo))
			for _, e := range rule.ExportTo {
				exportToSet.Insert(visibility.Instance(e))
			}
		} else {
			exportToSet = sets.New[visibility.Instance](visibility.Private)
		}
		// add only if the dest rule is exported with . or * or explicit exportTo containing this namespace
		// The global exportTo doesn't matter here (its either . or * - both of which are applicable here)
		if exportToSet.IsEmpty() || exportToSet.Contains(visibility.Public) || exportToSet.Contains(visibility.Private) ||
			exportToSet.Contains(visibility.Instance(configs[i].Namespace)) {
			// Store in an index for the config's namespace
			// a proxy from this namespace will first look here for the destination rule for a given service
			// This pool consists of both public/private destination rules.
			if _, exist := namespaceLocalDestRules[configs[i].Namespace]; !exist {
				namespaceLocalDestRules[configs[i].Namespace] = newConsolidatedDestRules()
			}
			// Merge this destination rule with any public/private dest rules for same host in the same namespace
			// If there are no duplicates, the dest rule will be added to the list
			ps.mergeDestinationRule(namespaceLocalDestRules[configs[i].Namespace], configs[i], exportToSet)
		}
		// Determine whether the rule is visible only inside its own namespace:
		// either the mesh default is private and the rule has no exportTo, or
		// the rule's only exportTo entry is "." / its own namespace.
		isPrivateOnly := false
		// No exportTo in destinationRule. Use the global default
		// We only honor . and *
		if exportToSet.IsEmpty() && ps.exportToDefaults.destinationRule.Contains(visibility.Private) {
			isPrivateOnly = true
		} else if exportToSet.Len() == 1 && (exportToSet.Contains(visibility.Private) || exportToSet.Contains(visibility.Instance(configs[i].Namespace))) {
			isPrivateOnly = true
		}
		if !isPrivateOnly {
			if _, exist := exportedDestRulesByNamespace[configs[i].Namespace]; !exist {
				exportedDestRulesByNamespace[configs[i].Namespace] = newConsolidatedDestRules()
			}
			// Merge this destination rule with any other exported dest rule for the same host in the same namespace
			// If there are no duplicates, the dest rule will be added to the list
			ps.mergeDestinationRule(exportedDestRulesByNamespace[configs[i].Namespace], configs[i], exportToSet)
		} else if configs[i].Namespace == ps.Mesh.RootNamespace {
			// Keep track of private root namespace destination rules
			ps.mergeDestinationRule(rootNamespaceLocalDestRules, configs[i], exportToSet)
		}
	}
	ps.destinationRuleIndex.namespaceLocal = namespaceLocalDestRules
	ps.destinationRuleIndex.exportedByNamespace = exportedDestRulesByNamespace
	ps.destinationRuleIndex.rootNamespaceLocal = rootNamespaceLocalDestRules
}
// initAuthorizationPolicies pre-computes all AuthorizationPolicies per namespace.
func (ps *PushContext) initAuthorizationPolicies(env *Environment) {
	ps.AuthzPolicies = GetAuthorizationPolicies(env)
}
// initTelemetry pre-computes the Telemetry configurations from the environment.
func (ps *PushContext) initTelemetry(env *Environment) {
	ps.Telemetry = getTelemetries(env)
}
// initProxyConfigs pre-computes the ProxyConfig resources from the config
// store and the mesh config.
func (ps *PushContext) initProxyConfigs(env *Environment) {
	ps.ProxyConfigs = GetProxyConfigs(env.ConfigStore, env.Mesh())
}
// initWasmPlugins pre-computes WasmPlugins per namespace, sorted by creation
// time. Plugins that fail conversion to a wrapper are skipped.
func (ps *PushContext) initWasmPlugins(env *Environment) {
	plugins := env.List(gvk.WasmPlugin, NamespaceAll)
	sortConfigByCreationTime(plugins)
	byNamespace := map[string][]*WasmPluginWrapper{}
	for _, plugin := range plugins {
		wrapper := convertToWasmPluginWrapper(plugin)
		if wrapper == nil {
			continue
		}
		byNamespace[plugin.Namespace] = append(byNamespace[plugin.Namespace], wrapper)
	}
	ps.wasmPluginsByNamespace = byNamespace
}
// WasmPlugins return the WasmPluginWrappers of a proxy.
// It is a convenience wrapper over WasmPluginsByListenerInfo with no listener
// or plugin-type filtering applied.
func (ps *PushContext) WasmPlugins(proxy *Proxy) map[extensions.PluginPhase][]*WasmPluginWrapper {
	return ps.WasmPluginsByListenerInfo(proxy, anyListener, WasmPluginTypeAny)
}
// WasmPluginsByListenerInfo return the WasmPluginWrappers which are matched with TrafficSelector in the given proxy.
// Plugins are gathered from the proxy's own namespace plus the root namespace,
// then each phase's slice is sorted by descending priority (nil priority sorts last).
func (ps *PushContext) WasmPluginsByListenerInfo(proxy *Proxy, info WasmPluginListenerInfo,
	pluginType WasmPluginType,
) map[extensions.PluginPhase][]*WasmPluginWrapper {
	if proxy == nil {
		return nil
	}
	var lookupInNamespaces []string
	matchedPlugins := make(map[extensions.PluginPhase][]*WasmPluginWrapper)
	if proxy.ConfigNamespace != ps.Mesh.RootNamespace {
		// Only check the root namespace if the (workload) namespace is not already the root namespace
		// to avoid double inclusion.
		lookupInNamespaces = []string{proxy.ConfigNamespace, ps.Mesh.RootNamespace}
	} else {
		lookupInNamespaces = []string{proxy.ConfigNamespace}
	}
	// The selection options depend only on the proxy, not on the individual
	// plugin, so build them once instead of re-allocating per plugin inside
	// the loops below.
	opts := WorkloadSelectionOpts{
		RootNamespace:  ps.Mesh.RootNamespace,
		Namespace:      proxy.ConfigNamespace,
		WorkloadLabels: proxy.Labels,
		IsWaypoint:     proxy.IsWaypointProxy(),
	}
	for _, ns := range lookupInNamespaces {
		if wasmPlugins, ok := ps.wasmPluginsByNamespace[ns]; ok {
			for _, plugin := range wasmPlugins {
				if plugin.MatchListener(opts, info) && plugin.MatchType(pluginType) {
					matchedPlugins[plugin.Phase] = append(matchedPlugins[plugin.Phase], plugin)
				}
			}
		}
	}
	// Sort each phase's slice by priority, highest first; plugins without an
	// explicit priority are treated as the minimum.
	for phase, slice := range matchedPlugins {
		sort.SliceStable(slice, func(i, j int) bool {
			iPriority := int32(math.MinInt32)
			if prio := slice[i].Priority; prio != nil {
				iPriority = prio.Value
			}
			jPriority := int32(math.MinInt32)
			if prio := slice[j].Priority; prio != nil {
				jPriority = prio.Value
			}
			return iPriority > jPriority
		})
		matchedPlugins[phase] = slice
	}
	return matchedPlugins
}
// initEnvoyFilters pre-computes envoy filters per namespace. Filters are
// ordered by priority, then creation time, then name.namespace so that the
// ordering is fully deterministic. When OptimizedConfigRebuild is enabled,
// wrappers from the previous push (previousIndex) are reused for filters that
// are not in the changed set, avoiding expensive re-conversion.
func (ps *PushContext) initEnvoyFilters(env *Environment, changed sets.Set[ConfigKey], previousIndex map[string][]*EnvoyFilterWrapper) {
	envoyFilterConfigs := env.List(gvk.EnvoyFilter, NamespaceAll)
	// Flatten the previous namespace index into a ConfigKey lookup so each
	// filter's prior wrapper can be found in O(1) below.
	var previous map[ConfigKey]*EnvoyFilterWrapper
	if features.OptimizedConfigRebuild {
		previous = make(map[ConfigKey]*EnvoyFilterWrapper)
		for namespace, nsEnvoyFilters := range previousIndex {
			for _, envoyFilter := range nsEnvoyFilters {
				previous[ConfigKey{Kind: kind.EnvoyFilter, Namespace: namespace, Name: envoyFilter.Name}] = envoyFilter
			}
		}
	}
	sort.Slice(envoyFilterConfigs, func(i, j int) bool {
		ifilter := envoyFilterConfigs[i].Spec.(*networking.EnvoyFilter)
		jfilter := envoyFilterConfigs[j].Spec.(*networking.EnvoyFilter)
		if ifilter.Priority != jfilter.Priority {
			return ifilter.Priority < jfilter.Priority
		}
		// If priority is same fallback to name and creation timestamp, else use priority.
		// If creation time is the same, then behavior is nondeterministic. In this case, we can
		// pick an arbitrary but consistent ordering based on name and namespace, which is unique.
		// CreationTimestamp is stored in seconds, so this is not uncommon.
		if envoyFilterConfigs[i].CreationTimestamp != envoyFilterConfigs[j].CreationTimestamp {
			return envoyFilterConfigs[i].CreationTimestamp.Before(envoyFilterConfigs[j].CreationTimestamp)
		}
		in := envoyFilterConfigs[i].Name + "." + envoyFilterConfigs[i].Namespace
		jn := envoyFilterConfigs[j].Name + "." + envoyFilterConfigs[j].Namespace
		return in < jn
	})
	for _, envoyFilterConfig := range envoyFilterConfigs {
		var efw *EnvoyFilterWrapper
		if features.OptimizedConfigRebuild {
			key := ConfigKey{Kind: kind.EnvoyFilter, Namespace: envoyFilterConfig.Namespace, Name: envoyFilterConfig.Name}
			if prev, ok := previous[key]; ok && !changed.Contains(key) {
				// Reuse the previous EnvoyFilterWrapper if it exists and hasn't changed when optimized config rebuild is enabled
				efw = prev
			}
		}
		// Rebuild the envoy filter in all other cases.
		if efw == nil {
			efw = convertToEnvoyFilterWrapper(&envoyFilterConfig)
		}
		ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace] = append(ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace], efw)
	}
}
// EnvoyFilters return the merged EnvoyFilterWrapper of a proxy.
// EnvoyFilters support inheritance: filters from the config root namespace
// apply first, followed by those from the proxy's own namespace. Returns nil
// when no filter matches the proxy.
func (ps *PushContext) EnvoyFilters(proxy *Proxy) *EnvoyFilterWrapper {
	// this should never happen
	if proxy == nil {
		return nil
	}
	var matched []*EnvoyFilterWrapper
	if ps.Mesh.RootNamespace != "" {
		matched = ps.getMatchedEnvoyFilters(proxy, ps.Mesh.RootNamespace)
	}
	// Skip the proxy namespace when it equals the root namespace, to prevent
	// including the same envoyfilters twice.
	if proxy.ConfigNamespace != ps.Mesh.RootNamespace {
		matched = append(matched, ps.getMatchedEnvoyFilters(proxy, proxy.ConfigNamespace)...)
	}
	if len(matched) == 0 {
		return nil
	}
	merged := &EnvoyFilterWrapper{
		// no need populate workloadSelector, as it is not used later.
		Patches: make(map[networking.EnvoyFilter_ApplyTo][]*EnvoyFilterConfigPatchWrapper),
	}
	// Merge the patches of every matched EnvoyFilterWrapper, keeping only the
	// config patches whose proxy match conditions apply to this proxy.
	for _, efw := range matched {
		for applyTo, patches := range efw.Patches {
			for _, patch := range patches {
				if proxyMatch(proxy, patch) {
					merged.Patches[applyTo] = append(merged.Patches[applyTo], patch)
				}
			}
		}
	}
	return merged
}
// getMatchedEnvoyFilters returns the EnvoyFilters in the given namespace that
// apply to the proxy: a filter with no workload selector applies to all
// workloads; otherwise its selector labels must be a subset of the proxy's.
func (ps *PushContext) getMatchedEnvoyFilters(proxy *Proxy, namespace string) []*EnvoyFilterWrapper {
	matched := make([]*EnvoyFilterWrapper, 0)
	for _, efw := range ps.envoyFiltersByNamespace[namespace] {
		if efw.workloadSelector != nil && !efw.workloadSelector.SubsetOf(proxy.Labels) {
			continue
		}
		matched = append(matched, efw)
	}
	return matched
}
// HasEnvoyFilters reports whether an EnvoyFilter with the given name exists
// in the given namespace.
func (ps *PushContext) HasEnvoyFilters(name, namespace string) bool {
	filters := ps.envoyFiltersByNamespace[namespace]
	for i := range filters {
		if filters[i].Name == name {
			return true
		}
	}
	return false
}
// initGateways pre-computes gateway configs, indexed per namespace when
// ScopeGatewayToNamespace is enabled, otherwise kept as one mesh-wide list
// sorted by creation time.
func (ps *PushContext) initGateways(env *Environment) {
	gatewayConfigs := env.List(gvk.Gateway, NamespaceAll)
	sortConfigByCreationTime(gatewayConfigs)
	if features.ScopeGatewayToNamespace {
		ps.gatewayIndex.namespace = make(map[string][]config.Config)
		for _, gatewayConfig := range gatewayConfigs {
			// append on a missing key starts a fresh slice, so no explicit
			// initialization of the namespace entry is needed.
			ns := gatewayConfig.Namespace
			ps.gatewayIndex.namespace[ns] = append(ps.gatewayIndex.namespace[ns], gatewayConfig)
		}
	} else {
		ps.gatewayIndex.all = gatewayConfigs
	}
}
// initAmbient stores the environment as the ambient index used for ambient
// workload and waypoint lookups (see SupportsTunnel, WaypointsFor).
func (ps *PushContext) initAmbient(env *Environment) {
	ps.ambientIndex = env
}
// InternalGatewayServiceAnnotation represents the hostname of the service a gateway will use. This is
// only used internally to transfer information from the Kubernetes Gateway API to the Istio Gateway API
// which does not have a field to represent this.
// The format is a comma separated list of hostnames. For example, "ingress.istio-system.svc.cluster.local,ingress.example.com"
// The Gateway will apply to all ServiceInstances of these services, *in the same namespace as the Gateway*.
const InternalGatewayServiceAnnotation = "internal.istio.io/gateway-service"

// gatewayWithInstances pairs a gateway config with the proxy service targets
// the gateway is bound to (see mergeGateways).
type gatewayWithInstances struct {
	// gateway is the gateway config being applied.
	gateway config.Config
	// If true, ports that are not present in any instance will be used directly (without targetPort translation)
	// This supports the legacy behavior of selecting gateways by pod label selector
	legacyGatewaySelector bool
	// instances are the service targets on the proxy that match this gateway.
	instances []ServiceTarget
}
// mergeGateways merges the gateway configs applicable to the given proxy into
// a single MergedGateway, or returns nil when no gateway selects the proxy.
// A gateway selects a proxy either via the internal gateway-service annotation
// (Kubernetes Gateway API), via a pod label selector, or — with no selector —
// by any proxy that asks for it.
func (ps *PushContext) mergeGateways(proxy *Proxy) *MergedGateway {
	// this should never happen
	if proxy == nil {
		return nil
	}
	gatewayInstances := make([]gatewayWithInstances, 0)
	var configs []config.Config
	if features.ScopeGatewayToNamespace {
		configs = ps.gatewayIndex.namespace[proxy.ConfigNamespace]
	} else {
		configs = ps.gatewayIndex.all
	}
	for _, cfg := range configs {
		gw := cfg.Spec.(*networking.Gateway)
		if gwsvcstr, f := cfg.Annotations[InternalGatewayServiceAnnotation]; f {
			// Gateway API-sourced gateway: bind only to proxies backing one of
			// the listed gateway services, in the gateway's own namespace.
			gwsvcs := strings.Split(gwsvcstr, ",")
			known := sets.New[string](gwsvcs...)
			matchingInstances := make([]ServiceTarget, 0, len(proxy.ServiceTargets))
			for _, si := range proxy.ServiceTargets {
				// Use the set's Contains accessor rather than a raw map index.
				if known.Contains(string(si.Service.Hostname)) && si.Service.Attributes.Namespace == cfg.Namespace {
					matchingInstances = append(matchingInstances, si)
				}
			}
			// Only if we have a matching instance should we apply the configuration
			if len(matchingInstances) > 0 {
				gatewayInstances = append(gatewayInstances, gatewayWithInstances{cfg, false, matchingInstances})
			}
		} else if gw.GetSelector() == nil {
			// no selector. Applies to all workloads asking for the gateway
			gatewayInstances = append(gatewayInstances, gatewayWithInstances{cfg, true, proxy.ServiceTargets})
		} else {
			gatewaySelector := labels.Instance(gw.GetSelector())
			if gatewaySelector.SubsetOf(proxy.Labels) {
				gatewayInstances = append(gatewayInstances, gatewayWithInstances{cfg, true, proxy.ServiceTargets})
			}
		}
	}
	if len(gatewayInstances) == 0 {
		return nil
	}
	return MergeGateways(gatewayInstances, proxy, ps)
}
// NetworkManager returns the network manager associated with this push context.
func (ps *PushContext) NetworkManager() *NetworkManager {
	return ps.networkMgr
}
// BestEffortInferServiceMTLSMode infers the mTLS mode for the service + port from all authentication
// policies (both alpha and beta) in the system. The function always returns MTLSUnknown for external service.
// The result is a best effort. It is because the PeerAuthentication is workload-based, this function is unable
// to compute the correct service mTLS mode without knowing service to workload binding. For now, this
// function uses only mesh and namespace level PeerAuthentication and ignore workload & port level policies.
// This function is used to give a hint for auto-mTLS configuration on client side.
func (ps *PushContext) BestEffortInferServiceMTLSMode(tp *networking.TrafficPolicy, service *Service, port *Port) MutualTLSMode {
	if service.MeshExternal {
		// Only need the authentication mTLS mode when service is not external.
		return MTLSUnknown
	}
	// 1. For passthrough traffic (headless service or explicitly defined in DestinationRule), we look at the instances.
	// If ALL instances have a sidecar, we enable TLS, otherwise we disable.
	// TODO(https://github.com/istio/istio/issues/27376) enable mixed deployments
	// A service with passthrough resolution is always passthrough, regardless of the TrafficPolicy.
	if service.Resolution == Passthrough || tp.GetLoadBalancer().GetSimple() == networking.LoadBalancerSettings_PASSTHROUGH {
		instances := ps.ServiceEndpointsByPort(service, port.Port, nil)
		if len(instances) == 0 {
			// No endpoints at all: nothing can terminate mTLS, so disable.
			return MTLSDisable
		}
		for _, i := range instances {
			// Infer mTls disabled if any of the endpoint is with tls disabled
			if i.TLSMode == DisabledTLSModeLabel {
				return MTLSDisable
			}
		}
	}
	// 2. check mTLS settings from beta policy (i.e PeerAuthentication) at namespace / mesh level.
	// If the mode is not unknown, use it.
	if serviceMTLSMode := ps.AuthnPolicies.GetNamespaceMutualTLSMode(service.Attributes.Namespace); serviceMTLSMode != MTLSUnknown {
		return serviceMTLSMode
	}
	// Fallback to permissive.
	return MTLSPermissive
}
// ServiceEndpointsByPort returns the cached endpoints of a service on the
// given port, optionally filtered by endpoint labels. Returns nil when the
// service/port is not indexed.
func (ps *PushContext) ServiceEndpointsByPort(svc *Service, port int, labels labels.Instance) []*IstioEndpoint {
	instances, exists := ps.ServiceIndex.instancesByPort[svc.Key()][port]
	if !exists {
		return nil
	}
	// Use cached version of instances by port when labels are empty.
	if len(labels) == 0 {
		return instances
	}
	// If there are labels, filter instances by pod labels: keep endpoints
	// whose labels are a superset of the requested labels.
	var filtered []*IstioEndpoint
	for _, instance := range instances {
		if labels.SubsetOf(instance.Labels) {
			filtered = append(filtered, instance)
		}
	}
	return filtered
}
// ServiceEndpoints returns the cached endpoints of a service keyed by port,
// or nil when the service is not indexed.
func (ps *PushContext) ServiceEndpoints(svcKey string) map[int][]*IstioEndpoint {
	// A missing key yields the nil zero value, matching an explicit nil return.
	return ps.ServiceIndex.instancesByPort[svcKey]
}
// initKubernetesGateways initializes Kubernetes gateway-api objects by
// reconciling the Gateway API controller against this push context. It is a
// no-op when no Gateway API controller is configured.
func (ps *PushContext) initKubernetesGateways(env *Environment) error {
	controller := env.GatewayAPIController
	if controller == nil {
		return nil
	}
	ps.GatewayAPIController = controller
	return controller.Reconcile(ps)
}
// ReferenceAllowed determines if a given resource (of type `kind` and name `resourceName`) can be
// accessed by `namespace`, based of specific reference policies.
// Note: this function only determines if a reference is *explicitly* allowed; the reference may not require
// explicit authorization to be made at all in most cases. Today, this only is for allowing cross-namespace
// secret access.
func (ps *PushContext) ReferenceAllowed(kind config.GroupVersionKind, resourceName string, namespace string) bool {
	// Currently, only Secret has reference policy, and only implemented by Gateway API controller.
	if kind == gvk.Secret && ps.GatewayAPIController != nil {
		return ps.GatewayAPIController.SecretAllowed(resourceName, namespace)
	}
	return false
}
// ServiceAccounts returns the precomputed service accounts associated with
// the given service hostname in the given namespace.
func (ps *PushContext) ServiceAccounts(hostname host.Name, namespace string) []string {
	return ps.serviceAccounts[serviceAccountKey{
		hostname: hostname,
		namespace: namespace,
	}]
}
// SupportsTunnel checks if a given IP address supports tunneling.
// This currently only accepts workload IPs as arguments; services will always return "false".
func (ps *PushContext) SupportsTunnel(n network.ID, ip string) bool {
	// There should be a 1:1 relationship between IP and Workload, but the
	// interface only supports set lookups — inspect whatever comes back
	// (expected 0 or 1 workloads).
	addresses, _ := ps.ambientIndex.AddressInformation(sets.New(n.String() + "/" + ip))
	workloads := ExtractWorkloadsFromAddresses(addresses)
	for _, wl := range workloads {
		if wl.TunnelProtocol == workloadapi.TunnelProtocol_HBONE {
			return true
		}
	}
	return false
}
// WaypointsFor returns the waypoint addresses for the given scope, as
// reported by the ambient index.
func (ps *PushContext) WaypointsFor(scope WaypointScope) []netip.Addr {
	return ps.ambientIndex.Waypoint(scope)
}
// WorkloadsForWaypoint returns all workloads associated with a given
// WaypointScope, as reported by the ambient index.
func (ps *PushContext) WorkloadsForWaypoint(scope WaypointScope) []*WorkloadInfo {
	return ps.ambientIndex.WorkloadsForWaypoint(scope)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file describes the abstract model of services (and their instances) as
// represented in Istio. This model is independent of the underlying platform
// (Kubernetes, Mesos, etc.). Platform specific adapters found populate the
// model object with various fields, from the metadata found in the platform.
// The platform independent proxy code uses the representation in the model to
// generate the configuration files for the Layer 7 proxy sidecar. The proxy
// code is specific to individual proxy implementations
package model
import (
"fmt"
"net/netip"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/mitchellh/copystructure"
"google.golang.org/protobuf/proto"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi"
"istio.io/istio/pkg/workloadapi/security"
)
// Service describes an Istio service (e.g., catalog.mystore.com:8080)
// Each service has a fully qualified domain name (FQDN) and one or more
// ports where the service is listening for connections. *Optionally*, a
// service can have a single load balancer/virtual IP address associated
// with it, such that the DNS queries for the FQDN resolves to the virtual
// IP address (a load balancer IP).
//
// E.g., in kubernetes, a service foo is associated with
// foo.default.svc.cluster.local hostname, has a virtual IP of 10.0.1.1 and
// listens on ports 80, 8080
type Service struct {
	// Attributes contains additional attributes associated with the service
	// used mostly by RBAC for policy enforcement purposes.
	Attributes ServiceAttributes

	// Ports is the set of network ports where the service is listening for
	// connections
	Ports PortList `json:"ports,omitempty"`

	// ServiceAccounts specifies the service accounts that run the service.
	ServiceAccounts []string `json:"serviceAccounts,omitempty"`

	// CreationTime records the time this service was created, if available.
	CreationTime time.Time `json:"creationTime,omitempty"`

	// Name of the service, e.g. "catalog.mystore.com"
	Hostname host.Name `json:"hostname"`

	// ClusterVIPs specifies the service address of the load balancer
	// in each of the clusters where the service resides
	ClusterVIPs AddressMap `json:"clusterVIPs,omitempty"`

	// DefaultAddress specifies the default service IP of the load balancer.
	// Do not access directly. Use GetAddressForProxy
	DefaultAddress string `json:"defaultAddress,omitempty"`

	// AutoAllocatedIPv4Address and AutoAllocatedIPv6Address specifies
	// the automatically allocated IPv4/IPv6 address out of the reserved
	// Class E subnet (240.240.0.0/16) or reserved Benchmarking IP range
	// (2001:2::/48) in RFC5180, for service entries with non-wildcard
	// hostnames. The IPs assigned to services are not
	// synchronized across istiod replicas as the DNS resolution
	// for these service entries happens completely inside a pod
	// whose proxy is managed by one istiod. That said, the algorithm
	// to allocate IPs is pretty deterministic that at stable state, two
	// istiods will allocate the exact same set of IPs for a given set of
	// service entries.
	AutoAllocatedIPv4Address string `json:"autoAllocatedIPv4Address,omitempty"`
	AutoAllocatedIPv6Address string `json:"autoAllocatedIPv6Address,omitempty"`

	// Resolution indicates how the service instances need to be resolved before routing
	// traffic. Most services in the service registry will use static load balancing wherein
	// the proxy will decide the service instance that will receive the traffic. Service entries
	// could either use DNS load balancing (i.e. proxy will query DNS server for the IP of the service)
	// or use the passthrough model (i.e. proxy will forward the traffic to the network endpoint requested
	// by the caller)
	Resolution Resolution

	// MeshExternal (if true) indicates that the service is external to the mesh.
	// These services are defined using Istio's ServiceEntry spec.
	MeshExternal bool

	// ResourceVersion represents the internal version of this object.
	ResourceVersion string
}
// Key returns a unique "namespace/hostname" identifier for the service,
// or the empty string for a nil receiver. This matches the keys used by
// ServiceIndex lookups (e.g. ServiceEndpointsByPort).
func (s *Service) Key() string {
	if s == nil {
		return ""
	}
	return s.Attributes.Namespace + "/" + string(s.Hostname)
}
// serviceCmpOpts ignores the AddressMap's internal mutex when comparing
// services with go-cmp; the lock is not part of a Service's identity.
var serviceCmpOpts = []cmp.Option{cmpopts.IgnoreFields(AddressMap{}, "mutex")}

// CmpOpts returns the go-cmp options to use when comparing Service objects.
func (s *Service) CmpOpts() []cmp.Option {
	return serviceCmpOpts
}
// Resolution indicates how the service instances need to be resolved before routing traffic.
type Resolution int

const (
	// ClientSideLB implies that the proxy will decide the endpoint from its local lb pool
	ClientSideLB Resolution = iota
	// DNSLB implies that the proxy will resolve a DNS address and forward to the resolved address
	DNSLB
	// Passthrough implies that the proxy should forward traffic to the destination IP requested by the caller
	Passthrough
	// DNSRoundRobinLB implies that the proxy will resolve a DNS address and forward to the resolved address
	DNSRoundRobinLB
	// Alias defines a Service that is an alias for another.
	Alias
)

// String converts Resolution in to String.
func (resolution Resolution) String() string {
	switch resolution {
	case ClientSideLB:
		return "ClientSide"
	case DNSLB:
		return "DNS"
	case DNSRoundRobinLB:
		return "DNSRoundRobin"
	case Passthrough:
		return "Passthrough"
	case Alias:
		// Previously missing: Alias fell through to the numeric default ("4").
		return "Alias"
	default:
		// Unknown values render as their numeric form.
		return fmt.Sprintf("%d", int(resolution))
	}
}
const (
	// LocalityLabel indicates the region/zone/subzone of an instance. It is used to override the native
	// registry's value.
	//
	// Note: because k8s labels do not support `/`, we use `.` instead in k8s.
	LocalityLabel = "istio-locality"
	// k8sSeparator is the k8s istio-locality label separator (replaces `/`).
	k8sSeparator = "."
)
const (
	// TunnelLabel defines the label workloads describe to indicate that they support tunneling.
	// Values are expected to be a CSV list, sorted by preference, of protocols supported.
	// Currently supported values:
	// * "http": indicates tunneling over HTTP over TCP. HTTP/2 vs HTTP/1.1 may be supported by ALPN negotiation.
	// Planned future values:
	// * "http3": indicates tunneling over HTTP over QUIC. This is distinct from "http", since we cannot do ALPN
	//   negotiation for QUIC vs TCP.
	// Users should appropriately parse the full list rather than doing a string literal check to
	// ensure future-proofing against new protocols being added.
	TunnelLabel = "networking.istio.io/tunnel"
	// TunnelLabelShortName is a short name for TunnelLabel to be used in optimized scenarios.
	TunnelLabelShortName = "tunnel"
	// TunnelHTTP indicates tunneling over HTTP over TCP. HTTP/2 vs HTTP/1.1 may be supported by ALPN
	// negotiation. Note: ALPN negotiation is not currently implemented; HTTP/2 will always be used.
	// This is future-proofed, however, because only the `h2` ALPN is exposed.
	TunnelHTTP = "http"
)

const (
	// TLSModeLabelShortname name used for determining endpoint level tls transport socket configuration
	TLSModeLabelShortname = "tlsMode"
	// DisabledTLSModeLabel implies that this endpoint should receive traffic as is (mostly plaintext)
	DisabledTLSModeLabel = "disabled"
	// IstioMutualTLSModeLabel implies that the endpoint is ready to receive Istio mTLS connections.
	IstioMutualTLSModeLabel = "istio"
	// IstioCanonicalServiceLabelName is the name of label for the Istio Canonical Service for a workload instance.
	IstioCanonicalServiceLabelName = "service.istio.io/canonical-name"
	// IstioCanonicalServiceRevisionLabelName is the name of label for the Istio Canonical Service revision for a workload instance.
	IstioCanonicalServiceRevisionLabelName = "service.istio.io/canonical-revision"
)

// SupportsTunnel reports whether the given workload labels advertise support
// for the requested tunnel type in their TunnelLabel CSV list.
func SupportsTunnel(labels map[string]string, tunnelType string) bool {
	for _, protocol := range strings.Split(labels[TunnelLabel], ",") {
		if protocol == tunnelType {
			return true
		}
	}
	return false
}
// Port represents a network port where a service is listening for
// connections. The port should be annotated with the type of protocol
// used by the port.
type Port struct {
	// Name ascribes a human readable name for the port object. When a
	// service has multiple ports, the name field is mandatory
	Name string `json:"name,omitempty"`

	// Port number where the service can be reached. Does not necessarily
	// map to the corresponding port numbers for the instances behind the
	// service.
	Port int `json:"port"`

	// Protocol to be used for the port.
	Protocol protocol.Instance `json:"protocol,omitempty"`
}

// String returns a human-readable one-line rendering of the port, for logging/debugging.
func (p Port) String() string {
	return fmt.Sprintf("Name:%s Port:%d Protocol:%v", p.Name, p.Port, p.Protocol)
}

// PortList is a set of ports
type PortList []*Port

// TrafficDirection defines whether traffic exists a service instance or enters a service instance
type TrafficDirection string

const (
	// TrafficDirectionInbound indicates inbound traffic
	TrafficDirectionInbound TrafficDirection = "inbound"
	// TrafficDirectionInboundVIP indicates inbound traffic for vip
	TrafficDirectionInboundVIP TrafficDirection = "inbound-vip"
	// TrafficDirectionOutbound indicates outbound traffic
	TrafficDirectionOutbound TrafficDirection = "outbound"

	// trafficDirectionOutboundSrvPrefix the prefix for a DNS SRV type subset key
	trafficDirectionOutboundSrvPrefix = string(TrafficDirectionOutbound) + "_"
	// trafficDirectionInboundSrvPrefix the prefix for a DNS SRV type subset key
	trafficDirectionInboundSrvPrefix = string(TrafficDirectionInbound) + "_"
)
// ServiceInstance represents an individual instance of a specific version
// of a service. It binds a network endpoint (ip:port), the service
// description (which is oblivious to various versions) and a set of labels
// that describe the service version associated with this instance.
//
// Since a ServiceInstance has a single IstioEndpoint, which has a single port,
// multiple ServiceInstances are required to represent a workload that listens
// on multiple ports.
//
// The labels associated with a service instance are unique per a network endpoint.
// There is one well defined set of labels for each service instance network endpoint.
//
// For example, the set of service instances associated with catalog.mystore.com
// are modeled like this
//
//	--> IstioEndpoint(172.16.0.1:8888), Service(catalog.myservice.com), Labels(foo=bar)
//	--> IstioEndpoint(172.16.0.2:8888), Service(catalog.myservice.com), Labels(foo=bar)
//	--> IstioEndpoint(172.16.0.3:8888), Service(catalog.myservice.com), Labels(kitty=cat)
//	--> IstioEndpoint(172.16.0.4:8888), Service(catalog.myservice.com), Labels(kitty=cat)
type ServiceInstance struct {
	// Service is the service this instance belongs to.
	Service *Service `json:"service,omitempty"`
	// ServicePort is the service-level port the endpoint implements.
	ServicePort *Port `json:"servicePort,omitempty"`
	// Endpoint is the concrete network endpoint (address/port) backing the service port.
	Endpoint *IstioEndpoint `json:"endpoint,omitempty"`
}
// CmpOpts returns the go-cmp options needed to compare ServiceInstance objects,
// combining those required for the contained IstioEndpoint and Service.
func (instance *ServiceInstance) CmpOpts() []cmp.Option {
	opts := make([]cmp.Option, 0, len(istioEndpointCmpOpts)+len(serviceCmpOpts))
	opts = append(opts, istioEndpointCmpOpts...)
	opts = append(opts, serviceCmpOpts...)
	return opts
}
// ServiceTarget includes a Service object, along with a specific service port
// and target port. This is basically a smaller version of ServiceInstance,
// intended to avoid the need to have the full object when only port information
// is needed.
type ServiceTarget struct {
	// Service the target belongs to.
	Service *Service
	// Port is the service port plus the resolved target port.
	Port ServiceInstancePort
}

type (
	// ServicePort is an alias for a pointer to Port, used for embedding in ServiceInstancePort.
	ServicePort = *Port
	// ServiceInstancePort defines a port that has both a port and targetPort (which distinguishes it from model.Port)
	// Note: ServiceInstancePort only makes sense in the context of a specific ServiceInstance, because TargetPort depends on a specific instance.
	ServiceInstancePort struct {
		ServicePort
		// TargetPort is the port the backing workload actually listens on.
		TargetPort uint32
	}
)
// ServiceInstanceToTarget converts a full ServiceInstance into the smaller
// ServiceTarget form, pairing the service port with the endpoint's target port.
func ServiceInstanceToTarget(e *ServiceInstance) ServiceTarget {
	port := ServiceInstancePort{
		ServicePort: e.ServicePort,
		TargetPort:  e.Endpoint.EndpointPort,
	}
	return ServiceTarget{
		Service: e.Service,
		Port:    port,
	}
}
// DeepCopy creates a copy of ServiceInstance. The Service and Endpoint are
// deep-copied via their own DeepCopy methods; ServicePort is copied field by field.
func (instance *ServiceInstance) DeepCopy() *ServiceInstance {
	out := &ServiceInstance{
		Service:  instance.Service.DeepCopy(),
		Endpoint: instance.Endpoint.DeepCopy(),
	}
	// Guard against a nil ServicePort: the previous code dereferenced it
	// unconditionally and would panic on a partially-populated instance.
	if instance.ServicePort != nil {
		out.ServicePort = &Port{
			Name:     instance.ServicePort.Name,
			Port:     instance.ServicePort.Port,
			Protocol: instance.ServicePort.Protocol,
		}
	}
	return out
}
// workloadKind identifies the source object a workload instance was derived from.
type workloadKind int

const (
	// PodKind indicates the workload is from pod
	PodKind workloadKind = iota
	// WorkloadEntryKind indicates the workload is from workloadentry
	WorkloadEntryKind
)

// String returns the human-readable name of the kind, or "" for unknown values.
func (k workloadKind) String() string {
	switch k {
	case PodKind:
		return "Pod"
	case WorkloadEntryKind:
		return "WorkloadEntry"
	default:
		return ""
	}
}
// WorkloadInstance describes a single workload (Pod or WorkloadEntry) with its
// endpoint and port information, independent of any particular service.
type WorkloadInstance struct {
	// Name of the source object (pod or workloadentry name).
	Name string `json:"name,omitempty"`
	// Namespace of the source object.
	Namespace string `json:"namespace,omitempty"`
	// Where the workloadInstance come from, valid values are`Pod` or `WorkloadEntry`
	Kind workloadKind `json:"kind"`
	// Endpoint holds the network address and related metadata of the workload.
	Endpoint *IstioEndpoint `json:"endpoint,omitempty"`
	// PortMap maps port names to port numbers — presumably service port name to
	// workload port; verify against callers before relying on this.
	PortMap map[string]uint32 `json:"portMap,omitempty"`
	// Can only be selected by service entry of DNS type.
	DNSServiceEntryOnly bool `json:"dnsServiceEntryOnly,omitempty"`
}

// CmpOpts returns the go-cmp options needed to compare WorkloadInstance objects
// (those required by the contained IstioEndpoint).
func (instance *WorkloadInstance) CmpOpts() []cmp.Option {
	return istioEndpointCmpOpts
}
// DeepCopy creates a copy of WorkloadInstance.
// Note: the copy's PortMap is always non-nil, even when the source map is nil,
// preserving the behavior of the original implementation.
func (instance *WorkloadInstance) DeepCopy() *WorkloadInstance {
	// Presize the map to avoid rehashing while copying.
	pmap := make(map[string]uint32, len(instance.PortMap))
	for k, v := range instance.PortMap {
		pmap[k] = v
	}
	return &WorkloadInstance{
		Name:      instance.Name,
		Namespace: instance.Namespace,
		Kind:      instance.Kind,
		PortMap:   pmap,
		Endpoint:  instance.Endpoint.DeepCopy(),
	}
}
// WorkloadInstancesEqual is a custom comparison of workload instances based on the fields that we need.
// Returns true if equal, false otherwise.
func WorkloadInstancesEqual(first, second *WorkloadInstance) bool {
	// If either endpoint is missing, they are equal only when both are missing.
	if first.Endpoint == nil || second.Endpoint == nil {
		return first.Endpoint == second.Endpoint
	}
	a, b := first.Endpoint, second.Endpoint
	// Any single mismatch below makes the instances unequal; the conditions are
	// evaluated in order, matching the original field-by-field checks.
	switch {
	case a.Address != b.Address,
		a.Network != b.Network,
		a.TLSMode != b.TLSMode,
		!a.Labels.Equals(b.Labels),
		a.ServiceAccount != b.ServiceAccount,
		a.Locality != b.Locality,
		a.GetLoadBalancingWeight() != b.GetLoadBalancingWeight(),
		first.Namespace != second.Namespace,
		first.Name != second.Name,
		first.Kind != second.Kind,
		!maps.Equal(first.PortMap, second.PortMap):
		return false
	}
	return true
}
// GetLocalityLabel returns the locality from the supplied label. Because Kubernetes
// labels don't support `/`, we replace "." with "/" in the supplied label as a workaround.
func GetLocalityLabel(label string) string {
	if label == "" {
		return ""
	}
	// If "/" is already present, the label is in its final form; return as-is.
	if strings.Contains(label, "/") {
		return label
	}
	// Replace the k8s-safe "." separator with "/".
	return strings.ReplaceAll(label, k8sSeparator, "/")
}
// Locality information for an IstioEndpoint
type Locality struct {
	// Label for locality on the endpoint. This is a "/" separated string.
	Label string

	// ClusterID where the endpoint is located
	ClusterID cluster.ID
}

// HealthStatus is the health state of an endpoint.
type HealthStatus int32

const (
	// Healthy.
	Healthy HealthStatus = 1
	// Unhealthy.
	UnHealthy HealthStatus = 2
	// Draining - the constant matches envoy
	Draining HealthStatus = 3
)
// IstioEndpoint defines a network address (IP:port) associated with an instance of the
// service. A service has one or more instances each running in a
// container/VM/pod. If a service has multiple ports, then the same
// instance IP is expected to be listening on multiple ports (one per each
// service port). Note that the port associated with an instance does not
// have to be the same as the port associated with the service. Depending
// on the network setup (NAT, overlays), this could vary.
//
// For e.g., if catalog.mystore.com is accessible through port 80 and 8080,
// and it maps to an instance with IP 172.16.0.1, such that connections to
// port 80 are forwarded to port 55446, and connections to port 8080 are
// forwarded to port 33333,
//
// then internally, we have two endpoint structs for the
// service catalog.mystore.com
//
//	--> 172.16.0.1:55446 (with ServicePort pointing to 80) and
//	--> 172.16.0.1:33333 (with ServicePort pointing to 8080)
//
// TODO: Investigate removing ServiceInstance entirely.
type IstioEndpoint struct {
	// Labels points to the workload or deployment labels.
	Labels labels.Instance

	// Address is the address of the endpoint, using envoy proto.
	Address string

	// ServicePortName tracks the name of the port, this is used to select the IstioEndpoint by service port.
	ServicePortName string

	// ServiceAccount holds the associated service account.
	ServiceAccount string

	// Network holds the network where this endpoint is present
	Network network.ID

	// The locality where the endpoint is present.
	Locality Locality

	// EndpointPort is the port where the workload is listening, can be different
	// from the service port.
	EndpointPort uint32

	// The load balancing weight associated with this endpoint.
	LbWeight uint32

	// TLSMode endpoint is injected with istio sidecar and ready to configure Istio mTLS
	TLSMode string

	// Namespace that this endpoint belongs to. This is for telemetry purpose.
	Namespace string

	// Name of the workload that this endpoint belongs to. This is for telemetry purpose.
	WorkloadName string

	// Specifies the hostname of the Pod, empty for vm workload.
	HostName string

	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	SubDomain string

	// Determines the discoverability of this endpoint throughout the mesh.
	DiscoverabilityPolicy EndpointDiscoverabilityPolicy `json:"-"`

	// Indicates the endpoint health status.
	HealthStatus HealthStatus

	// If in k8s, the node where the pod resides
	NodeName string

	// precomputedEnvoyEndpoint is a cached LbEndpoint, converted from the data, to
	// avoid recomputation. The atomic.Pointer makes reads/writes safe from
	// concurrent goroutines; see EnvoyEndpoint/ComputeEnvoyEndpoint.
	precomputedEnvoyEndpoint atomic.Pointer[endpoint.LbEndpoint]
}
// EnvoyEndpoint returns the cached precomputed LbEndpoint, or nil if none has been stored.
func (ep *IstioEndpoint) EnvoyEndpoint() *endpoint.LbEndpoint {
	return ep.precomputedEnvoyEndpoint.Load()
}

// ComputeEnvoyEndpoint stores a precomputed LbEndpoint for later retrieval via
// EnvoyEndpoint. Safe to call concurrently with readers (atomic pointer store).
func (ep *IstioEndpoint) ComputeEnvoyEndpoint(now *endpoint.LbEndpoint) {
	ep.precomputedEnvoyEndpoint.Store(now)
}

// SupportsTunnel reports whether this endpoint's labels advertise support for
// the given tunnel type (see the package-level SupportsTunnel).
func (ep *IstioEndpoint) SupportsTunnel(tunnelType string) bool {
	return SupportsTunnel(ep.Labels, tunnelType)
}
// GetLoadBalancingWeight returns the weight for this endpoint, normalized to always be > 0.
func (ep *IstioEndpoint) GetLoadBalancingWeight() uint32 {
	// An unset (zero) weight is normalized to the minimum valid weight of 1.
	if w := ep.LbWeight; w > 0 {
		return w
	}
	return 1
}
// IsDiscoverableFromProxy indicates whether this endpoint is discoverable from the given Proxy.
func (ep *IstioEndpoint) IsDiscoverableFromProxy(p *Proxy) bool {
	if ep == nil || ep.DiscoverabilityPolicy == nil {
		// If no policy was assigned, default to discoverable mesh-wide.
		// TODO(nmittler): Will need to re-think this default when cluster.local is actually cluster-local.
		return true
	}
	return ep.DiscoverabilityPolicy.IsDiscoverableFromProxy(ep, p)
}

// MetadataClone returns the cloned endpoint metadata used for telemetry purposes.
// This should be used when the endpoint labels should be updated.
func (ep *IstioEndpoint) MetadataClone() *EndpointMetadata {
	return &EndpointMetadata{
		Network:      ep.Network,
		TLSMode:      ep.TLSMode,
		WorkloadName: ep.WorkloadName,
		Namespace:    ep.Namespace,
		// Labels are cloned here, unlike Metadata(), so callers may mutate them.
		Labels:    maps.Clone(ep.Labels),
		ClusterID: ep.Locality.ClusterID,
	}
}

// Metadata returns the endpoint metadata used for telemetry purposes.
// Note: Labels is shared with the endpoint (not cloned); use MetadataClone
// when the labels will be mutated.
func (ep *IstioEndpoint) Metadata() *EndpointMetadata {
	return &EndpointMetadata{
		Network:      ep.Network,
		TLSMode:      ep.TLSMode,
		WorkloadName: ep.WorkloadName,
		Namespace:    ep.Namespace,
		Labels:       ep.Labels,
		ClusterID:    ep.Locality.ClusterID,
	}
}
// istioEndpointCmpOpts are the go-cmp options needed to compare IstioEndpoint objects:
// unexported fields (the cached envoy endpoint) are ignored, and discoverability
// policies are compared by name.
// NOTE(review): cmp.AllowUnexported() with no type arguments permits nothing and
// looks like a no-op — confirm whether it can be dropped.
var istioEndpointCmpOpts = []cmp.Option{cmpopts.IgnoreUnexported(IstioEndpoint{}), endpointDiscoverabilityPolicyImplCmpOpt, cmp.AllowUnexported()}

// CmpOpts returns the go-cmp options required to compare IstioEndpoint objects.
func (ep *IstioEndpoint) CmpOpts() []cmp.Option {
	return istioEndpointCmpOpts
}

// EndpointMetadata represents metadata set on Envoy LbEndpoint used for telemetry purposes.
type EndpointMetadata struct {
	// Network holds the network where this endpoint is present
	Network network.ID

	// TLSMode endpoint is injected with istio sidecar and ready to configure Istio mTLS
	TLSMode string

	// Name of the workload that this endpoint belongs to. This is for telemetry purpose.
	WorkloadName string

	// Namespace that this endpoint belongs to. This is for telemetry purpose.
	Namespace string

	// Labels points to the workload or deployment labels.
	Labels labels.Instance

	// ClusterID where the endpoint is located
	ClusterID cluster.ID
}

// EndpointDiscoverabilityPolicy determines the discoverability of an endpoint throughout the mesh.
type EndpointDiscoverabilityPolicy interface {
	// IsDiscoverableFromProxy indicates whether an endpoint is discoverable from the given Proxy.
	IsDiscoverableFromProxy(*IstioEndpoint, *Proxy) bool

	// String returns name of this policy.
	String() string
}

// endpointDiscoverabilityPolicyImpl is the standard EndpointDiscoverabilityPolicy
// implementation: a named predicate function.
type endpointDiscoverabilityPolicyImpl struct {
	name string
	f    func(*IstioEndpoint, *Proxy) bool
}

// IsDiscoverableFromProxy delegates to the wrapped predicate.
func (p *endpointDiscoverabilityPolicyImpl) IsDiscoverableFromProxy(ep *IstioEndpoint, proxy *Proxy) bool {
	return p.f(ep, proxy)
}

// String returns the policy's name.
func (p *endpointDiscoverabilityPolicyImpl) String() string {
	return p.name
}

// endpointDiscoverabilityPolicyImplCmpOpt compares policies by name only, since
// the predicate functions themselves are not comparable.
var endpointDiscoverabilityPolicyImplCmpOpt = cmp.Comparer(func(x, y endpointDiscoverabilityPolicyImpl) bool {
	return x.String() == y.String()
})

// CmpOpts returns the go-cmp options required to compare discoverability policies.
func (p *endpointDiscoverabilityPolicyImpl) CmpOpts() []cmp.Option {
	return []cmp.Option{endpointDiscoverabilityPolicyImplCmpOpt}
}

// AlwaysDiscoverable is an EndpointDiscoverabilityPolicy that allows an endpoint to be discoverable throughout the mesh.
var AlwaysDiscoverable EndpointDiscoverabilityPolicy = &endpointDiscoverabilityPolicyImpl{
	name: "AlwaysDiscoverable",
	f: func(*IstioEndpoint, *Proxy) bool {
		return true
	},
}

// DiscoverableFromSameCluster is an EndpointDiscoverabilityPolicy that only allows an endpoint to be discoverable
// from proxies within the same cluster.
var DiscoverableFromSameCluster EndpointDiscoverabilityPolicy = &endpointDiscoverabilityPolicyImpl{
	name: "DiscoverableFromSameCluster",
	f: func(ep *IstioEndpoint, p *Proxy) bool {
		return p.InCluster(ep.Locality.ClusterID)
	},
}
// ServiceAttributes represents a group of custom attributes of the service.
type ServiceAttributes struct {
	// ServiceRegistry indicates the backing service registry system where this service
	// was sourced from.
	// TODO: move the ServiceRegistry type from platform.go to model
	ServiceRegistry provider.ID

	// Name is "destination.service.name" attribute
	Name string

	// Namespace is "destination.service.namespace" attribute
	Namespace string

	// Labels applied to the service
	Labels map[string]string

	// ExportTo defines the visibility of Service in
	// a namespace when the namespace is imported.
	ExportTo sets.Set[visibility.Instance]

	// LabelSelectors are the labels used by the service to select workloads.
	// Applicable to both Kubernetes and ServiceEntries.
	LabelSelectors map[string]string

	// Aliases is the resolved set of aliases for this service. This is computed based on a global view of all Service's `AliasFor`
	// fields.
	// For example, if I had two Services with `externalName: foo`, "a" and "b", then the "foo" service would have Aliases=[a,b].
	Aliases []NamespacedHostname

	// For Kubernetes platform

	// ClusterExternalAddresses is a mapping between a cluster name and the external
	// address(es) to access the service from outside the cluster.
	// Used by the aggregator to aggregate the Attributes.ClusterExternalAddresses
	// for clusters where the service resides
	ClusterExternalAddresses *AddressMap

	// ClusterExternalPorts is a mapping between a cluster name and the service port
	// to node port mappings for a given service. When accessing the service via
	// node port IPs, we need to use the kubernetes assigned node ports of the service
	// The port that the user provides in the meshNetworks config is the service port.
	// We translate that to the appropriate node port here.
	ClusterExternalPorts map[cluster.ID]map[uint32]uint32

	K8sAttributes
}

// NamespacedHostname pairs a hostname with the namespace it is defined in.
type NamespacedHostname struct {
	Hostname  host.Name
	Namespace string
}

// K8sAttributes holds Kubernetes-specific service attributes.
type K8sAttributes struct {
	// Type holds the value of the corev1.Type of the Kubernetes service
	// spec.Type
	Type string

	// spec.ExternalName
	ExternalName string

	// NodeLocal means the proxy will only forward traffic to node local endpoints
	// spec.InternalTrafficPolicy == Local
	NodeLocal bool
}
// DeepCopy creates a deep copy of ServiceAttributes, but skips internal mutexes.
func (s *ServiceAttributes) DeepCopy() ServiceAttributes {
	// AddressMap contains a mutex, which is safe to copy in this case.
	// nolint: govet
	out := *s

	// maps.Clone preserves nil-ness (a nil map stays nil), matching the
	// previous hand-rolled "if non-nil then copy" loops.
	out.Labels = maps.Clone(s.Labels)
	out.LabelSelectors = maps.Clone(s.LabelSelectors)

	if s.ExportTo != nil {
		out.ExportTo = s.ExportTo.Copy()
	}

	out.ClusterExternalAddresses = s.ClusterExternalAddresses.DeepCopy()

	if s.ClusterExternalPorts != nil {
		out.ClusterExternalPorts = make(map[cluster.ID]map[uint32]uint32, len(s.ClusterExternalPorts))
		for k, m := range s.ClusterExternalPorts {
			// Clone keeps nil inner maps nil, as before.
			out.ClusterExternalPorts[k] = maps.Clone(m)
		}
	}

	out.Aliases = slices.Clone(s.Aliases)

	// AddressMap contains a mutex, which is safe to return a copy in this case.
	// nolint: govet
	return out
}
// Equals checks whether the attributes are equal from the passed in service.
func (s *ServiceAttributes) Equals(other *ServiceAttributes) bool {
	if s == nil {
		return other == nil
	}
	// s is non-nil here, so a nil other can never be equal.
	if other == nil {
		return false
	}

	if !maps.Equal(s.Labels, other.Labels) {
		return false
	}

	if !maps.Equal(s.LabelSelectors, other.LabelSelectors) {
		return false
	}

	if !maps.Equal(s.ExportTo, other.ExportTo) {
		return false
	}

	if !slices.Equal(s.Aliases, other.Aliases) {
		return false
	}

	if s.ClusterExternalAddresses.Len() != other.ClusterExternalAddresses.Len() {
		return false
	}

	// Take a locked snapshot of the other side's addresses via GetAddresses;
	// the previous code read other.ClusterExternalAddresses.Addresses directly,
	// bypassing the AddressMap mutex.
	otherAddresses := other.ClusterExternalAddresses.GetAddresses()
	for k, v1 := range s.ClusterExternalAddresses.GetAddresses() {
		if v2, ok := otherAddresses[k]; !ok || !slices.Equal(v1, v2) {
			return false
		}
	}

	if len(s.ClusterExternalPorts) != len(other.ClusterExternalPorts) {
		return false
	}

	for k, v1 := range s.ClusterExternalPorts {
		// Bug fix: look up the key in other's ports. The original looked up
		// s.ClusterExternalPorts[k], which is always present, so the loop
		// compared s against itself and never detected differing ports.
		if v2, ok := other.ClusterExternalPorts[k]; !ok || !maps.Equal(v1, v2) {
			return false
		}
	}

	return s.Name == other.Name && s.Namespace == other.Namespace &&
		s.ServiceRegistry == other.ServiceRegistry && s.K8sAttributes == other.K8sAttributes
}
// ServiceDiscovery enumerates Istio service instances.
// nolint: lll
type ServiceDiscovery interface {
	NetworkGatewaysWatcher

	// Services list declarations of all services in the system
	Services() []*Service

	// GetService retrieves a service by host name if it exists
	GetService(hostname host.Name) *Service

	// GetProxyServiceTargets returns the service targets that co-located with a given Proxy
	//
	// Co-located generally means running in the same network namespace and security context.
	//
	// A Proxy operating as a Sidecar will return a non-empty slice. A stand-alone Proxy
	// will return an empty slice.
	//
	// There are two reasons why this returns multiple ServiceTargets instead of one:
	// - A ServiceTargets has a single Port. But a Service
	//   may have many ports. So a workload implementing such a Service would need
	//   multiple ServiceTargets, one for each port.
	// - A single workload may implement multiple logical Services.
	//
	// In the second case, multiple services may be implemented by the same physical port number,
	// though with a different ServicePort and IstioEndpoint for each. If any of these overlapping
	// services are not HTTP or H2-based, behavior is undefined, since the listener may not be able to
	// determine the intended destination of a connection without a Host header on the request.
	GetProxyServiceTargets(*Proxy) []ServiceTarget
	GetProxyWorkloadLabels(*Proxy) labels.Instance

	// MCSServices returns information about the services that have been exported/imported via the
	// Kubernetes Multi-Cluster Services (MCS) ServiceExport API. Only applies to services in
	// Kubernetes clusters.
	MCSServices() []MCSServiceInfo
	AmbientIndexes
}

// AmbientIndexes provides lookups used by ambient-mode (workload-oriented) discovery.
type AmbientIndexes interface {
	// AddressInformation returns the AddressInfos for the requested addresses, along
	// with a set of addresses that were removed — TODO confirm against implementers.
	AddressInformation(addresses sets.String) ([]*AddressInfo, sets.String)
	// AdditionalPodSubscriptions returns extra addresses the given proxy should be
	// subscribed to, beyond currentSubs — presumably derived from allAddresses; verify.
	AdditionalPodSubscriptions(
		proxy *Proxy,
		allAddresses sets.String,
		currentSubs sets.String,
	) sets.String
	// Policies returns the authorization policies for the requested config keys.
	Policies(requested sets.Set[ConfigKey]) []*security.Authorization
	// Waypoint returns the addresses of the waypoint proxies for the given scope.
	Waypoint(scope WaypointScope) []netip.Addr
	// WorkloadsForWaypoint returns the workloads served by the waypoint in the given scope.
	WorkloadsForWaypoint(scope WaypointScope) []*WorkloadInfo
}

// NoopAmbientIndexes provides an implementation of AmbientIndexes that always returns nil, to easily "skip" it.
type NoopAmbientIndexes struct{}

func (u NoopAmbientIndexes) AddressInformation(sets.String) ([]*AddressInfo, sets.String) {
	return nil, nil
}

func (u NoopAmbientIndexes) AdditionalPodSubscriptions(
	*Proxy,
	sets.String,
	sets.String,
) sets.String {
	return nil
}

func (u NoopAmbientIndexes) Policies(sets.Set[ConfigKey]) []*security.Authorization {
	return nil
}

func (u NoopAmbientIndexes) Waypoint(WaypointScope) []netip.Addr {
	return nil
}

func (u NoopAmbientIndexes) WorkloadsForWaypoint(scope WaypointScope) []*WorkloadInfo {
	return nil
}

// Compile-time assertion that NoopAmbientIndexes implements AmbientIndexes.
var _ AmbientIndexes = NoopAmbientIndexes{}

// AddressInfo wraps a workloadapi Address (either a Workload or a Service).
type AddressInfo struct {
	*workloadapi.Address
}
// Aliases returns the list of network-scoped aliases ("<network>/<ip>") for this
// address, covering both the workload and service variants. Returns nil for any
// other (or unset) address type.
func (i AddressInfo) Aliases() []string {
	switch addr := i.Type.(type) {
	case *workloadapi.Address_Workload:
		aliases := make([]string, 0, len(addr.Workload.Addresses))
		network := addr.Workload.Network
		for _, workloadAddr := range addr.Workload.Addresses {
			// Error deliberately ignored: an invalid byte slice yields the zero
			// netip.Addr, whose String() is "invalid IP" rather than panicking.
			ip, _ := netip.AddrFromSlice(workloadAddr)
			aliases = append(aliases, network+"/"+ip.String())
		}
		return aliases
	case *workloadapi.Address_Service:
		aliases := make([]string, 0, len(addr.Service.Addresses))
		for _, networkAddr := range addr.Service.Addresses {
			// Service addresses carry their network per-address, unlike workloads.
			ip, _ := netip.AddrFromSlice(networkAddr.Address)
			aliases = append(aliases, networkAddr.Network+"/"+ip.String())
		}
		return aliases
	}
	return nil
}
// ResourceName returns the unique XDS resource name for this address: the
// workload UID for workloads, "<namespace>/<hostname>" for services, and ""
// for any other address type.
func (i AddressInfo) ResourceName() string {
	switch addr := i.Type.(type) {
	case *workloadapi.Address_Workload:
		return workloadResourceName(addr.Workload)
	case *workloadapi.Address_Service:
		return serviceResourceName(addr.Service)
	default:
		return ""
	}
}
// ServiceInfo wraps a workloadapi Service for use as an XDS resource.
type ServiceInfo struct {
	*workloadapi.Service
}

// ResourceName returns the XDS resource name for this service.
func (i ServiceInfo) ResourceName() string {
	return serviceResourceName(i.Service)
}

// serviceResourceName builds the "<namespace>/<hostname>" resource name for a service.
func serviceResourceName(s *workloadapi.Service) string {
	return s.Namespace + "/" + s.Hostname
}

// WorkloadSource identifies the kind of object a WorkloadInfo was derived from.
type WorkloadSource string

const (
	WorkloadSourcePod           WorkloadSource = "pod"
	WorkloadSourceServiceEntry  WorkloadSource = "serviceentry"
	WorkloadSourceWorkloadEntry WorkloadSource = "workloadentry"
)

// WorkloadInfo wraps a workloadapi Workload with internal-only bookkeeping.
type WorkloadInfo struct {
	*workloadapi.Workload
	// Labels for the workload. Note these are only used internally, not sent over XDS
	Labels map[string]string
	// Source of the workload. Note this is used internally only.
	Source WorkloadSource
	// CreationTime is the time when the workload was created. Note this is used internally only.
	CreationTime time.Time
}

// workloadResourceName returns the XDS resource name for a workload: its UID.
func workloadResourceName(w *workloadapi.Workload) string {
	return w.Uid
}
// Clone returns a deep copy of the WorkloadInfo, cloning the embedded proto
// message and the internal label map.
func (i *WorkloadInfo) Clone() *WorkloadInfo {
	return &WorkloadInfo{
		// Clone the embedded proto explicitly. The previous code cloned the outer
		// struct (proto.Clone(i)), which relied on proto method forwarding through
		// the embedded field; cloning i.Workload is direct and unambiguous.
		Workload:     proto.Clone(i.Workload).(*workloadapi.Workload),
		Labels:       maps.Clone(i.Labels),
		Source:       i.Source,
		CreationTime: i.CreationTime,
	}
}
// ResourceName returns the XDS resource name for this workload (its UID).
func (i *WorkloadInfo) ResourceName() string {
	return workloadResourceName(i.Workload)
}

// ExtractWorkloadsFromAddresses filters the given addresses down to the workload
// entries, dropping service addresses. Presumably MapFilter drops nil results and
// dereferences the rest — verify against the slices helper's contract.
func ExtractWorkloadsFromAddresses(addrs []*AddressInfo) []WorkloadInfo {
	return slices.MapFilter(addrs, func(a *AddressInfo) *WorkloadInfo {
		switch addr := a.Type.(type) {
		case *workloadapi.Address_Workload:
			return &WorkloadInfo{Workload: addr.Workload}
		default:
			return nil
		}
	})
}
// SortWorkloadsByCreationTime stably sorts the slice in place by creation time
// (oldest first) and returns it. Equal creation times are tie-broken by UID so
// that the resulting order is fully deterministic.
func SortWorkloadsByCreationTime(workloads []*WorkloadInfo) []*WorkloadInfo {
	sort.SliceStable(workloads, func(a, b int) bool {
		wa, wb := workloads[a], workloads[b]
		if wa.CreationTime.Equal(wb.CreationTime) {
			return wa.Uid < wb.Uid
		}
		return wa.CreationTime.Before(wb.CreationTime)
	})
	return workloads
}
// MCSServiceInfo combines the name of a service with a particular Kubernetes cluster. This
// is used for debug information regarding the state of Kubernetes Multi-Cluster Services (MCS).
type MCSServiceInfo struct {
	// Cluster the service information applies to.
	Cluster cluster.ID
	// Name and Namespace identify the service within the cluster.
	Name      string
	Namespace string
	// Exported/Imported indicate the MCS ServiceExport/import state of the service.
	Exported bool
	Imported bool
	// ClusterSetVIP is the clusterset-wide virtual IP, if any.
	ClusterSetVIP string
	// Discoverability maps hostnames to their discoverability description — TODO confirm semantics.
	Discoverability map[host.Name]string
}
// GetNames returns the names of all ports, in order.
func (ports PortList) GetNames() []string {
	names := make([]string, len(ports))
	for i, p := range ports {
		names[i] = p.Name
	}
	return names
}
// Get retrieves a port declaration by name. The second return value reports
// whether a port with that name was found.
func (ports PortList) Get(name string) (*Port, bool) {
	for _, p := range ports {
		if p.Name == name {
			return p, true
		}
	}
	return nil, false
}
// GetByPort retrieves a port declaration by port number. UDP ports are skipped,
// so a UDP port sharing a number with a TCP port is never returned here.
func (ports PortList) GetByPort(num int) (*Port, bool) {
	for _, p := range ports {
		if p.Port == num && p.Protocol != protocol.UDP {
			return p, true
		}
	}
	return nil, false
}
// Equals reports whether two ports have the same name, number, and protocol.
// Two nil ports are equal; a nil and non-nil port are not.
func (p *Port) Equals(other *Port) bool {
	if p == nil {
		return other == nil
	}
	// p is non-nil here, so the original `return p == nil` in this branch was
	// always false; return false directly to make that explicit.
	if other == nil {
		return false
	}
	return p.Name == other.Name && p.Port == other.Port && p.Protocol == other.Protocol
}
// Equals reports whether two port lists contain equal ports in the same order.
func (ports PortList) Equals(other PortList) bool {
	return slices.EqualFunc(ports, other, func(a, b *Port) bool {
		return a.Equals(b)
	})
}
// String renders the port list as a comma-separated list of per-port summaries.
func (ports PortList) String() string {
	parts := make([]string, len(ports))
	for i, p := range ports {
		parts[i] = p.String()
	}
	return strings.Join(parts, ", ")
}
// External predicate checks whether the service is external
func (s *Service) External() bool {
	return s.MeshExternal
}

// BuildSubsetKey generates a unique string referencing service instances for a given service name, a subset and a port.
// The proxy queries Pilot with this key to obtain the list of instances in a subset.
// Format: "<direction>|<port>|<subset>|<hostname>".
func BuildSubsetKey(direction TrafficDirection, subsetName string, hostname host.Name, port int) string {
	return string(direction) + "|" + strconv.Itoa(port) + "|" + subsetName + "|" + string(hostname)
}

// BuildInboundSubsetKey generates a unique string referencing service instances with port.
// The subset and hostname components are left empty.
func BuildInboundSubsetKey(port int) string {
	return BuildSubsetKey(TrafficDirectionInbound, "", "", port)
}

// BuildDNSSrvSubsetKey generates a unique string referencing service instances for a given service name, a subset and a port.
// The proxy queries Pilot with this key to obtain the list of instances in a subset.
// This is used only for the SNI-DNAT router. Do not use for other purposes.
// The DNS Srv format of the cluster is also used as the default SNI string for Istio mTLS connections.
// Format: "<direction>_.<port>_.<subset>_.<hostname>" ("_." separator instead of "|").
func BuildDNSSrvSubsetKey(direction TrafficDirection, subsetName string, hostname host.Name, port int) string {
	return string(direction) + "_." + strconv.Itoa(port) + "_." + subsetName + "_." + string(hostname)
}
// IsValidSubsetKey checks if a string is valid for subset key parsing: it must
// contain exactly three "|" separators (i.e. four fields).
func IsValidSubsetKey(s string) bool {
	// SplitN with a cap of 5 distinguishes "exactly 3 separators" (4 parts)
	// from "4 or more" (5 parts) without splitting the whole string.
	return len(strings.SplitN(s, "|", 5)) == 4
}
// IsDNSSrvSubsetKey checks whether the given key is a DNSSrv key (built by BuildDNSSrvSubsetKey),
// i.e. starts with "outbound_" or "inbound_".
func IsDNSSrvSubsetKey(s string) bool {
	// Return the condition directly instead of `if cond { return true }; return false`.
	return strings.HasPrefix(s, trafficDirectionOutboundSrvPrefix) ||
		strings.HasPrefix(s, trafficDirectionInboundSrvPrefix)
}
// ParseSubsetKeyHostname is an optimized specialization of ParseSubsetKey that only returns the hostname.
// This is created as this is used in some hot paths and is about 2x faster than ParseSubsetKey; for typical use ParseSubsetKey is sufficient (and zero-alloc).
func ParseSubsetKeyHostname(s string) (hostname string) {
	idx := strings.LastIndex(s, "|")
	if idx == -1 {
		// Could be DNS SRV format.
		// Do not do LastIndex("_."), as those are valid characters in the hostname (unlike |)
		// Fallback to the full parser.
		// Note: this inner `hostname` (host.Name) shadows the named string return;
		// the explicit conversion below feeds the outer return value.
		_, _, hostname, _ := ParseSubsetKey(s)
		return string(hostname)
	}
	// The hostname is everything after the final "|".
	return s[idx+1:]
}

// ParseSubsetKey is the inverse of the BuildSubsetKey method.
// On any malformed input it returns early with zero values for the remaining fields.
func ParseSubsetKey(s string) (direction TrafficDirection, subsetName string, hostname host.Name, port int) {
	sep := "|"
	// This could be the DNS srv form of the cluster that uses outbound_.port_.subset_.hostname
	// Since we do not want every callsite to implement the logic to differentiate between the two forms
	// we add an alternate parser here.
	if strings.HasPrefix(s, trafficDirectionOutboundSrvPrefix) ||
		strings.HasPrefix(s, trafficDirectionInboundSrvPrefix) {
		sep = "_."
	}

	// Format: dir|port|subset|hostname
	dir, s, ok := strings.Cut(s, sep)
	if !ok {
		return
	}
	direction = TrafficDirection(dir)

	p, s, ok := strings.Cut(s, sep)
	if !ok {
		return
	}
	// Atoi error ignored: a non-numeric port field leaves port at 0.
	port, _ = strconv.Atoi(p)

	ss, s, ok := strings.Cut(s, sep)
	if !ok {
		return
	}
	subsetName = ss

	// last part. No | remains -- verify this
	if strings.Contains(s, sep) {
		return
	}
	hostname = host.Name(s)
	return
}
// GetAddresses returns a Service's addresses.
// This method returns all the VIPs of a service if the ClusterID is explicitly set to "", otherwise only return the VIP
// specific to the cluster where the node resides
func (s *Service) GetAddresses(node *Proxy) []string {
	if node.Metadata != nil && node.Metadata.ClusterID == "" {
		return s.getAllAddresses()
	}

	return []string{s.GetAddressForProxy(node)}
}

// GetAddressForProxy returns a Service's address specific to the cluster where the node resides
func (s *Service) GetAddressForProxy(node *Proxy) string {
	if node.Metadata != nil {
		// Prefer the VIP registered for the proxy's own cluster, if any.
		if node.Metadata.ClusterID != "" {
			addresses := s.ClusterVIPs.GetAddressesFor(node.Metadata.ClusterID)
			if len(addresses) > 0 {
				return addresses[0]
			}
		}

		// With DNS capture + auto-allocation enabled and no real VIP, fall back
		// to the auto-allocated address matching the proxy's IP family.
		if node.Metadata.DNSCapture && node.Metadata.DNSAutoAllocate && s.DefaultAddress == constants.UnspecifiedIP {
			if node.SupportsIPv4() && s.AutoAllocatedIPv4Address != "" {
				return s.AutoAllocatedIPv4Address
			}
			if node.SupportsIPv6() && s.AutoAllocatedIPv6Address != "" {
				return s.AutoAllocatedIPv6Address
			}
		}
	}

	return s.DefaultAddress
}

// GetExtraAddressesForProxy returns a k8s service's extra addresses to the cluster where the node resides.
// Especially for dual stack k8s service to get other IP family addresses.
func (s *Service) GetExtraAddressesForProxy(node *Proxy) []string {
	if features.EnableDualStack && node.Metadata != nil {
		if node.Metadata.ClusterID != "" {
			addresses := s.ClusterVIPs.GetAddressesFor(node.Metadata.ClusterID)
			// The first address is the primary (returned by GetAddressForProxy);
			// everything after it is the extra-family addresses.
			if len(addresses) > 1 {
				return addresses[1:]
			}
		}
	}
	return nil
}
// getAllAddresses returns a Service's all addresses.
func (s *Service) getAllAddresses() []string {
	// Flatten the per-cluster VIP map into one slice.
	// Note: map iteration order is unspecified, matching prior behavior.
	var out []string
	for _, clusterAddrs := range s.ClusterVIPs.GetAddresses() {
		out = append(out, clusterAddrs...)
	}
	return out
}
// GetTLSModeFromEndpointLabels returns the value of the label
// security.istio.io/tlsMode if set. Do not return Enums or constants
// from this function as users could provide values other than istio/disabled
// and apply custom transport socket matchers here.
func GetTLSModeFromEndpointLabels(labels map[string]string) string {
	// Indexing a nil map safely yields the zero value in Go, so the previous
	// explicit nil check was redundant.
	if val, exists := labels[label.SecurityTlsMode.Name]; exists {
		return val
	}
	return DisabledTLSModeLabel
}
// DeepCopy creates a clone of Service.
func (s *Service) DeepCopy() *Service {
	// Start with a shallow field copy, then deep-copy the reference-typed
	// fields below so the clone shares no mutable state with the original.
	// nolint: govet
	out := *s
	out.Attributes = s.Attributes.DeepCopy()
	if s.Ports != nil {
		out.Ports = make(PortList, len(s.Ports))
		for i, port := range s.Ports {
			if port != nil {
				// Copy each Port field-by-field so the clone owns its ports.
				out.Ports[i] = &Port{
					Name:     port.Name,
					Port:     port.Port,
					Protocol: port.Protocol,
				}
			} else {
				// Preserve nil entries in the same positions.
				out.Ports[i] = nil
			}
		}
	}
	if s.ServiceAccounts != nil {
		// Clone the slice so appends/mutations on the copy do not leak back.
		out.ServiceAccounts = make([]string, len(s.ServiceAccounts))
		copy(out.ServiceAccounts, s.ServiceAccounts)
	}
	out.ClusterVIPs = *s.ClusterVIPs.DeepCopy()
	return &out
}
// Equals compares two service objects.
func (s *Service) Equals(other *Service) bool {
	// Nil services are equal only to each other.
	if s == nil || other == nil {
		return s == nil && other == nil
	}
	if !s.Attributes.Equals(&other.Attributes) ||
		!s.Ports.Equals(other.Ports) ||
		!slices.Equal(s.ServiceAccounts, other.ServiceAccounts) {
		return false
	}
	// Compare the per-cluster VIP maps key by key.
	if len(s.ClusterVIPs.Addresses) != len(other.ClusterVIPs.Addresses) {
		return false
	}
	for cluster, addrs := range s.ClusterVIPs.Addresses {
		otherAddrs, ok := other.ClusterVIPs.Addresses[cluster]
		if !ok || !slices.Equal(addrs, otherAddrs) {
			return false
		}
	}
	// Finally compare the scalar fields.
	return s.DefaultAddress == other.DefaultAddress &&
		s.AutoAllocatedIPv4Address == other.AutoAllocatedIPv4Address &&
		s.AutoAllocatedIPv6Address == other.AutoAllocatedIPv6Address &&
		s.Hostname == other.Hostname &&
		s.Resolution == other.Resolution &&
		s.MeshExternal == other.MeshExternal
}
// DeepCopy creates a clone of IstioEndpoint.
func (ep *IstioEndpoint) DeepCopy() *IstioEndpoint {
	// Delegates to copystructure via copyInternal; panics only on internal
	// copy errors, which copyInternal documents as unreachable.
	return copyInternal(ep).(*IstioEndpoint)
}
// ShallowCopy creates a shallow clone of IstioEndpoint.
func (ep *IstioEndpoint) ShallowCopy() *IstioEndpoint {
	// Field-level copy only: any slice/map/pointer fields still alias the
	// original endpoint's data.
	// nolint: govet
	cpy := *ep
	return &cpy
}
// copyInternal deep-copies v using copystructure and panics on failure,
// which the comments below argue cannot happen in practice.
func copyInternal(v any) any {
	copied, err := copystructure.Copy(v)
	if err != nil {
		// There are 2 locations where errors are generated in copystructure.Copy:
		// * The reflection walk over the structure fails, which should never happen
		// * A configurable copy function returns an error. This is only used for copying times, which never returns an error.
		// Therefore, this should never happen
		panic(err)
	}
	return copied
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"sort"
"strings"
"k8s.io/apimachinery/pkg/types"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
const (
	// wildcardNamespace matches any namespace in a Sidecar egress host entry ("*/host").
	wildcardNamespace = "*"
	// currentNamespace refers to the namespace of the Sidecar resource itself ("./host").
	currentNamespace = "."
	// wildcardService matches any hostname ("ns/*").
	wildcardService = host.Name("*")
)
var (
	// sidecarScopedKnownConfigTypes lists the config kinds whose relevance is
	// tracked per SidecarScope via configDependencies (see DependsOnConfig).
	sidecarScopedKnownConfigTypes = sets.New(
		kind.ServiceEntry,
		kind.VirtualService,
		kind.DestinationRule,
		kind.Sidecar,
	)
	// clusterScopedKnownConfigTypes includes configs when they are in root namespace,
	// they will be applied to all namespaces within the cluster.
	clusterScopedKnownConfigTypes = sets.New(
		kind.EnvoyFilter,
		kind.AuthorizationPolicy,
		kind.RequestAuthentication,
		kind.WasmPlugin,
	)
)
// hostClassification groups the hosts imported for one namespace: exact names
// in a set for O(1) lookup, plus the full list (including wildcards) for
// semantic matching in Matches.
type hostClassification struct {
	// exactHosts holds only the non-wildcard hostnames.
	exactHosts sets.Set[host.Name]
	// allHosts holds every imported hostname, wildcard or exact.
	allHosts []host.Name
}
// Matches reports whether h is covered by this classification, either by an
// exact entry or by wildcard hostname matching rules.
func (hc hostClassification) Matches(h host.Name) bool {
	// Fast path: exact-set membership.
	if hc.exactHosts.Contains(h) {
		return true
	}
	// Slow path: wildcard-aware matching against every imported host.
	queryIsWildcard := h.IsWildCarded()
	for _, imported := range hc.allHosts {
		// Two exact hosts can only match via the set above, so skip them here.
		if !queryIsWildcard && !imported.IsWildCarded() {
			continue
		}
		// Standard hostname matching semantics (wildcard subsumption).
		if h.SubsetOf(imported) {
			return true
		}
	}
	return false
}
// SidecarScope is a wrapper over the Sidecar resource with some
// preprocessed data to determine the list of services, virtualServices,
// and destinationRules that are accessible to a given
// sidecar. Precomputing the list of services, virtual services, dest rules
// for a sidecar improves performance as we no longer need to compute this
// list for every sidecar. We simply have to match a sidecar to a
// SidecarScope. Note that this is not the same as public/private scoped
// services. The list of services seen by every sidecar scope (namespace
// wide or per workload) depends on the imports, the listeners, and other
// settings.
//
// Every proxy workload of SidecarProxy type will always map to a
// SidecarScope object. If the proxy's namespace does not have a user
// specified Sidecar CRD, we will construct one that has a catch all egress
// listener that imports every public service/virtualService in the mesh.
type SidecarScope struct {
	// Name of the Sidecar resource (or defaultSidecar for the synthesized scope).
	Name string
	// This is the namespace where the sidecar takes effect,
	// maybe different from the ns where sidecar resides if sidecar is in root ns.
	Namespace string
	// The cr itself. Can be nil if we are constructing the default
	// sidecar scope
	Sidecar *networking.Sidecar
	// Version this sidecar was computed for
	Version string
	// Set of egress listeners, and their associated services. A sidecar
	// scope should have either ingress/egress listeners or both. For
	// every proxy workload that maps to a sidecar API object (or the
	// default object), we will go through every egress listener in the
	// object and process the Envoy listener or RDS based on the imported
	// services/virtual services in that listener.
	EgressListeners []*IstioEgressListenerWrapper
	// Union of services imported across all egress listeners for use by CDS code.
	services []*Service
	// Index of the union above by hostname, for O(1) lookup (see GetService).
	servicesByHostname map[host.Name]*Service
	// Destination rules imported across all egress listeners. This
	// contains the computed set based on public/private destination rules
	// as well as the inherited ones, in addition to the wildcard matches
	// such as *.com applying to foo.bar.com. Each hostname in this map
	// corresponds to a service in the services array above. When computing
	// CDS, we simply have to find the matching service and return the
	// destination rule.
	destinationRules map[host.Name][]*ConsolidatedDestRule
	// Index of destination rules by their namespaced name (see DestinationRuleByName).
	destinationRulesByNames map[types.NamespacedName]*config.Config
	// OutboundTrafficPolicy defines the outbound traffic policy for this sidecar.
	// If OutboundTrafficPolicy is ALLOW_ANY traffic to unknown destinations will
	// be forwarded.
	OutboundTrafficPolicy *networking.OutboundTrafficPolicy
	// Set of known configs this sidecar depends on.
	// This field will be used to determine the config/resource scope
	// which means which config changes will affect the proxies within this scope.
	configDependencies sets.Set[ConfigHash]
}
// MarshalJSON implements json.Marshaler
func (sc *SidecarScope) MarshalJSON() ([]byte, error) {
	// encoding/json cannot see unexported fields, so expose the interesting
	// ones through an intermediate map before marshaling.
	view := map[string]any{
		"version":               sc.Version,
		"name":                  sc.Name,
		"namespace":             sc.Namespace,
		"outboundTrafficPolicy": sc.OutboundTrafficPolicy,
		"services":              sc.services,
		"servicesByHostname":    sc.servicesByHostname,
		"sidecar":               sc.Sidecar,
		"destinationRules":      sc.destinationRules,
	}
	return json.MarshalIndent(view, "", " ")
}
// IstioEgressListenerWrapper is a wrapper for
// networking.IstioEgressListener object. The wrapper provides performance
// optimizations as it allows us to precompute and store the list of
// services/virtualServices that apply to this listener.
type IstioEgressListenerWrapper struct {
	// The actual IstioEgressListener api object from the Config. It can be
	// nil if this is for the default sidecar scope.
	IstioListener *networking.IstioEgressListener
	// Specifies whether matching ports is required (see needsPortMatch).
	matchPort bool
	// List of services imported by this egress listener.
	// This will be used by LDS and RDS code when
	// building the set of virtual hosts or the tcp filterchain matches for
	// a given listener port. Two listeners, on user specified ports or
	// unix domain sockets could have completely different sets of
	// services. So a global list of services per sidecar scope will be
	// incorrect. Hence the per listener set of services.
	services []*Service
	// List of virtual services imported by this egress listener.
	// As with per listener services, this
	// will be used by RDS code to compute the virtual host configs for
	// http listeners, as well as by TCP/TLS filter code to compute the
	// service routing configs and the filter chain matches. We need a
	// virtualService set per listener and not one per sidecarScope because
	// each listener imports an independent set of virtual services.
	// Listener 1 could import a public virtual service for serviceA from
	// namespace A that has some path rewrite, while listener2 could import
	// a private virtual service for serviceA from the local namespace,
	// with a different path rewrite or no path rewrites.
	virtualServices []config.Config
	// An index of hostname to the namespaced name of the VirtualService containing the most
	// relevant host match. Depending on the `PERSIST_OLDEST_FIRST_HEURISTIC_FOR_VIRTUAL_SERVICE_HOST_MATCHING`
	// feature flag, it could be the most specific host match or the oldest host match.
	mostSpecificWildcardVsIndex map[host.Name]types.NamespacedName
}
// defaultSidecar is the Name used for the synthesized catch-all SidecarScope.
const defaultSidecar = "default-sidecar"
// DefaultSidecarScopeForNamespace is a sidecar scope object with a default catch all egress listener
// that matches the default Istio behavior: a sidecar has listeners for all services in the mesh
// We use this scope when the user has not set any sidecar Config for a given config namespace.
func DefaultSidecarScopeForNamespace(ps *PushContext, configNamespace string) *SidecarScope {
	// Synthesize a single "*/*" egress listener importing everything exported
	// to this namespace.
	defaultEgressListener := &IstioEgressListenerWrapper{
		IstioListener: &networking.IstioEgressListener{
			Hosts: []string{"*/*"},
		},
	}
	// TODO: merge services like sidecar specified using `addService`
	defaultEgressListener.services = ps.servicesExportedToNamespace(configNamespace)
	defaultEgressListener.virtualServices = ps.VirtualServicesForGateway(configNamespace, constants.IstioMeshGateway)
	defaultEgressListener.mostSpecificWildcardVsIndex = computeWildcardHostVirtualServiceIndex(
		defaultEgressListener.virtualServices, defaultEgressListener.services)
	out := &SidecarScope{
		Name:                    defaultSidecar,
		Namespace:               configNamespace,
		EgressListeners:         []*IstioEgressListenerWrapper{defaultEgressListener},
		destinationRules:        make(map[host.Name][]*ConsolidatedDestRule),
		destinationRulesByNames: make(map[types.NamespacedName]*config.Config),
		servicesByHostname:      make(map[host.Name]*Service, len(defaultEgressListener.services)),
		configDependencies:      make(sets.Set[ConfigHash]),
		Version:                 ps.PushVersion,
	}
	// Record every imported service (deduplicated by hostname) on the scope.
	servicesAdded := make(map[host.Name]sidecarServiceIndex)
	for _, listener := range out.EgressListeners {
		for _, s := range listener.services {
			out.appendSidecarServices(servicesAdded, s)
		}
		// add dependencies on delegate virtual services
		delegates := ps.DelegateVirtualServices(listener.virtualServices)
		for _, delegate := range delegates {
			out.AddConfigDependencies(delegate)
		}
		// Every config referenced by a virtual service becomes a dependency too.
		for _, vs := range listener.virtualServices {
			for _, cfg := range VirtualServiceDependencies(vs) {
				out.AddConfigDependencies(cfg.HashCode())
			}
		}
	}
	// Now that we have all the services that sidecars using this scope (in
	// this config namespace) will see, identify all the destinationRules
	// that these services need
	for _, s := range out.services {
		if dr := ps.destinationRule(configNamespace, s); dr != nil {
			out.destinationRules[s.Hostname] = dr
			for _, cdr := range dr {
				for _, from := range cdr.from {
					out.destinationRulesByNames[from] = cdr.rule
					out.AddConfigDependencies(ConfigKey{
						Kind:      kind.DestinationRule,
						Name:      from.Name,
						Namespace: from.Namespace,
					}.HashCode())
				}
			}
		}
		// Depend on the ServiceEntry (if any) behind each imported hostname.
		out.AddConfigDependencies(ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      string(s.Hostname),
			Namespace: s.Attributes.Namespace,
		}.HashCode())
	}
	// The default scope inherits the mesh-wide outbound traffic policy.
	if ps.Mesh.OutboundTrafficPolicy != nil {
		out.OutboundTrafficPolicy = &networking.OutboundTrafficPolicy{
			Mode: networking.OutboundTrafficPolicy_Mode(ps.Mesh.OutboundTrafficPolicy.Mode),
		}
	}
	return out
}
// convertToSidecarScope converts from Sidecar config to SidecarScope object
func convertToSidecarScope(ps *PushContext, sidecarConfig *config.Config, configNamespace string) *SidecarScope {
	// No user-specified Sidecar: fall back to the synthesized default scope.
	if sidecarConfig == nil {
		return DefaultSidecarScopeForNamespace(ps, configNamespace)
	}
	sidecar := sidecarConfig.Spec.(*networking.Sidecar)
	out := &SidecarScope{
		Name:               sidecarConfig.Name,
		Namespace:          configNamespace,
		Sidecar:            sidecar,
		servicesByHostname: make(map[host.Name]*Service),
		configDependencies: make(sets.Set[ConfigHash]),
		Version:            ps.PushVersion,
	}
	// The scope always depends on its own Sidecar resource.
	out.AddConfigDependencies(ConfigKey{
		Kind:      kind.Sidecar,
		Name:      sidecarConfig.Name,
		Namespace: sidecarConfig.Namespace,
	}.HashCode())
	egressConfigs := sidecar.Egress
	// If egress not set, setup a default listener
	if len(egressConfigs) == 0 {
		egressConfigs = append(egressConfigs, &networking.IstioEgressListener{Hosts: []string{"*/*"}})
	}
	out.EgressListeners = make([]*IstioEgressListenerWrapper, 0, len(egressConfigs))
	for _, e := range egressConfigs {
		out.EgressListeners = append(out.EgressListeners,
			convertIstioListenerToWrapper(ps, configNamespace, e))
	}
	// Now collect all the imported services across all egress listeners in
	// this sidecar crd. This is needed to generate CDS output
	servicesAdded := make(map[host.Name]sidecarServiceIndex)
	for _, listener := range out.EgressListeners {
		// First add the explicitly requested services, which take priority
		for _, s := range listener.services {
			out.appendSidecarServices(servicesAdded, s)
		}
		// add dependencies on delegate virtual services
		delegates := ps.DelegateVirtualServices(listener.virtualServices)
		for _, delegate := range delegates {
			out.AddConfigDependencies(delegate)
		}
		// Infer more possible destinations from virtual services
		// Services chosen here will not override services explicitly requested in listener.services.
		// That way, if there is ambiguity around what hostname to pick, a user can specify the one they
		// want in the hosts field, and the potentially random choice below won't matter
		for _, vs := range listener.virtualServices {
			for _, cfg := range VirtualServiceDependencies(vs) {
				out.AddConfigDependencies(cfg.HashCode())
			}
			v := vs.Spec.(*networking.VirtualService)
			for h, ports := range virtualServiceDestinations(v) {
				// Default to this hostname in our config namespace
				if s, ok := ps.ServiceIndex.HostnameAndNamespace[host.Name(h)][configNamespace]; ok {
					// This won't overwrite hostnames that have already been found eg because they were requested in hosts
					var vss *Service
					if listener.matchPort {
						vss = serviceMatchingListenerPort(s, listener)
					} else {
						vss = serviceMatchingVirtualServicePorts(s, ports)
					}
					if vss != nil {
						out.appendSidecarServices(servicesAdded, vss)
					}
				} else {
					// We couldn't find the hostname in our config namespace
					// We have to pick one arbitrarily for now, so we'll pick the first namespace alphabetically
					// TODO: could we choose services more intelligently based on their ports?
					byNamespace := ps.ServiceIndex.HostnameAndNamespace[host.Name(h)]
					if len(byNamespace) == 0 {
						// This hostname isn't found anywhere
						log.Debugf("Could not find service hostname %s parsed from %s", h, vs.Key())
						continue
					}
					// Only consider namespaces whose service is visible to this namespace.
					ns := make([]string, 0, len(byNamespace))
					for k := range byNamespace {
						if ps.IsServiceVisible(byNamespace[k], configNamespace) {
							ns = append(ns, k)
						}
					}
					if len(ns) > 0 {
						sort.Strings(ns)
						// Pick first namespace alphabetically
						// This won't overwrite hostnames that have already been found eg because they were requested in hosts
						var vss *Service
						if listener.matchPort {
							vss = serviceMatchingListenerPort(byNamespace[ns[0]], listener)
						} else {
							vss = serviceMatchingVirtualServicePorts(byNamespace[ns[0]], ports)
						}
						if vss != nil {
							out.appendSidecarServices(servicesAdded, vss)
						}
					}
				}
			}
		}
	}
	// Now that we have all the services that sidecars using this scope (in
	// this config namespace) will see, identify all the destinationRules
	// that these services need
	out.destinationRules = make(map[host.Name][]*ConsolidatedDestRule)
	out.destinationRulesByNames = make(map[types.NamespacedName]*config.Config)
	for _, s := range out.services {
		drList := ps.destinationRule(configNamespace, s)
		if drList != nil {
			out.destinationRules[s.Hostname] = drList
			for _, dr := range drList {
				for _, key := range dr.from {
					out.AddConfigDependencies(ConfigKey{
						Kind:      kind.DestinationRule,
						Name:      key.Name,
						Namespace: key.Namespace,
					}.HashCode())
					out.destinationRulesByNames[key] = dr.rule
				}
			}
		}
		// Depend on the ServiceEntry (if any) behind each imported hostname.
		out.AddConfigDependencies(ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      string(s.Hostname),
			Namespace: s.Attributes.Namespace,
		}.HashCode())
	}
	// A policy on the Sidecar resource wins; otherwise inherit the mesh-wide one.
	if sidecar.OutboundTrafficPolicy == nil {
		if ps.Mesh.OutboundTrafficPolicy != nil {
			out.OutboundTrafficPolicy = &networking.OutboundTrafficPolicy{
				Mode: networking.OutboundTrafficPolicy_Mode(ps.Mesh.OutboundTrafficPolicy.Mode),
			}
		}
	} else {
		out.OutboundTrafficPolicy = sidecar.OutboundTrafficPolicy
	}
	return out
}
// convertIstioListenerToWrapper precomputes the services and virtual services
// imported by a single egress listener, grouping the listener's host entries
// by namespace first.
func convertIstioListenerToWrapper(ps *PushContext, configNamespace string,
	istioListener *networking.IstioEgressListener,
) *IstioEgressListenerWrapper {
	out := &IstioEgressListenerWrapper{
		IstioListener: istioListener,
		matchPort:     needsPortMatch(istioListener),
	}
	// Group hosts by namespace, classifying each as exact or wildcard.
	hostsByNamespace := make(map[string]hostClassification)
	for _, h := range istioListener.Hosts {
		// Hosts must be of the form namespace/dnsName; strings.Cut replaces the
		// previous SplitN+length-check with the idiomatic single-split form.
		ns, hostname, ok := strings.Cut(h, "/")
		if !ok {
			log.Errorf("Illegal host in sidecar resource: %s, host must be of form namespace/dnsName", h)
			continue
		}
		// "." refers to the namespace the sidecar takes effect in.
		if ns == currentNamespace {
			ns = configNamespace
		}
		hName := host.Name(hostname)
		// Fetch-or-create the classification once instead of re-looking-up the
		// map for the existence check, the insert, and the append.
		hc, exists := hostsByNamespace[ns]
		if !exists {
			hc = hostClassification{exactHosts: sets.New[host.Name](), allHosts: make([]host.Name, 0)}
		}
		// exact hosts are saved separately for map lookup
		if !hName.IsWildCarded() {
			hc.exactHosts.Insert(hName)
		}
		// allHosts contains the exact hosts and wildcard hosts,
		// since SelectVirtualServices will use `Matches` semantic matching.
		hc.allHosts = append(hc.allHosts, hName)
		hostsByNamespace[ns] = hc
	}
	out.virtualServices = SelectVirtualServices(ps.virtualServiceIndex, configNamespace, hostsByNamespace)
	svces := ps.servicesExportedToNamespace(configNamespace)
	out.services = out.selectServices(svces, configNamespace, hostsByNamespace)
	out.mostSpecificWildcardVsIndex = computeWildcardHostVirtualServiceIndex(out.virtualServices, out.services)
	return out
}
// GetEgressListenerForRDS returns the egress listener corresponding to
// the listener port or the bind address or the catch all listener
func (sc *SidecarScope) GetEgressListenerForRDS(port int, bind string) *IstioEgressListenerWrapper {
	if sc == nil {
		return nil
	}
	for _, wrapper := range sc.EgressListeners {
		// A listener without a port spec is the catch-all; it is always the
		// last entry, so return it as-is.
		l := wrapper.IstioListener
		if l == nil || l.Port == nil {
			return wrapper
		}
		if int(l.Port.Number) != port {
			continue
		}
		// Port 0 denotes a unix domain socket, which must also match on bind;
		// any non-zero port match is sufficient on its own.
		if port != 0 || l.Bind == bind {
			return wrapper
		}
		// UDS with a different bind: keep searching.
	}
	// This should never be reached unless user explicitly set an empty array for egress
	// listeners which we actually forbid
	return nil
}
// HasIngressListener returns if the sidecar scope has ingress listener set
func (sc *SidecarScope) HasIngressListener() bool {
	// A nil scope or a scope without an explicit Sidecar has no ingress config.
	return sc != nil && sc.Sidecar != nil && len(sc.Sidecar.Ingress) > 0
}
// InboundConnectionPoolForPort returns the connection pool settings for a specific inbound port. If there's not a
// setting for that specific port, then the settings at the Sidecar resource are returned. If neither exist,
// then nil is returned so the caller can decide what values to fall back on.
func (sc *SidecarScope) InboundConnectionPoolForPort(port int) *networking.ConnectionPoolSettings {
	if sc == nil || sc.Sidecar == nil {
		return nil
	}
	// Prefer a per-port setting when one exists for this port.
	for _, ingress := range sc.Sidecar.Ingress {
		if int(ingress.Port.Number) == port && ingress.GetConnectionPool() != nil {
			return ingress.ConnectionPool
		}
	}
	// if set, it'll be non-nil and have values (guaranteed by validation); or if unset it'll be nil
	return sc.Sidecar.GetInboundConnectionPool()
}
// Services returns the list of services imported by this egress listener.
// The returned slice is shared; callers must not mutate it.
func (ilw *IstioEgressListenerWrapper) Services() []*Service {
	return ilw.services
}
// VirtualServices returns the list of virtual services imported by this
// egress listener. The returned slice is shared; callers must not mutate it.
func (ilw *IstioEgressListenerWrapper) VirtualServices() []config.Config {
	return ilw.virtualServices
}
// MostSpecificWildcardServiceIndex returns the per-hostname VirtualService index
// for this egress listener (most-specific or oldest host match, depending on the
// feature flag; see mostSpecificWildcardVsIndex).
func (ilw *IstioEgressListenerWrapper) MostSpecificWildcardServiceIndex() map[host.Name]types.NamespacedName {
	return ilw.mostSpecificWildcardVsIndex
}
// DependsOnConfig determines if the proxy depends on the given config.
// Returns true when this scope depends on the config, or when the config kind
// is not scoped (unknown kinds are conservatively treated as dependencies).
func (sc *SidecarScope) DependsOnConfig(config ConfigKey, rootNs string) bool {
	if sc == nil {
		return true
	}
	// This kind of config will trigger a change if made in the root namespace or the same namespace
	if clusterScopedKnownConfigTypes.Contains(config.Kind) {
		return config.Namespace == rootNs || config.Namespace == sc.Namespace
	}
	// This kind of config is unknown to sidecarScope.
	// (Use Contains for consistency with the cluster-scoped check above.)
	if !sidecarScopedKnownConfigTypes.Contains(config.Kind) {
		return true
	}
	return sc.configDependencies.Contains(config.HashCode())
}
// GetService returns the imported service for the given hostname, or nil when
// the scope is nil or the hostname was not imported by any egress listener.
func (sc *SidecarScope) GetService(hostname host.Name) *Service {
	if sc == nil {
		return nil
	}
	return sc.servicesByHostname[hostname]
}
// AddConfigDependencies add extra config dependencies to this scope. This action should be done before the
// SidecarScope being used to avoid concurrent read/write.
func (sc *SidecarScope) AddConfigDependencies(dependencies ...ConfigHash) {
	if sc == nil {
		return
	}
	// Lazily create the set on first use; otherwise merge in place.
	if sc.configDependencies == nil {
		sc.configDependencies = sets.New(dependencies...)
		return
	}
	sc.configDependencies.InsertAll(dependencies...)
}
// DestinationRule returns a destinationrule for a svc.
// Workload-selector rules in the scope's namespace take priority for outbound
// traffic; otherwise the last selector-less ("catch all") rule wins.
func (sc *SidecarScope) DestinationRule(direction TrafficDirection, proxy *Proxy, svc host.Name) *ConsolidatedDestRule {
	destinationRules := sc.destinationRules[svc]
	var catchAllDr *ConsolidatedDestRule
	for _, destRule := range destinationRules {
		destinationRule := destRule.rule.Spec.(*networking.DestinationRule)
		if destinationRule.GetWorkloadSelector() == nil {
			// Remember the selector-less rule as the fallback; if several exist,
			// the last one in the list is kept.
			catchAllDr = destRule
		}
		// filter DestinationRule based on workloadSelector for outbound configs.
		// WorkloadSelector configuration is honored only for outbound configuration, because
		// for inbound configuration, the settings at sidecar would be more explicit and the preferred way forward.
		if sc.Namespace == destRule.rule.Namespace &&
			destinationRule.GetWorkloadSelector() != nil && direction == TrafficDirectionOutbound {
			workloadSelector := labels.Instance(destinationRule.GetWorkloadSelector().GetMatchLabels())
			// return destination rule if workload selector matches
			if workloadSelector.SubsetOf(proxy.Labels) {
				return destRule
			}
		}
	}
	// If there is no workload specific destinationRule, return the wild carded dr if present.
	if catchAllDr != nil {
		return catchAllDr
	}
	return nil
}
// DestinationRuleConfig returns merged destination rules for a svc.
func (sc *SidecarScope) DestinationRuleConfig(direction TrafficDirection, proxy *Proxy, svc host.Name) *config.Config {
	// Resolve the consolidated rule first; nil means no rule applies.
	if cdr := sc.DestinationRule(direction, proxy, svc); cdr != nil {
		return cdr.rule
	}
	return nil
}
// Services returns the list of services that are visible to a sidecar.
// The returned slice is shared; callers must not mutate it.
func (sc *SidecarScope) Services() []*Service {
	return sc.services
}
// SetDestinationRulesForTesting allows tests to inject destination rules
// without having the mock. Testing only.
func (sc *SidecarScope) SetDestinationRulesForTesting(configs []config.Config) {
	byName := make(map[types.NamespacedName]*config.Config, len(configs))
	for i := range configs {
		// Take a per-iteration copy so each map entry points at distinct storage.
		cfg := configs[i]
		byName[types.NamespacedName{Name: cfg.Name, Namespace: cfg.Namespace}] = &cfg
	}
	sc.destinationRulesByNames = byName
}
// DestinationRuleByName looks up a destination rule by its name and namespace,
// returning nil when the scope is nil or no such rule was imported.
func (sc *SidecarScope) DestinationRuleByName(name, namespace string) *config.Config {
	if sc == nil {
		return nil
	}
	key := types.NamespacedName{Name: name, Namespace: namespace}
	return sc.destinationRulesByNames[key]
}
// ServicesForHostname returns a list of services that fall under the hostname provided. This hostname
// can be a wildcard.
func (sc *SidecarScope) ServicesForHostname(hostname host.Name) []*Service {
	// Exact hostnames resolve through the index in O(1).
	if !hostname.IsWildCarded() {
		svc, ok := sc.servicesByHostname[hostname]
		if !ok {
			return nil
		}
		return []*Service{svc}
	}
	// Wildcards require scanning every imported service.
	matched := make([]*Service, 0)
	for _, svc := range sc.services {
		if hostname.Matches(svc.Hostname) {
			matched = append(matched, svc)
		}
	}
	return matched
}
// selectServices returns the services allowed by the hosts field in the egress
// portion of the Sidecar config. Note that the returned services may be trimmed
// copies (ports/aliases reduced to what the listener imports).
func (ilw *IstioEgressListenerWrapper) selectServices(services []*Service, configNamespace string, hostsByNamespace map[string]hostClassification) []*Service {
	importedServices := make([]*Service, 0)
	wildcardHosts, wnsFound := hostsByNamespace[wildcardNamespace]
	for _, s := range services {
		// The service's own namespace. Previously this local shadowed the
		// configNamespace parameter, which the dedup loop below still needs;
		// renamed to avoid the shadowing hazard.
		svcNamespace := s.Attributes.Namespace
		// Check if there is an explicit import of form ns/* or ns/host
		if importedHosts, nsFound := hostsByNamespace[svcNamespace]; nsFound {
			if svc := matchingAliasService(importedHosts, matchingService(importedHosts, s, ilw)); svc != nil {
				importedServices = append(importedServices, svc)
				continue
			}
		}
		// Check if there is an import of form */host or */*
		if wnsFound {
			if svc := matchingAliasService(wildcardHosts, matchingService(wildcardHosts, s, ilw)); svc != nil {
				importedServices = append(importedServices, svc)
			}
		}
	}
	validServices := make(map[host.Name]string, len(importedServices))
	for _, svc := range importedServices {
		_, f := validServices[svc.Hostname]
		// Select a single namespace for a given hostname.
		// If the same hostname is imported from multiple namespaces, pick the one in the configNamespace
		// If neither are in configNamespace, an arbitrary one will be chosen
		if !f || svc.Attributes.Namespace == configNamespace {
			validServices[svc.Hostname] = svc.Attributes.Namespace
		}
	}
	// Filter down to just instances in scope for the service
	return slices.FilterInPlace(importedServices, func(svc *Service) bool {
		return validServices[svc.Hostname] == svc.Attributes.Namespace
	})
}
// matchingService returns the original service, a port-trimmed copy of it, or
// nil when the imported hosts do not cover the service's hostname.
func matchingService(importedHosts hostClassification, service *Service, ilw *IstioEgressListenerWrapper) *Service {
	if !importedHosts.Matches(service.Hostname) {
		return nil
	}
	// When the listener declares a port, only matching ports are imported.
	if ilw.matchPort {
		return serviceMatchingListenerPort(service, ilw)
	}
	return service
}
// matchingAliasService returns the service, possibly as a copy whose aliases
// are restricted to those covered by the sidecar's imported hosts.
func matchingAliasService(importedHosts hostClassification, service *Service) *Service {
	if service == nil {
		return nil
	}
	aliases := service.Attributes.Aliases
	kept := make([]NamespacedHostname, 0, len(aliases))
	for _, alias := range aliases {
		if importedHosts.Matches(alias.Hostname) {
			kept = append(kept, alias)
		}
	}
	// Every alias imported: no trimming needed, return the shared instance.
	if len(kept) == len(aliases) {
		return service
	}
	trimmed := service.DeepCopy()
	trimmed.Attributes.Aliases = kept
	return trimmed
}
// serviceMatchingListenerPort returns a copy of service trimmed to the single
// port matching the egress listener's port, or nil when no port matches.
func serviceMatchingListenerPort(service *Service, ilw *IstioEgressListenerWrapper) *Service {
	want := int(ilw.IstioListener.Port.GetNumber())
	for _, port := range service.Ports {
		if port.Port != want {
			continue
		}
		trimmed := service.DeepCopy()
		trimmed.Ports = []*Port{port}
		return trimmed
	}
	return nil
}
// serviceMatchingVirtualServicePorts trims service down to the ports a
// VirtualService actually routes to, returning nil when none of them exist.
func serviceMatchingVirtualServicePorts(service *Service, vsDestPorts sets.Set[int]) *Service {
	// A value of 0 in vsDestPorts is used as a sentinel to indicate a dependency
	// on every port of the service.
	if len(vsDestPorts) == 0 || vsDestPorts.Contains(0) {
		return service
	}
	matched := make([]*Port, 0)
	for _, port := range service.Ports {
		if vsDestPorts.Contains(port.Port) {
			matched = append(matched, port)
		}
	}
	switch {
	case len(matched) == len(service.Ports):
		// Every service port is referenced: no trimming necessary.
		return service
	case len(matched) > 0:
		trimmed := service.DeepCopy()
		trimmed.Ports = matched
		return trimmed
	}
	// If the service has more than one port, and the Virtual Service only
	// specifies destination ports not found in the service, we'll simply
	// not add the service to the sidecar as an optimization, because
	// traffic will not route properly anyway. This matches the above
	// behavior in serviceMatchingListenerPort for ports specified on the
	// sidecar egress listener.
	log.Warnf("Failed to find any VirtualService destination ports %v exposed by Service %s", vsDestPorts, service.Hostname)
	return nil
}
// computeWildcardHostVirtualServiceIndex computes the wildcardHostVirtualServiceIndex for a given
// (sorted) list of virtualServices. This is used to optimize the lookup of the most specific wildcard host.
//
// N.B the caller MUST presort virtualServices based on the desired precedence for duplicate hostnames.
// This function will persist that order and not overwrite any previous entries for a given hostname.
func computeWildcardHostVirtualServiceIndex(virtualServices []config.Config, services []*Service) map[host.Name]types.NamespacedName {
	fqdnVirtualServiceHostIndex := make(map[host.Name]config.Config, len(virtualServices))
	wildcardVirtualServiceHostIndex := make(map[host.Name]config.Config, len(virtualServices))
	for _, vs := range virtualServices {
		v := vs.Spec.(*networking.VirtualService)
		for _, h := range v.Hosts {
			// Convert once instead of re-converting for every map operation.
			hn := host.Name(h)
			// Route wildcard and exact hosts to their respective indexes.
			index := fqdnVirtualServiceHostIndex
			if hn.IsWildCarded() {
				index = wildcardVirtualServiceHostIndex
			}
			// We may have duplicate (not just overlapping) hosts; assume the list
			// of VS is sorted already and never overwrite existing entries.
			if _, exists := index[hn]; !exists {
				index[hn] = vs
			}
		}
	}
	mostSpecificWildcardVsIndex := make(map[host.Name]types.NamespacedName)
	// Heuristic is flag-controlled: most-specific host match vs oldest match.
	comparator := MostSpecificHostMatch[config.Config]
	if features.PersistOldestWinsHeuristicForVirtualServiceHostMatching {
		comparator = OldestMatchingHost
	}
	for _, svc := range services {
		_, ref, exists := comparator(svc.Hostname, fqdnVirtualServiceHostIndex, wildcardVirtualServiceHostIndex)
		if !exists {
			// This svc doesn't have a virtualService; skip
			continue
		}
		mostSpecificWildcardVsIndex[svc.Hostname] = ref.NamespacedName()
	}
	return mostSpecificWildcardVsIndex
}
// needsPortMatch reports whether services should be matched against the egress
// listener's port. A port match is required whenever the listener declares a
// non-zero port, except when that port's protocol is HTTP_PROXY, in which case
// the listener serves as a generic egress HTTP proxy and must not filter by port.
func needsPortMatch(l *networking.IstioEgressListener) bool {
	if l == nil || l.Port.GetNumber() == 0 {
		return false
	}
	return protocol.Parse(l.Port.Protocol) != protocol.HTTP_PROXY
}
// sidecarServiceIndex pairs a service added to a SidecarScope with its position
// in the SidecarScope.services slice, so a later duplicate for the same hostname
// can merge into or replace the existing entry in place.
type sidecarServiceIndex struct {
	svc *Service
	index int // index record the position of the svc in slice
}
// appendSidecarServices adds s to the SidecarScope's service list, resolving
// conflicts with any previously added service for the same hostname:
//   - new hostname: append and index it.
//   - existing entry is a Kubernetes Service: keep it (Kubernetes always wins).
//   - incoming s is a Kubernetes Service: replace the existing non-Kubernetes entry.
//   - both are non-Kubernetes (ServiceEntry) in the same namespace: merge ports
//     into a copy of the existing service.
//   - otherwise: keep the existing (oldest) entry unchanged.
//
// servicesAdded is the caller-maintained hostname index and is updated in place.
func (sc *SidecarScope) appendSidecarServices(servicesAdded map[host.Name]sidecarServiceIndex, s *Service) {
	if s == nil {
		return
	}
	if foundSvc, found := servicesAdded[s.Hostname]; !found {
		sc.services = append(sc.services, s)
		servicesAdded[s.Hostname] = sidecarServiceIndex{s, len(sc.services) - 1}
		sc.servicesByHostname[s.Hostname] = s
	} else {
		existing := foundSvc.svc
		// We do not merge k8s service with any other services from other registries
		if existing.Attributes.ServiceRegistry == provider.Kubernetes {
			return
		}
		// In some scenarios, there may be multiple Services defined for the same hostname due to ServiceEntry allowing
		// arbitrary hostnames. In these cases, we want to pick the first Service, which is the oldest. This ensures
		// newly created Services cannot take ownership unexpectedly.
		// However, if the Service is from Kubernetes it should take precedence over ones that are not. This prevents
		// someone from "domain squatting" on the hostname before a Kubernetes Service is created.
		if s.Attributes.ServiceRegistry == provider.Kubernetes {
			log.Debugf("Service %s/%s from registry %s ignored by %s/%s/%s", existing.Attributes.Namespace, existing.Hostname, existing.Attributes.ServiceRegistry,
				s.Attributes.Namespace, s.Hostname, s.Attributes.ServiceRegistry)
			// replace service in slice
			sc.services[foundSvc.index] = s
			// Update index as well, so that future reads will merge into the new service
			foundSvc.svc = s
			servicesAdded[foundSvc.svc.Hostname] = foundSvc
			sc.servicesByHostname[s.Hostname] = s
			return
		}
		// we merge ports for services both defined by ServiceEntry in same namespace
		if existing.Attributes.Namespace == s.Attributes.Namespace {
			// merge the ports to service when each listener generates partial service
			// we only merge if the found service is in the same namespace as the one we're trying to add
			copied := foundSvc.svc.DeepCopy()
			for _, p := range s.Ports {
				found := false
				for _, osp := range copied.Ports {
					if p.Port == osp.Port {
						found = true
						break
					}
				}
				if !found {
					copied.Ports = append(copied.Ports, p)
				}
			}
			// replace service in slice
			sc.services[foundSvc.index] = copied
			// Update index as well, so that future reads will merge into the new service
			foundSvc.svc = copied
			servicesAdded[foundSvc.svc.Hostname] = foundSvc
			// BUGFIX: previously this stored the incoming partial service `s`, leaving
			// servicesByHostname inconsistent with sc.services; store the merged copy.
			sc.servicesByHostname[s.Hostname] = copied
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/slices"
)
// Canonical condition status values, following the Kubernetes condition convention.
const (
	StatusTrue = "True"
	StatusFalse = "False"
)
// GetConditionFromSpec returns the condition with the given type from the
// config's status, or nil when the status is not an IstioStatus or the
// condition is absent.
func GetConditionFromSpec(cfg config.Config, condition string) *v1alpha1.IstioCondition {
	status, ok := cfg.Status.(*v1alpha1.IstioStatus)
	if !ok {
		return nil
	}
	return GetCondition(status.Conditions, condition)
}
// GetBoolConditionFromSpec returns the boolean value of the named condition in
// the config's status, or defaultValue when the status is not an IstioStatus.
func GetBoolConditionFromSpec(cfg config.Config, condition string, defaultValue bool) bool {
	status, ok := cfg.Status.(*v1alpha1.IstioStatus)
	if !ok {
		return defaultValue
	}
	return GetBoolCondition(status.Conditions, condition, defaultValue)
}
// GetBoolCondition interprets the named condition as a boolean: "True" yields
// true, "False" yields false, and a missing condition or any other status
// yields defaultValue.
func GetBoolCondition(conditions []*v1alpha1.IstioCondition, condition string, defaultValue bool) bool {
	cond := GetCondition(conditions, condition)
	if cond == nil {
		return defaultValue
	}
	switch cond.Status {
	case StatusTrue:
		return true
	case StatusFalse:
		return false
	default:
		return defaultValue
	}
}
// GetCondition returns the first condition whose Type equals condition, or nil
// if none matches.
func GetCondition(conditions []*v1alpha1.IstioCondition, condition string) *v1alpha1.IstioCondition {
	for _, c := range conditions {
		if c.Type == condition {
			return c
		}
	}
	return nil
}
// UpdateConfigCondition returns a deep copy of cfg with the given condition
// inserted or replaced in its status. A nil status is initialized to an empty
// IstioStatus before the condition is applied.
func UpdateConfigCondition(cfg config.Config, condition *v1alpha1.IstioCondition) config.Config {
	cfg = cfg.DeepCopy()
	if cfg.Status == nil {
		cfg.Status = &v1alpha1.IstioStatus{}
	}
	status := cfg.Status.(*v1alpha1.IstioStatus)
	status.Conditions = updateCondition(status.Conditions, condition)
	return cfg
}
// updateCondition replaces the existing condition of the same Type in place,
// or appends the condition when no entry with that Type exists.
func updateCondition(conditions []*v1alpha1.IstioCondition, condition *v1alpha1.IstioCondition) []*v1alpha1.IstioCondition {
	for i := range conditions {
		if conditions[i].Type == condition.Type {
			conditions[i] = condition
			return conditions
		}
	}
	return append(conditions, condition)
}
// DeleteConfigCondition returns cfg with the named condition removed from its
// status. When the status is not an IstioStatus, or the condition is not
// present, cfg is returned unchanged (no copy is made).
func DeleteConfigCondition(cfg config.Config, condition string) config.Config {
	existing, ok := cfg.Status.(*v1alpha1.IstioStatus)
	if !ok {
		return cfg
	}
	if GetCondition(existing.Conditions, condition) == nil {
		// Nothing to delete; avoid the deep copy.
		return cfg
	}
	cfg = cfg.DeepCopy()
	status := cfg.Status.(*v1alpha1.IstioStatus)
	status.Conditions = deleteCondition(status.Conditions, condition)
	return cfg
}
// deleteCondition removes every condition whose Type equals condition,
// filtering the slice in place.
func deleteCondition(conditions []*v1alpha1.IstioCondition, condition string) []*v1alpha1.IstioCondition {
	return slices.FilterInPlace(conditions, func(c *v1alpha1.IstioCondition) bool {
		return c.Type != condition
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"sort"
"strings"
"sync"
"time"
udpa "github.com/cncf/xds/go/udpa/type/v1"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
httpwasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/wasm/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
wasmfilter "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/wasm/v3"
wasm "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"k8s.io/apimachinery/pkg/types"
sd "istio.io/api/envoy/extensions/stackdriver/config/v1alpha1"
"istio.io/api/envoy/extensions/stats"
meshconfig "istio.io/api/mesh/v1alpha1"
tpb "istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/xds"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
// Telemetry holds configuration for Telemetry API resources.
// It is a flattened view of a single telemetry.istio.io resource: its
// name/namespace identity plus the parsed spec.
type Telemetry struct {
	Name string `json:"name"`
	Namespace string `json:"namespace"`
	Spec *tpb.Telemetry `json:"spec"`
}
// Telemetries organizes Telemetry configuration by namespace.
type Telemetries struct {
	// Maps from namespace to the Telemetry configs.
	NamespaceToTelemetries map[string][]Telemetry `json:"namespace_to_telemetries"`
	// The name of the root namespace.
	RootNamespace string `json:"root_namespace"`
	// Computed meshConfig
	meshConfig *meshconfig.MeshConfig
	// computedMetricsFilters contains the set of cached HCM/listener filters for the metrics portion.
	// These filters are extremely costly, as we insert them into every listener on every proxy, and to
	// generate them we need to merge many telemetry specs and perform 2 Any marshals.
	// To improve performance, we store a cache based on the Telemetries that impacted the filter, as well as
	// its class and protocol. This is protected by mu.
	// Currently, this only applies to metrics, but a similar concept can likely be applied to logging and
	// tracing for performance.
	// The computedMetricsFilters lifetime is bound to the Telemetries object. During a push context
	// creation, we will preserve the Telemetries (and thus the cache) if no Telemetries are modified.
	// As a result, this cache will live until any Telemetry is modified.
	computedMetricsFilters map[metricsKey]any
	// computedLoggingConfig caches merged access-logging configs per (telemetry, class) key;
	// also protected by mu.
	computedLoggingConfig map[loggingKey][]LoggingConfig
	// mu guards computedMetricsFilters and computedLoggingConfig.
	mu sync.Mutex
}
// telemetryKey identifies the set of Telemetry resources that contributed to a
// computed result, and is embedded in the cache keys below.
type telemetryKey struct {
	// Root stores the Telemetry in the root namespace, if any
	Root types.NamespacedName
	// Namespace stores the Telemetry in the proxy's own namespace, if any
	Namespace types.NamespacedName
	// Workload stores the Telemetry selecting the proxy's workload, if any
	Workload types.NamespacedName
}
// loggingKey defines a key into the computedLoggingConfig cache.
type loggingKey struct {
	telemetryKey
	Class networking.ListenerClass
	Protocol networking.ListenerProtocol
}
// metricsKey defines a key into the computedMetricsFilters cache.
type metricsKey struct {
	telemetryKey
	Class networking.ListenerClass
	Protocol networking.ListenerProtocol
	ProxyType NodeType
}
// getTelemetries returns the Telemetry configurations for the given environment,
// indexed by namespace and sorted by creation time so that older resources come
// first.
func getTelemetries(env *Environment) *Telemetries {
	out := &Telemetries{
		NamespaceToTelemetries: map[string][]Telemetry{},
		RootNamespace: env.Mesh().GetRootNamespace(),
		meshConfig: env.Mesh(),
		computedMetricsFilters: map[metricsKey]any{},
		computedLoggingConfig: map[loggingKey][]LoggingConfig{},
	}
	resources := env.List(gvk.Telemetry, NamespaceAll)
	sortConfigByCreationTime(resources)
	for _, res := range resources {
		ns := res.Namespace
		out.NamespaceToTelemetries[ns] = append(out.NamespaceToTelemetries[ns], Telemetry{
			Name: res.Name,
			Namespace: ns,
			Spec: res.Spec.(*tpb.Telemetry),
		})
	}
	return out
}
// metricsConfig is the normalized per-provider metrics configuration produced
// by mergeMetrics, split into client- and server-side settings.
type metricsConfig struct {
	ClientMetrics metricConfig
	ServerMetrics metricConfig
	ReportingInterval *durationpb.Duration
	RotationInterval *durationpb.Duration
	GracefulDeletionInterval *durationpb.Duration
}
// metricConfig holds the merged overrides for one reporting direction.
type metricConfig struct {
	// if true, do not add filter to chain
	Disabled bool
	Overrides []metricsOverride
}
// telemetryFilterConfig bundles everything needed to build one provider's
// telemetry filter: the merged metrics config, the provider definition, and
// whether metrics and/or access logging are enabled for it.
type telemetryFilterConfig struct {
	metricsConfig
	Provider *meshconfig.MeshConfig_ExtensionProvider
	Metrics bool
	AccessLogging bool
	LogsFilter *tpb.AccessLogging_Filter
	NodeType NodeType
}
// MetricsForClass selects the metric configuration for the given listener
// class: server-side metrics for sidecar inbound listeners, client-side
// metrics for everything else (gateways, sidecar outbound, and unknown).
func (t telemetryFilterConfig) MetricsForClass(c networking.ListenerClass) metricConfig {
	if c == networking.ListenerClassSidecarInbound {
		return t.ServerMetrics
	}
	return t.ClientMetrics
}
// metricsOverride is a normalized per-metric override: disable the metric
// and/or adjust its tags.
type metricsOverride struct {
	Name string
	Disabled bool
	Tags []tagOverride
}
// tagOverride describes a single tag change: removal, or upsert to Value.
type tagOverride struct {
	Name string
	Remove bool
	Value string
}
// computedTelemetries contains the various Telemetry configurations in scope for a given proxy.
// This can include the root namespace, namespace, and workload Telemetries combined
type computedTelemetries struct {
	telemetryKey
	Metrics []*tpb.Metrics
	Logging []*computedAccessLogging
	Tracing []*tpb.Tracing
}
// computedAccessLogging contains the various AccessLogging configurations in scope for a given proxy,
// include combined configurations for one of the following levels: 1. the root namespace level
// 2. namespace level 3. workload level combined.
type computedAccessLogging struct {
	telemetryKey
	Logging []*tpb.AccessLogging
}
// TracingConfig holds the resolved tracing settings for a proxy, split by
// reporting direction.
type TracingConfig struct {
	ServerSpec TracingSpec
	ClientSpec TracingSpec
}
// TracingSpec is the tracing configuration for one direction: the provider to
// report to, sampling, custom tags, and whether tracing is disabled.
type TracingSpec struct {
	Provider *meshconfig.MeshConfig_ExtensionProvider
	Disabled bool
	RandomSamplingPercentage *float64
	CustomTags map[string]*tpb.Tracing_CustomTag
	UseRequestIDForTraceSampling bool
}
// LoggingConfig is the resolved access-log configuration for one provider.
type LoggingConfig struct {
	Disabled bool
	AccessLog *accesslog.AccessLog
	Provider *meshconfig.MeshConfig_ExtensionProvider
	Filter *tpb.AccessLogging_Filter
}
// loggingSpec is the intermediate per-provider state built by mergeLogs.
type loggingSpec struct {
	Disabled bool
	Filter *tpb.AccessLogging_Filter
}
// workloadMode maps a listener class to the Telemetry WorkloadMode it reports
// as: sidecar inbound listeners are SERVER, all other classes (gateway,
// sidecar outbound, and the should-not-happen undefined class) are CLIENT.
func workloadMode(class networking.ListenerClass) tpb.WorkloadMode {
	if class == networking.ListenerClassSidecarInbound {
		return tpb.WorkloadMode_SERVER
	}
	return tpb.WorkloadMode_CLIENT
}
// AccessLogging returns the logging configuration for a given proxy and listener class.
// If nil or empty configuration is returned, access logs are not configured via Telemetry and should use fallback mechanisms.
// If access logging is explicitly disabled, a configuration with disabled set to true is returned.
func (t *Telemetries) AccessLogging(push *PushContext, proxy *Proxy, class networking.ListenerClass) []LoggingConfig {
	ct := t.applicableTelemetries(proxy)
	if len(ct.Logging) == 0 && len(t.meshConfig.GetDefaultProviders().GetAccessLogging()) == 0 {
		// No Telemetry API configured, fall back to legacy mesh config setting
		return nil
	}
	// Cache key: which Telemetry resources applied, plus the listener class.
	key := loggingKey{
		telemetryKey: ct.telemetryKey,
		Class: class,
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	// Fast path: return the previously computed result for this key.
	precomputed, ok := t.computedLoggingConfig[key]
	if ok {
		return precomputed
	}
	// Merge the in-scope logging configs into a provider-name -> spec map.
	providers := mergeLogs(ct.Logging, t.meshConfig, workloadMode(class))
	cfgs := make([]LoggingConfig, 0, len(providers))
	for p, v := range providers {
		fp := t.fetchProvider(p)
		if fp == nil {
			log.Debugf("fail to fetch provider %s", p)
			continue
		}
		cfg := LoggingConfig{
			Provider: fp,
			Filter: v.Filter,
			Disabled: v.Disabled,
		}
		al := telemetryAccessLog(push, fp)
		if al == nil {
			// stackdriver will be handled in HTTPFilters/TCPFilters
			continue
		}
		cfg.AccessLog = al
		cfgs = append(cfgs, cfg)
	}
	// Populate the cache for subsequent calls with the same key.
	t.computedLoggingConfig[key] = cfgs
	return cfgs
}
// Tracing returns the tracing configuration for a given proxy. If nil is returned, tracing
// is not configured via Telemetry and should use fallback mechanisms. If a non-nil but disabled config
// is returned, then tracing is explicitly disabled.
func (t *Telemetries) Tracing(proxy *Proxy) *TracingConfig {
	ct := t.applicableTelemetries(proxy)
	providerNames := t.meshConfig.GetDefaultProviders().GetTracing()
	hasDefaultProvider := len(providerNames) > 0
	if len(ct.Tracing) == 0 && !hasDefaultProvider {
		return nil
	}
	// Start both directions with the default: sampling driven by request ID.
	clientSpec := TracingSpec{UseRequestIDForTraceSampling: true}
	serverSpec := TracingSpec{UseRequestIDForTraceSampling: true}
	if hasDefaultProvider {
		// todo: what do we want to do with more than one default provider?
		// for now, use only the first provider.
		fetched := t.fetchProvider(providerNames[0])
		clientSpec.Provider = fetched
		serverSpec.Provider = fetched
	}
	// Apply each in-scope Tracing block in order; later entries overwrite earlier
	// ones (shallow merge) for whichever direction(s) they match.
	for _, m := range ct.Tracing {
		names := getProviderNames(m.Providers)
		specs := []*TracingSpec{&clientSpec, &serverSpec}
		if m.Match != nil {
			switch m.Match.Mode {
			case tpb.WorkloadMode_CLIENT:
				specs = []*TracingSpec{&clientSpec}
			case tpb.WorkloadMode_SERVER:
				specs = []*TracingSpec{&serverSpec}
			}
		}
		if len(names) > 0 {
			// NOTE: we only support a single provider per mode
			// so, choosing the first provider returned in the list
			// is the "safest"
			fetched := t.fetchProvider(names[0])
			for _, spec := range specs {
				spec.Provider = fetched
			}
		}
		// Now merge in any overrides
		if m.DisableSpanReporting != nil {
			for _, spec := range specs {
				spec.Disabled = m.DisableSpanReporting.GetValue()
			}
		}
		// TODO: metrics overrides do a deep merge, but here we do a shallow merge.
		// We should consider if we want to reconcile the two.
		if m.CustomTags != nil {
			for _, spec := range specs {
				spec.CustomTags = m.CustomTags
			}
		}
		if m.RandomSamplingPercentage != nil {
			for _, spec := range specs {
				spec.RandomSamplingPercentage = ptr.Of(m.RandomSamplingPercentage.GetValue())
			}
		}
		if m.UseRequestIdForTraceSampling != nil {
			for _, spec := range specs {
				spec.UseRequestIDForTraceSampling = m.UseRequestIdForTraceSampling.Value
			}
		}
	}
	// If no provider is configured (and retrieved) for the tracing specs,
	// then we will disable the configuration.
	if clientSpec.Provider == nil {
		clientSpec.Disabled = true
	}
	if serverSpec.Provider == nil {
		serverSpec.Disabled = true
	}
	cfg := TracingConfig{
		ClientSpec: clientSpec,
		ServerSpec: serverSpec,
	}
	return &cfg
}
// HTTPFilters computes the HTTP telemetry filters for a given proxy and
// listener class, or nil when no filters apply.
func (t *Telemetries) HTTPFilters(proxy *Proxy, class networking.ListenerClass) []*hcm.HttpFilter {
	res := t.telemetryFilters(proxy, class, networking.ListenerProtocolHTTP)
	if res == nil {
		return nil
	}
	return res.([]*hcm.HttpFilter)
}
// TCPFilters computes the TCP telemetry filters for a given proxy and
// listener class, or nil when no filters apply.
func (t *Telemetries) TCPFilters(proxy *Proxy, class networking.ListenerClass) []*listener.Filter {
	res := t.telemetryFilters(proxy, class, networking.ListenerProtocolTCP)
	if res == nil {
		return nil
	}
	return res.([]*listener.Filter)
}
// applicableTelemetries fetches the relevant telemetry configurations for a given proxy.
// Configs are collected in precedence order — root namespace first, then the proxy's
// namespace, then workload-selecting Telemetries — so that later entries override
// earlier ones when merged downstream.
func (t *Telemetries) applicableTelemetries(proxy *Proxy) computedTelemetries {
	if t == nil {
		return computedTelemetries{}
	}
	namespace := proxy.ConfigNamespace
	// Order here matters. The latter elements will override the first elements
	ms := []*tpb.Metrics{}
	ls := []*computedAccessLogging{}
	ts := []*tpb.Tracing{}
	key := telemetryKey{}
	// 1. Mesh-wide Telemetry from the root namespace (namespace-wide, no selector).
	if t.RootNamespace != "" {
		telemetry := t.namespaceWideTelemetryConfig(t.RootNamespace)
		if telemetry != (Telemetry{}) {
			key.Root = types.NamespacedName{Name: telemetry.Name, Namespace: telemetry.Namespace}
			ms = append(ms, telemetry.Spec.GetMetrics()...)
			if len(telemetry.Spec.GetAccessLogging()) != 0 {
				ls = append(ls, &computedAccessLogging{
					telemetryKey: telemetryKey{
						Root: key.Root,
					},
					Logging: telemetry.Spec.GetAccessLogging(),
				})
			}
			ts = append(ts, telemetry.Spec.GetTracing()...)
		}
	}
	// 2. Namespace-wide Telemetry in the proxy's own namespace (skip if that IS the root).
	if namespace != t.RootNamespace {
		telemetry := t.namespaceWideTelemetryConfig(namespace)
		if telemetry != (Telemetry{}) {
			key.Namespace = types.NamespacedName{Name: telemetry.Name, Namespace: telemetry.Namespace}
			ms = append(ms, telemetry.Spec.GetMetrics()...)
			if len(telemetry.Spec.GetAccessLogging()) != 0 {
				ls = append(ls, &computedAccessLogging{
					telemetryKey: telemetryKey{
						Namespace: key.Namespace,
					},
					Logging: telemetry.Spec.GetAccessLogging(),
				})
			}
			ts = append(ts, telemetry.Spec.GetTracing()...)
		}
	}
	ct := &computedTelemetries{
		telemetryKey: key,
		Metrics: ms,
		Logging: ls,
		Tracing: ts,
	}
	// 3. Workload-scoped Telemetries: only those with a selector are considered here
	// (selector-less ones were already handled as namespace-wide above).
	for _, telemetry := range t.NamespaceToTelemetries[namespace] {
		spec := telemetry.Spec
		if len(spec.GetSelector().GetMatchLabels()) == 0 {
			continue
		}
		opts := WorkloadSelectionOpts{
			RootNamespace: t.RootNamespace,
			Namespace: telemetry.Namespace,
			WorkloadLabels: proxy.Labels,
			IsWaypoint: proxy.IsWaypointProxy(),
		}
		switch getPolicyMatcher(gvk.Telemetry, telemetry.Name, opts, spec) {
		case policyMatchSelector:
			selector := labels.Instance(spec.GetSelector().GetMatchLabels())
			if selector.SubsetOf(proxy.Labels) {
				ct = appendApplicableTelemetries(ct, telemetry, spec)
			}
		case policyMatchDirect:
			ct = appendApplicableTelemetries(ct, telemetry, spec)
		case policyMatchIgnore:
			log.Debug("There isn't a match between the workload and the policy. Policy is ignored.")
		}
	}
	return *ct
}
// appendApplicableTelemetries merges a workload-scoped Telemetry into ct,
// recording its identity as the workload key and appending its metrics,
// access-logging, and tracing sections. Returns ct for chaining.
func appendApplicableTelemetries(ct *computedTelemetries, tel Telemetry, spec *tpb.Telemetry) *computedTelemetries {
	workloadKey := types.NamespacedName{Name: tel.Name, Namespace: tel.Namespace}
	ct.telemetryKey.Workload = workloadKey
	ct.Metrics = append(ct.Metrics, spec.GetMetrics()...)
	if logging := tel.Spec.GetAccessLogging(); len(logging) != 0 {
		ct.Logging = append(ct.Logging, &computedAccessLogging{
			telemetryKey: telemetryKey{Workload: workloadKey},
			Logging: logging,
		})
	}
	ct.Tracing = append(ct.Tracing, spec.GetTracing()...)
	return ct
}
// telemetryFilters computes the filters for the given proxy/class and protocol. This computes the
// set of applicable Telemetries, merges them, then translates to the appropriate filters based on the
// extension providers in the mesh config. Where possible, the result is cached.
// Currently, this includes metrics and access logging, as some providers are implemented in filters.
func (t *Telemetries) telemetryFilters(proxy *Proxy, class networking.ListenerClass, protocol networking.ListenerProtocol) any {
	if t == nil {
		return nil
	}
	c := t.applicableTelemetries(proxy)
	// Cache key: the contributing Telemetry resources plus class/protocol/proxy type.
	key := metricsKey{
		telemetryKey: c.telemetryKey,
		Class: class,
		Protocol: protocol,
		ProxyType: proxy.Type,
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	precomputed, f := t.computedMetricsFilters[key]
	if f {
		return precomputed
	}
	// First, take all the metrics configs and transform them into a normalized form
	tmm := mergeMetrics(c.Metrics, t.meshConfig)
	log.Debugf("merged metrics, proxyID: %s metrics: %+v", proxy.ID, tmm)
	// Additionally, fetch relevant access logging configurations
	tml := mergeLogs(c.Logging, t.meshConfig, workloadMode(class))
	// The above result is in a nested map to deduplicate responses. This loses ordering, so we convert to
	// a list to retain stable naming
	allKeys := sets.New[string]()
	for k, v := range tml {
		// Skip providers whose logging was explicitly disabled.
		if v.Disabled {
			continue
		}
		allKeys.Insert(k)
	}
	for k := range tmm {
		allKeys.Insert(k)
	}
	rotationInterval := getInterval(features.MetricRotationInterval, defaultMetricRotationInterval)
	gracefulDeletionInterval := getInterval(features.MetricGracefulDeletionInterval, defaultMetricGracefulDeletionInterval)
	m := make([]telemetryFilterConfig, 0, allKeys.Len())
	// Iterate providers in sorted order for deterministic filter naming.
	for _, k := range sets.SortedList(allKeys) {
		p := t.fetchProvider(k)
		if p == nil {
			continue
		}
		loggingCfg, logging := tml[k]
		mertricCfg, metrics := tmm[k]
		mertricCfg.RotationInterval = rotationInterval
		mertricCfg.GracefulDeletionInterval = gracefulDeletionInterval
		cfg := telemetryFilterConfig{
			Provider: p,
			metricsConfig: mertricCfg,
			AccessLogging: logging && !loggingCfg.Disabled,
			Metrics: metrics,
			// NOTE(review): keyed by p.Name (canonical provider name), which may differ
			// in case from k since fetchProvider matches case-insensitively — confirm intended.
			LogsFilter: tml[p.Name].Filter,
			NodeType: proxy.Type,
		}
		m = append(m, cfg)
	}
	var res any
	// Finally, compute the actual filters based on the protocol
	switch protocol {
	case networking.ListenerProtocolHTTP:
		res = buildHTTPTelemetryFilter(class, m)
	default:
		res = buildTCPTelemetryFilter(class, m)
	}
	// Update cache
	t.computedMetricsFilters[key] = res
	return res
}
// default value for metric rotation interval and graceful deletion interval,
// more details can be found in here: https://github.com/istio/proxy/blob/master/source/extensions/filters/http/istio_stats/config.proto#L116
var (
	// 0 disables rotation by default.
	defaultMetricRotationInterval = 0 * time.Second
	defaultMetricGracefulDeletionInterval = 5 * time.Minute
)
// getInterval converts input to a protobuf Duration, returning nil when it
// equals the default value to keep the generated config small.
func getInterval(input, defaultValue time.Duration) *durationpb.Duration {
	if input != defaultValue {
		return durationpb.New(input)
	}
	return nil
}
// mergeLogs returns the set of providers for the given logging configuration.
// The provider names are mapped to any applicable access logging filter that has been applied in provider configuration.
func mergeLogs(logs []*computedAccessLogging, mesh *meshconfig.MeshConfig, mode tpb.WorkloadMode) map[string]loggingSpec {
	providers := map[string]loggingSpec{}
	// No Telemetry-based logging at all: fall back to the mesh default providers.
	if len(logs) == 0 {
		for _, dp := range mesh.GetDefaultProviders().GetAccessLogging() {
			// Insert the default provider.
			providers[dp] = loggingSpec{}
		}
		return providers
	}
	// Pass 1: determine which providers are in scope (the last level that names any
	// providers wins) and capture per-provider filters.
	providerNames := mesh.GetDefaultProviders().GetAccessLogging()
	filters := map[string]loggingSpec{}
	for _, m := range logs {
		names := sets.New[string]()
		for _, p := range m.Logging {
			if !matchWorkloadMode(p.Match, mode) {
				continue
			}
			subProviders := getProviderNames(p.Providers)
			names.InsertAll(subProviders...)
			for _, prov := range subProviders {
				filters[prov] = loggingSpec{
					Filter: p.Filter,
				}
			}
		}
		if names.Len() > 0 {
			providerNames = names.UnsortedList()
		}
	}
	inScopeProviders := sets.New(providerNames...)
	// Pass 2: walk the levels again, inheriting provider names from the parent level
	// when unset, and record the final enabled/disabled state per provider.
	parentProviders := mesh.GetDefaultProviders().GetAccessLogging()
	for _, l := range logs {
		for _, m := range l.Logging {
			providerNames := getProviderNames(m.Providers)
			if len(providerNames) == 0 {
				providerNames = parentProviders
			}
			parentProviders = providerNames
			for _, provider := range providerNames {
				if !inScopeProviders.Contains(provider) {
					// We don't care about this, remove it
					// This occurs when a top level provider is later disabled by a lower level
					continue
				}
				if !matchWorkloadMode(m.Match, mode) {
					continue
				}
				// see UT: server - multi filters disabled
				if m.GetDisabled().GetValue() {
					providers[provider] = loggingSpec{Disabled: true}
					continue
				}
				providers[provider] = filters[provider]
			}
		}
	}
	return providers
}
// matchWorkloadMode reports whether the selector applies to the given workload
// mode. A nil selector and CLIENT_AND_SERVER match every mode.
func matchWorkloadMode(selector *tpb.AccessLogging_LogSelector, mode tpb.WorkloadMode) bool {
	if selector == nil {
		return true
	}
	m := selector.Mode
	return m == tpb.WorkloadMode_CLIENT_AND_SERVER || m == mode
}
// namespaceWideTelemetryConfig returns the first (oldest) selector-less
// Telemetry in the namespace, or the zero Telemetry when none exists.
func (t *Telemetries) namespaceWideTelemetryConfig(namespace string) Telemetry {
	for _, candidate := range t.NamespaceToTelemetries[namespace] {
		// A Telemetry without a workload selector applies namespace-wide.
		if len(candidate.Spec.GetSelector().GetMatchLabels()) == 0 {
			return candidate
		}
	}
	return Telemetry{}
}
// fetchProvider finds the extension provider with the given name in the mesh
// config (case-insensitive), or nil when no provider matches.
func (t *Telemetries) fetchProvider(m string) *meshconfig.MeshConfig_ExtensionProvider {
	for _, candidate := range t.meshConfig.ExtensionProviders {
		if strings.EqualFold(m, candidate.Name) {
			return candidate
		}
	}
	return nil
}
// Debug returns the applicable Telemetry configuration for the proxy,
// intended for debug endpoints.
func (t *Telemetries) Debug(proxy *Proxy) any {
	return t.applicableTelemetries(proxy)
}
// allMetrics is the sorted list of every concrete Istio metric name, i.e. all
// MetricSelector_IstioMetric enum names except the ALL_METRICS sentinel.
var allMetrics = func() []string {
	sentinel := tpb.MetricSelector_IstioMetric_name[int32(tpb.MetricSelector_ALL_METRICS)]
	names := make([]string, 0, len(tpb.MetricSelector_IstioMetric_value))
	for name := range tpb.MetricSelector_IstioMetric_value {
		if name == sentinel {
			continue
		}
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}()
// mergeMetrics merges many Metrics objects into a normalized configuration,
// keyed by provider name. The metrics slice must be ordered by precedence
// (lowest first); later entries override earlier ones.
func mergeMetrics(metrics []*tpb.Metrics, mesh *meshconfig.MeshConfig) map[string]metricsConfig {
	type metricOverride struct {
		Disabled *wrappers.BoolValue
		TagOverrides map[string]*tpb.MetricsOverrides_TagOverride
	}
	// provider -> mode -> metric -> overrides
	providers := map[string]map[tpb.WorkloadMode]map[string]metricOverride{}
	if len(metrics) == 0 {
		for _, dp := range mesh.GetDefaultProviders().GetMetrics() {
			// Insert the default provider. It has no overrides; presence of the key is sufficient to
			// get the filter created.
			providers[dp] = map[tpb.WorkloadMode]map[string]metricOverride{}
		}
	}
	// Determine the final set of in-scope provider names: the last Metrics entry
	// that names providers wins outright.
	providerNames := mesh.GetDefaultProviders().GetMetrics()
	for _, m := range metrics {
		names := getProviderNames(m.Providers)
		// If providers is set, it overrides the parent. If not, inherent from the parent. It is not a deep merge.
		if len(names) > 0 {
			providerNames = names
		}
	}
	// Record the names of all providers we should configure. Anything else we will ignore
	inScopeProviders := sets.New(providerNames...)
	parentProviders := mesh.GetDefaultProviders().GetMetrics()
	disabledAllMetricsProviders := sets.New[string]()
	reportingIntervals := map[string]*durationpb.Duration{}
	// Normalize each Metrics entry into the providers map, applying overrides in order.
	for _, m := range metrics {
		providerNames := getProviderNames(m.Providers)
		// If providers is not set, use parent's
		if len(providerNames) == 0 {
			providerNames = parentProviders
		}
		reportInterval := m.GetReportingInterval()
		parentProviders = providerNames
		for _, provider := range providerNames {
			if !inScopeProviders.Contains(provider) {
				// We don't care about this, remove it
				// This occurs when a top level provider is later disabled by a lower level
				continue
			}
			if reportInterval != nil {
				reportingIntervals[provider] = reportInterval
			}
			if _, f := providers[provider]; !f {
				providers[provider] = map[tpb.WorkloadMode]map[string]metricOverride{
					tpb.WorkloadMode_CLIENT: {},
					tpb.WorkloadMode_SERVER: {},
				}
			}
			mp := providers[provider]
			// For each override, we normalize the configuration. The metrics list is an ordered list - latter
			// elements have precedence. As a result, we will apply updates on top of previous entries.
			for _, o := range m.Overrides {
				// if we disable all metrics, we should drop the entire filter
				if isAllMetrics(o.GetMatch()) && o.Disabled.GetValue() {
					for _, mode := range getModes(o.GetMatch().GetMode()) {
						key := metricProviderModeKey(provider, mode)
						disabledAllMetricsProviders.Insert(key)
					}
					continue
				}
				metricsNames := getMatches(o.GetMatch())
				// If client or server is set explicitly, only apply there. Otherwise, we will apply to both.
				// Note: client and server keys may end up the same, which is fine
				for _, mode := range getModes(o.GetMatch().GetMode()) {
					// root namespace disables all, but then enables them by namespace scoped
					key := metricProviderModeKey(provider, mode)
					disabledAllMetricsProviders.Delete(key)
					// Next, get all matches.
					// This is a bit funky because the matches are oneof of ENUM and customer metric. We normalize
					// these to strings, so we may end up with a list like [REQUEST_COUNT, my-customer-metric].
					// TODO: we always flatten ALL_METRICS into each metric mode. For some stats providers (prometheus),
					// we are able to apply overrides to all metrics directly rather than duplicating the config.
					// We should tweak this to collapse to this mode where possible
					for _, metricName := range metricsNames {
						if _, f := mp[mode]; !f {
							mp[mode] = map[string]metricOverride{}
						}
						override := mp[mode][metricName]
						if o.Disabled != nil {
							override.Disabled = o.Disabled
						}
						for k, v := range o.TagOverrides {
							if override.TagOverrides == nil {
								override.TagOverrides = map[string]*tpb.MetricsOverrides_TagOverride{}
							}
							override.TagOverrides[k] = v
						}
						mp[mode][metricName] = override
					}
				}
			}
		}
	}
	// Flatten the nested provider/mode/metric maps into the public metricsConfig form.
	processed := map[string]metricsConfig{}
	for provider, modeMap := range providers {
		tmm := processed[provider]
		tmm.ReportingInterval = reportingIntervals[provider]
		for mode, metricMap := range modeMap {
			key := metricProviderModeKey(provider, mode)
			if disabledAllMetricsProviders.Contains(key) {
				switch mode {
				case tpb.WorkloadMode_CLIENT:
					tmm.ClientMetrics.Disabled = true
				case tpb.WorkloadMode_SERVER:
					tmm.ServerMetrics.Disabled = true
				}
				continue
			}
			for metric, override := range metricMap {
				tags := []tagOverride{}
				for k, v := range override.TagOverrides {
					o := tagOverride{Name: k}
					switch v.Operation {
					case tpb.MetricsOverrides_TagOverride_REMOVE:
						o.Remove = true
						o.Value = ""
					case tpb.MetricsOverrides_TagOverride_UPSERT:
						o.Value = v.GetValue()
						o.Remove = false
					}
					tags = append(tags, o)
				}
				// Keep order deterministic
				sort.Slice(tags, func(i, j int) bool {
					return tags[i].Name < tags[j].Name
				})
				mo := metricsOverride{
					Name: metric,
					Disabled: override.Disabled.GetValue(),
					Tags: tags,
				}
				switch mode {
				case tpb.WorkloadMode_CLIENT:
					tmm.ClientMetrics.Overrides = append(tmm.ClientMetrics.Overrides, mo)
				default:
					tmm.ServerMetrics.Overrides = append(tmm.ServerMetrics.Overrides, mo)
				}
			}
		}
		// Keep order deterministic
		sort.Slice(tmm.ServerMetrics.Overrides, func(i, j int) bool {
			return tmm.ServerMetrics.Overrides[i].Name < tmm.ServerMetrics.Overrides[j].Name
		})
		sort.Slice(tmm.ClientMetrics.Overrides, func(i, j int) bool {
			return tmm.ClientMetrics.Overrides[i].Name < tmm.ClientMetrics.Overrides[j].Name
		})
		processed[provider] = tmm
	}
	return processed
}
// metricProviderModeKey builds the "provider/mode" key used to track
// all-metrics-disabled state per provider and workload mode.
func metricProviderModeKey(provider string, mode tpb.WorkloadMode) string {
	// Equivalent to fmt.Sprintf("%s/%s", ...): %s on a protobuf enum uses String().
	return provider + "/" + mode.String()
}
// getProviderNames extracts the name of each provider reference, preserving order.
func getProviderNames(providers []*tpb.ProviderRef) []string {
	names := make([]string, len(providers))
	for i, ref := range providers {
		names[i] = ref.GetName()
	}
	return names
}
// getModes expands a workload-mode selector into the concrete modes it covers:
// CLIENT or SERVER map to themselves; any other value means both.
func getModes(mode tpb.WorkloadMode) []tpb.WorkloadMode {
	if mode == tpb.WorkloadMode_CLIENT || mode == tpb.WorkloadMode_SERVER {
		return []tpb.WorkloadMode{mode}
	}
	return []tpb.WorkloadMode{tpb.WorkloadMode_CLIENT, tpb.WorkloadMode_SERVER}
}
// isAllMetrics reports whether the selector matches every metric. An unset
// selector defaults to all metrics; a custom metric never matches all.
func isAllMetrics(match *tpb.MetricSelector) bool {
	switch sel := match.GetMetricMatch().(type) {
	case *tpb.MetricSelector_Metric:
		return sel.Metric == tpb.MetricSelector_ALL_METRICS
	case *tpb.MetricSelector_CustomMetric:
		return false
	default:
		return true
	}
}
// getMatches resolves a metric selector to the list of concrete metric names it
// selects; an unset selector or the ALL_METRICS sentinel selects every metric.
func getMatches(match *tpb.MetricSelector) []string {
	switch sel := match.GetMetricMatch().(type) {
	case *tpb.MetricSelector_CustomMetric:
		return []string{sel.CustomMetric}
	case *tpb.MetricSelector_Metric:
		if sel.Metric != tpb.MetricSelector_ALL_METRICS {
			return []string{sel.Metric.String()}
		}
		return allMetrics
	default:
		return allMetrics
	}
}
// waypointStatsConfig is the static stats plugin configuration used for
// waypoint proxies: it sets the plugin's "reporter" field to "SERVER_GATEWAY".
var waypointStatsConfig = protoconv.MessageToAny(&udpa.TypedStruct{
	TypeUrl: "type.googleapis.com/stats.PluginConfig",
	Value: &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"reporter": {
				Kind: &structpb.Value_StringValue{
					StringValue: "SERVER_GATEWAY",
				},
			},
		},
	},
})

// telemetryFilterHandled contains the number of providers we handle below.
// This is to ensure this stays in sync as new handlers are added
// STOP. DO NOT UPDATE THIS WITHOUT UPDATING buildHTTPTelemetryFilter and buildTCPTelemetryFilter.
const telemetryFilterHandled = 14
// buildHTTPTelemetryFilter constructs the HTTP telemetry filters (Prometheus
// stats or Stackdriver WASM) for the given listener class from the supplied
// per-provider configurations. Providers of any other kind are skipped.
func buildHTTPTelemetryFilter(class networking.ListenerClass, metricsCfg []telemetryFilterConfig) []*hcm.HttpFilter {
	filters := make([]*hcm.HttpFilter, 0, len(metricsCfg))
	for _, cfg := range metricsCfg {
		switch cfg.Provider.GetProvider().(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_Prometheus:
			// Waypoints use a fixed SERVER_GATEWAY reporter config; everything
			// else generates a config (which may be nil when metrics are off).
			statsCfg := waypointStatsConfig
			if cfg.NodeType != Waypoint {
				statsCfg = generateStatsConfig(class, cfg)
			}
			if statsCfg == nil {
				continue
			}
			filters = append(filters, &hcm.HttpFilter{
				Name:       xds.StatsFilterName,
				ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: statsCfg},
			})
		case *meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
			sdCfg := generateSDConfig(class, cfg)
			vmConfig := ConstructVMConfig("envoy.wasm.null.stackdriver")
			vmConfig.VmConfig.VmId = stackdriverVMID(class)
			wasmConfig := &httpwasm.Wasm{
				Config: &wasm.PluginConfig{
					RootId:        vmConfig.VmConfig.VmId,
					Vm:            vmConfig,
					Configuration: sdCfg,
				},
			}
			filters = append(filters, &hcm.HttpFilter{
				Name:       xds.StackdriverFilterName,
				ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(wasmConfig)},
			})
		default:
			// Only prometheus and SD supported currently
			continue
		}
	}
	return filters
}
// buildTCPTelemetryFilter constructs the network-level (TCP) telemetry filters
// for the given listener class; it mirrors buildHTTPTelemetryFilter.
func buildTCPTelemetryFilter(class networking.ListenerClass, telemetryConfigs []telemetryFilterConfig) []*listener.Filter {
	res := make([]*listener.Filter, 0, len(telemetryConfigs))
	for _, tc := range telemetryConfigs {
		switch tc.Provider.GetProvider().(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_Prometheus:
			// Waypoints use a fixed SERVER_GATEWAY reporter config; everything
			// else generates a config (which may be nil when metrics are off).
			statsCfg := waypointStatsConfig
			if tc.NodeType != Waypoint {
				statsCfg = generateStatsConfig(class, tc)
			}
			if statsCfg == nil {
				continue
			}
			res = append(res, &listener.Filter{
				Name:       xds.StatsFilterName,
				ConfigType: &listener.Filter_TypedConfig{TypedConfig: statsCfg},
			})
		case *meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
			sdCfg := generateSDConfig(class, tc)
			vmConfig := ConstructVMConfig("envoy.wasm.null.stackdriver")
			vmConfig.VmConfig.VmId = stackdriverVMID(class)
			wasmConfig := &wasmfilter.Wasm{
				Config: &wasm.PluginConfig{
					RootId:        vmConfig.VmConfig.VmId,
					Vm:            vmConfig,
					Configuration: sdCfg,
				},
			}
			res = append(res, &listener.Filter{
				Name:       xds.StackdriverFilterName,
				ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(wasmConfig)},
			})
		default:
			// Only prometheus and SD supported currently
			continue
		}
	}
	return res
}
// stackdriverVMID returns the WASM VM ID for the Stackdriver filter: inbound
// sidecar listeners get a distinct VM ID from all other listener classes.
func stackdriverVMID(class networking.ListenerClass) string {
	if class == networking.ListenerClassSidecarInbound {
		return "stackdriver_inbound"
	}
	return "stackdriver_outbound"
}
// metricToSDServerMetrics maps Istio standard metric names to their Stackdriver
// server-side metric names. Empty values mark metrics with no Stackdriver
// equivalent; generateSDConfig skips those.
var metricToSDServerMetrics = map[string]string{
	"REQUEST_COUNT":          "server/request_count",
	"REQUEST_DURATION":       "server/response_latencies",
	"REQUEST_SIZE":           "server/request_bytes",
	"RESPONSE_SIZE":          "server/response_bytes",
	"TCP_OPENED_CONNECTIONS": "server/connection_open_count",
	"TCP_CLOSED_CONNECTIONS": "server/connection_close_count",
	"TCP_SENT_BYTES":         "server/sent_bytes_count",
	"TCP_RECEIVED_BYTES":     "server/received_bytes_count",
	"GRPC_REQUEST_MESSAGES":  "",
	"GRPC_RESPONSE_MESSAGES": "",
}
// metricToSDClientMetrics maps Istio standard metric names to their Stackdriver
// client-side metric names. Empty values mark metrics with no Stackdriver
// equivalent; generateSDConfig skips those.
var metricToSDClientMetrics = map[string]string{
	"REQUEST_COUNT":          "client/request_count",
	"REQUEST_DURATION":       "client/response_latencies",
	"REQUEST_SIZE":           "client/request_bytes",
	"RESPONSE_SIZE":          "client/response_bytes",
	"TCP_OPENED_CONNECTIONS": "client/connection_open_count",
	"TCP_CLOSED_CONNECTIONS": "client/connection_close_count",
	"TCP_SENT_BYTES":         "client/sent_bytes_count",
	"TCP_RECEIVED_BYTES":     "client/received_bytes_count",
	"GRPC_REQUEST_MESSAGES":  "",
	"GRPC_RESPONSE_MESSAGES": "",
}
// jsonUnescaper reverses the HTML escaping produced by the proto JSON encoder.
// It is used for CEL expressions in stackdriver serialization, which may
// legitimately contain '>', '<', and '&'.
var jsonUnescaper = strings.NewReplacer(`\u003e`, `>`, `\u003c`, `<`, `\u0026`, `&`)
// generateSDConfig builds the Stackdriver WASM plugin configuration for the
// given listener class, applying per-metric overrides and access-logging
// settings from the telemetry configuration. The result is JSON-encoded and
// wrapped in a StringValue Any, since the WASM plugin consumes JSON rather
// than binary protobuf.
func generateSDConfig(class networking.ListenerClass, telemetryConfig telemetryFilterConfig) *anypb.Any {
	cfg := sd.PluginConfig{
		DisableHostHeaderFallback: disableHostHeaderFallback(class),
	}
	// Inbound sidecar listeners map to server-side Stackdriver metric names;
	// all other classes use the client-side names.
	metricNameMap := metricToSDClientMetrics
	if class == networking.ListenerClassSidecarInbound {
		metricNameMap = metricToSDServerMetrics
	}
	metricCfg := telemetryConfig.MetricsForClass(class)
	if !metricCfg.Disabled {
		for _, override := range metricCfg.Overrides {
			metricName, f := metricNameMap[override.Name]
			if !f {
				// Not a predefined metric, must be a custom one
				metricName = override.Name
			}
			if metricName == "" {
				// Empty mapping means the standard metric has no Stackdriver equivalent.
				continue
			}
			// Lazily initialize the maps so an empty override set produces no config.
			if cfg.MetricsOverrides == nil {
				cfg.MetricsOverrides = map[string]*sd.MetricsOverride{}
			}
			if _, f := cfg.MetricsOverrides[metricName]; !f {
				cfg.MetricsOverrides[metricName] = &sd.MetricsOverride{}
			}
			cfg.MetricsOverrides[metricName].Drop = override.Disabled
			for _, t := range override.Tags {
				if t.Remove {
					// Remove is not supported by SD
					continue
				}
				if cfg.MetricsOverrides[metricName].TagOverrides == nil {
					cfg.MetricsOverrides[metricName].TagOverrides = map[string]string{}
				}
				cfg.MetricsOverrides[metricName].TagOverrides[t.Name] = t.Value
			}
		}
	}
	if telemetryConfig.AccessLogging {
		if telemetryConfig.LogsFilter != nil {
			// An explicit filter expression takes precedence over the coarse modes.
			cfg.AccessLoggingFilterExpression = telemetryConfig.LogsFilter.Expression
		} else {
			if class == networking.ListenerClassSidecarInbound {
				cfg.AccessLogging = sd.PluginConfig_FULL
			} else {
				// this can be achieved via CEL: `response.code >= 400 || response.code == 0`
				cfg.AccessLogging = sd.PluginConfig_ERRORS_ONLY
			}
		}
	} else {
		// The field is deprecated, but until it is removed we need to set it.
		cfg.DisableServerAccessLogging = true // nolint: staticcheck
	}
	cfg.MetricExpiryDuration = durationpb.New(1 * time.Hour)
	cfg.EnableAuditLog = features.StackdriverAuditLog
	// In WASM we are not actually processing protobuf at all, so we need to encode this to JSON
	cfgJSON, _ := protomarshal.MarshalProtoNames(&cfg)
	// MarshalProtoNames() forces HTML-escaped JSON encoding.
	// this can be problematic for CEL expressions, particularly those using
	// '>', '<', and '&'s. It is easier to use replaceAll operations than it is
	// to mimic MarshalProtoNames() with configured JSON Encoder.
	pb := &wrappers.StringValue{Value: jsonUnescaper.Replace(string(cfgJSON))}
	return protoconv.MessageToAny(pb)
}
// metricToPrometheusMetric maps Istio standard metric names to their Prometheus
// stats-plugin metric names; names not in this map are treated as custom metrics.
var metricToPrometheusMetric = map[string]string{
	"REQUEST_COUNT":          "requests_total",
	"REQUEST_DURATION":       "request_duration_milliseconds",
	"REQUEST_SIZE":           "request_bytes",
	"RESPONSE_SIZE":          "response_bytes",
	"TCP_OPENED_CONNECTIONS": "tcp_connections_opened_total",
	"TCP_CLOSED_CONNECTIONS": "tcp_connections_closed_total",
	"TCP_SENT_BYTES":         "tcp_sent_bytes_total",
	"TCP_RECEIVED_BYTES":     "tcp_received_bytes_total",
	"GRPC_REQUEST_MESSAGES":  "request_messages_total",
	"GRPC_RESPONSE_MESSAGES": "response_messages_total",
}
// generateStatsConfig builds the Prometheus stats plugin configuration for the
// given listener class, or nil when metrics are disabled for this provider or
// for this listener class.
func generateStatsConfig(class networking.ListenerClass, filterConfig telemetryFilterConfig) *anypb.Any {
	if !filterConfig.Metrics {
		// No metric for prometheus
		return nil
	}
	listenerCfg := filterConfig.MetricsForClass(class)
	if listenerCfg.Disabled {
		// no metrics for this listener
		return nil
	}
	cfg := stats.PluginConfig{
		DisableHostHeaderFallback: disableHostHeaderFallback(class),
		TcpReportingDuration:      filterConfig.ReportingInterval,
		RotationInterval:          filterConfig.RotationInterval,
		GracefulDeletionInterval:  filterConfig.GracefulDeletionInterval,
	}
	for _, override := range listenerCfg.Overrides {
		name, known := metricToPrometheusMetric[override.Name]
		if !known {
			// Not a predefined metric, must be a custom one
			name = override.Name
		}
		metricCfg := &stats.MetricConfig{
			Dimensions: map[string]string{},
			Name:       name,
			Drop:       override.Disabled,
		}
		for _, tag := range override.Tags {
			if tag.Remove {
				metricCfg.TagsToRemove = append(metricCfg.TagsToRemove, tag.Name)
				continue
			}
			metricCfg.Dimensions[tag.Name] = tag.Value
		}
		cfg.Metrics = append(cfg.Metrics, metricCfg)
	}
	return protoconv.MessageToAny(&cfg)
}
// disableHostHeaderFallback reports whether stats plugins should not fall back
// to the Host header for service naming; this applies to inbound sidecar and
// gateway listeners.
func disableHostHeaderFallback(class networking.ListenerClass) bool {
	switch class {
	case networking.ListenerClassSidecarInbound, networking.ListenerClassGateway:
		return true
	default:
		return false
	}
}
// Equal compares two computedTelemetries for equality. This was created to help with testing. Because of the nature of the structs being compared,
// it is safer to use cmp.Equal as opposed to reflect.DeepEqual. Also, because of the way the structs are generated, it is not possible to use
// cmpopts.IgnoreUnexported without risking flakiness if those third party types that are relied on change. Next best thing is to use a custom
// comparer as defined below. When cmp.Equal is called on this type, this will be leveraged by cmp.Equal to do the comparison see
// https://godoc.org/github.com/google/go-cmp/cmp#Equal for more info.
//
// NOTE(review): the comparison sorts the Metrics/Logging/Tracing slices of BOTH
// operands in place, so calling Equal mutates its inputs — acceptable for its
// test-only purpose, but do not use on shared state. The sort comparators also
// index Providers[0] (and Logging[0]) unconditionally, so they assume those
// slices are non-empty — TODO confirm callers always populate them.
func (ct *computedTelemetries) Equal(other *computedTelemetries) bool {
	if ct == nil && other == nil {
		return true
	}
	if ct != nil && other == nil || ct == nil && other != nil {
		return false
	}
	if len(ct.Metrics) != len(other.Metrics) || len(ct.Logging) != len(other.Logging) || len(ct.Tracing) != len(other.Tracing) {
		return false
	}
	// Sort each slice so that we can compare them in order. Comparison is on the fields that are used in the test cases.
	sort.SliceStable(ct.Metrics, func(i, j int) bool {
		return ct.Metrics[i].Providers[0].Name < ct.Metrics[j].Providers[0].Name
	})
	sort.SliceStable(other.Metrics, func(i, j int) bool {
		return other.Metrics[i].Providers[0].Name < other.Metrics[j].Providers[0].Name
	})
	for i := range ct.Metrics {
		// Fields are only compared when present on both sides; a nil on either
		// side is treated as equal.
		if ct.Metrics[i].ReportingInterval != nil && other.Metrics[i].ReportingInterval != nil {
			if ct.Metrics[i].ReportingInterval.AsDuration() != other.Metrics[i].ReportingInterval.AsDuration() {
				return false
			}
		}
		if ct.Metrics[i].Providers != nil && other.Metrics[i].Providers != nil {
			if ct.Metrics[i].Providers[0].Name != other.Metrics[i].Providers[0].Name {
				return false
			}
		}
	}
	sort.SliceStable(ct.Logging, func(i, j int) bool {
		return ct.Logging[i].telemetryKey.Root.Name < ct.Logging[j].telemetryKey.Root.Name
	})
	sort.SliceStable(other.Logging, func(i, j int) bool {
		return other.Logging[i].telemetryKey.Root.Name < other.Logging[j].telemetryKey.Root.Name
	})
	for i := range ct.Logging {
		if ct.Logging[i].telemetryKey != other.Logging[i].telemetryKey {
			return false
		}
		if ct.Logging[i].Logging != nil && other.Logging[i].Logging != nil {
			if ct.Logging[i].Logging[0].Providers[0].Name != other.Logging[i].Logging[0].Providers[0].Name {
				return false
			}
		}
	}
	sort.SliceStable(ct.Tracing, func(i, j int) bool {
		return ct.Tracing[i].Providers[0].Name < ct.Tracing[j].Providers[0].Name
	})
	sort.SliceStable(other.Tracing, func(i, j int) bool {
		return other.Tracing[i].Providers[0].Name < other.Tracing[j].Providers[0].Name
	})
	for i := range ct.Tracing {
		if ct.Tracing[i].Match != nil && other.Tracing[i].Match != nil {
			if ct.Tracing[i].Match.Mode != other.Tracing[i].Match.Mode {
				return false
			}
		}
		if ct.Tracing[i].Providers != nil && other.Tracing[i].Providers != nil {
			if ct.Tracing[i].Providers[0].Name != other.Tracing[i].Providers[0].Name {
				return false
			}
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
fileaccesslog "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/file/v3"
grpcaccesslog "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v3"
otelaccesslog "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/open_telemetry/v3"
celformatter "github.com/envoyproxy/go-control-plane/envoy/extensions/formatter/cel/v3"
metadataformatter "github.com/envoyproxy/go-control-plane/envoy/extensions/formatter/metadata/v3"
reqwithoutquery "github.com/envoyproxy/go-control-plane/envoy/extensions/formatter/req_without_query/v3"
otlpcommon "go.opentelemetry.io/proto/otlp/common/v1"
"google.golang.org/protobuf/types/known/structpb"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/wellknown"
)
const (
	// EnvoyTextLogFormat format for envoy text based access logs for Istio 1.9 onwards.
	// This includes the additional new operator RESPONSE_CODE_DETAILS and CONNECTION_TERMINATION_DETAILS that tells
	// the reason why Envoy rejects a request.
	EnvoyTextLogFormat = "[%START_TIME%] \"%REQ(:METHOD)% %REQ(X-ENVOY-ORIGINAL-PATH?:PATH)% " +
		"%PROTOCOL%\" %RESPONSE_CODE% %RESPONSE_FLAGS% " +
		"%RESPONSE_CODE_DETAILS% %CONNECTION_TERMINATION_DETAILS% " +
		"\"%UPSTREAM_TRANSPORT_FAILURE_REASON%\" %BYTES_RECEIVED% %BYTES_SENT% " +
		"%DURATION% %RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)% \"%REQ(X-FORWARDED-FOR)%\" " +
		"\"%REQ(USER-AGENT)%\" \"%REQ(X-REQUEST-ID)%\" \"%REQ(:AUTHORITY)%\" \"%UPSTREAM_HOST%\" " +
		"%UPSTREAM_CLUSTER% %UPSTREAM_LOCAL_ADDRESS% %DOWNSTREAM_LOCAL_ADDRESS% " +
		"%DOWNSTREAM_REMOTE_ADDRESS% %REQUESTED_SERVER_NAME% %ROUTE_NAME%\n"
	// Friendly log names used for the HTTP gRPC, TCP gRPC, and OpenTelemetry ALS streams.
	HTTPEnvoyAccessLogFriendlyName = "http_envoy_accesslog"
	TCPEnvoyAccessLogFriendlyName  = "tcp_envoy_accesslog"
	OtelEnvoyAccessLogFriendlyName = "otel_envoy_accesslog"
	// Envoy extension names for the TCP gRPC and OpenTelemetry access loggers.
	TCPEnvoyALSName  = "envoy.tcp_grpc_access_log"
	OtelEnvoyALSName = "envoy.access_loggers.open_telemetry"
	// Command operators that require registering an additional Envoy formatter extension.
	reqWithoutQueryCommandOperator = "%REQ_WITHOUT_QUERY"
	metadataCommandOperator        = "%METADATA"
	celCommandOperator             = "%CEL"
	// DevStdout is the default output path for file access logs.
	DevStdout = "/dev/stdout"
	// builtinEnvoyAccessLogProvider is the name of the built-in "envoy" file
	// access log provider, which falls back to MeshConfig formatting options.
	builtinEnvoyAccessLogProvider = "envoy"
)
var (
	// this is used for testing. it should not be changed in regular code.
	clusterLookupFn = LookupCluster
	// EnvoyJSONLogFormatIstio map of values for envoy json based access logs for Istio 1.9 onwards.
	// This includes the additional log operator RESPONSE_CODE_DETAILS and CONNECTION_TERMINATION_DETAILS that tells
	// the reason why Envoy rejects a request.
	// Keys are emitted in sorted order (see JsonFormatOptions{SortProperties: true} at the use sites).
	EnvoyJSONLogFormatIstio = &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"start_time":                        {Kind: &structpb.Value_StringValue{StringValue: "%START_TIME%"}},
			"route_name":                        {Kind: &structpb.Value_StringValue{StringValue: "%ROUTE_NAME%"}},
			"method":                            {Kind: &structpb.Value_StringValue{StringValue: "%REQ(:METHOD)%"}},
			"path":                              {Kind: &structpb.Value_StringValue{StringValue: "%REQ(X-ENVOY-ORIGINAL-PATH?:PATH)%"}},
			"protocol":                          {Kind: &structpb.Value_StringValue{StringValue: "%PROTOCOL%"}},
			"response_code":                     {Kind: &structpb.Value_StringValue{StringValue: "%RESPONSE_CODE%"}},
			"response_flags":                    {Kind: &structpb.Value_StringValue{StringValue: "%RESPONSE_FLAGS%"}},
			"response_code_details":             {Kind: &structpb.Value_StringValue{StringValue: "%RESPONSE_CODE_DETAILS%"}},
			"connection_termination_details":    {Kind: &structpb.Value_StringValue{StringValue: "%CONNECTION_TERMINATION_DETAILS%"}},
			"bytes_received":                    {Kind: &structpb.Value_StringValue{StringValue: "%BYTES_RECEIVED%"}},
			"bytes_sent":                        {Kind: &structpb.Value_StringValue{StringValue: "%BYTES_SENT%"}},
			"duration":                          {Kind: &structpb.Value_StringValue{StringValue: "%DURATION%"}},
			"upstream_service_time":             {Kind: &structpb.Value_StringValue{StringValue: "%RESP(X-ENVOY-UPSTREAM-SERVICE-TIME)%"}},
			"x_forwarded_for":                   {Kind: &structpb.Value_StringValue{StringValue: "%REQ(X-FORWARDED-FOR)%"}},
			"user_agent":                        {Kind: &structpb.Value_StringValue{StringValue: "%REQ(USER-AGENT)%"}},
			"request_id":                        {Kind: &structpb.Value_StringValue{StringValue: "%REQ(X-REQUEST-ID)%"}},
			"authority":                         {Kind: &structpb.Value_StringValue{StringValue: "%REQ(:AUTHORITY)%"}},
			"upstream_host":                     {Kind: &structpb.Value_StringValue{StringValue: "%UPSTREAM_HOST%"}},
			"upstream_cluster":                  {Kind: &structpb.Value_StringValue{StringValue: "%UPSTREAM_CLUSTER%"}},
			"upstream_local_address":            {Kind: &structpb.Value_StringValue{StringValue: "%UPSTREAM_LOCAL_ADDRESS%"}},
			"downstream_local_address":          {Kind: &structpb.Value_StringValue{StringValue: "%DOWNSTREAM_LOCAL_ADDRESS%"}},
			"downstream_remote_address":         {Kind: &structpb.Value_StringValue{StringValue: "%DOWNSTREAM_REMOTE_ADDRESS%"}},
			"requested_server_name":             {Kind: &structpb.Value_StringValue{StringValue: "%REQUESTED_SERVER_NAME%"}},
			"upstream_transport_failure_reason": {Kind: &structpb.Value_StringValue{StringValue: "%UPSTREAM_TRANSPORT_FAILURE_REASON%"}},
		},
	}
	// State logged by the metadata exchange filter about the upstream and downstream service instances
	// We need to propagate these as part of access log service stream
	// Logging them by default on the console may be an issue as the base64 encoded string is bound to be a big one.
	// But end users can certainly configure it on their own via the meshConfig using the %FILTER_STATE% macro.
	envoyWasmStateToLog = []string{"wasm.upstream_peer", "wasm.upstream_peer_id", "wasm.downstream_peer", "wasm.downstream_peer_id"}
	// reqWithoutQueryFormatter configures additional formatters needed for some of the format strings like "REQ_WITHOUT_QUERY"
	reqWithoutQueryFormatter = &core.TypedExtensionConfig{
		Name:        "envoy.formatter.req_without_query",
		TypedConfig: protoconv.MessageToAny(&reqwithoutquery.ReqWithoutQuery{}),
	}
	// metadataFormatter configures additional formatters needed for some of the format strings like "METADATA"
	// for more information, see https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/formatter/metadata/v3/metadata.proto
	metadataFormatter = &core.TypedExtensionConfig{
		Name:        "envoy.formatter.metadata",
		TypedConfig: protoconv.MessageToAny(&metadataformatter.Metadata{}),
	}
	// celFormatter configures additional formatters needed for some of the format strings like "CEL"
	// for more information, see https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/formatter/cel/v3/cel.proto
	celFormatter = &core.TypedExtensionConfig{
		Name:        "envoy.formatter.cel",
		TypedConfig: protoconv.MessageToAny(&celformatter.Cel{}),
	}
)
// telemetryAccessLogHandled contains the number of providers we handle below.
// (The comment previously referenced a stale name, configureFromProviderConfigHandled.)
// This is to ensure this stays in sync as new handlers are added
// STOP. DO NOT UPDATE THIS WITHOUT UPDATING telemetryAccessLog.
const telemetryAccessLogHandled = 14
// telemetryAccessLog converts a single extension provider into an Envoy access
// log configuration, dispatching on the provider kind. It returns nil for
// provider kinds that have no access-log support (tracing, metrics, and
// ext-authz providers).
func telemetryAccessLog(push *PushContext, fp *meshconfig.MeshConfig_ExtensionProvider) *accesslog.AccessLog {
	var al *accesslog.AccessLog
	switch prov := fp.Provider.(type) {
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLog:
		// For built-in provider, fallback to MeshConfig for formatting options when LogFormat unset.
		if fp.Name == builtinEnvoyAccessLogProvider && prov.EnvoyFileAccessLog.LogFormat == nil {
			al = FileAccessLogFromMeshConfig(prov.EnvoyFileAccessLog.Path, push.Mesh)
		} else {
			al = fileAccessLogFromTelemetry(prov.EnvoyFileAccessLog)
		}
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpAls:
		al = httpGrpcAccessLogFromTelemetry(push, prov.EnvoyHttpAls)
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpAls:
		al = tcpGrpcAccessLogFromTelemetry(push, prov.EnvoyTcpAls)
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyOtelAls:
		al = openTelemetryLog(push, prov.EnvoyOtelAls)
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzHttp,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzGrpc,
		*meshconfig.MeshConfig_ExtensionProvider_Zipkin,
		*meshconfig.MeshConfig_ExtensionProvider_Lightstep,
		*meshconfig.MeshConfig_ExtensionProvider_Datadog,
		*meshconfig.MeshConfig_ExtensionProvider_Skywalking,
		*meshconfig.MeshConfig_ExtensionProvider_Opencensus,
		*meshconfig.MeshConfig_ExtensionProvider_Opentelemetry,
		*meshconfig.MeshConfig_ExtensionProvider_Prometheus,
		*meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
		// No access logs supported for this provider
		// Stackdriver is a special case as its handled in the Metrics logic, as it uses a shared filter
		return nil
	}
	return al
}
// tcpGrpcAccessLogFromTelemetry builds a TCP gRPC ALS access log configuration
// for the given provider, or nil when the provider's target service cannot be
// resolved to a cluster.
//
// Fix: the original guarded `prov != nil` for LogName but then dereferenced
// prov's fields unconditionally, which would panic on a nil provider. Use the
// nil-safe generated Get* accessors consistently instead.
func tcpGrpcAccessLogFromTelemetry(push *PushContext, prov *meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpGrpcV3LogProvider) *accesslog.AccessLog {
	logName := prov.GetLogName()
	if logName == "" {
		logName = TCPEnvoyAccessLogFriendlyName
	}
	// Default to propagating the metadata-exchange filter state unless the
	// provider specifies its own list.
	filterObjects := envoyWasmStateToLog
	if len(prov.GetFilterStateObjectsToLog()) != 0 {
		filterObjects = prov.GetFilterStateObjectsToLog()
	}
	hostname, cluster, err := clusterLookupFn(push, prov.GetService(), int(prov.GetPort()))
	if err != nil {
		IncLookupClusterFailures("envoyTCPAls")
		log.Errorf("could not find cluster for tcp grpc provider %q: %v", prov, err)
		return nil
	}
	fl := &grpcaccesslog.TcpGrpcAccessLogConfig{
		CommonConfig: &grpcaccesslog.CommonGrpcAccessLogConfig{
			LogName: logName,
			GrpcService: &core.GrpcService{
				TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
						ClusterName: cluster,
						Authority:   hostname,
					},
				},
			},
			TransportApiVersion:     core.ApiVersion_V3,
			FilterStateObjectsToLog: filterObjects,
		},
	}
	return &accesslog.AccessLog{
		Name:       TCPEnvoyALSName,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(fl)},
	}
}
// fileAccessLogFromTelemetry builds a file access log from a Telemetry API file
// provider, honoring its text or JSON (labels) format when set and defaulting
// the output path to /dev/stdout.
func fileAccessLogFromTelemetry(prov *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider) *accesslog.AccessLog {
	path := prov.Path
	if path == "" {
		path = DevStdout
	}
	fl := &fileaccesslog.FileAccessLog{Path: path}
	var formatters []*core.TypedExtensionConfig
	if prov.LogFormat == nil {
		// No format specified: use the default Istio text format.
		fl.AccessLogFormat, formatters = buildFileAccessTextLogFormat("")
	} else {
		switch logFormat := prov.LogFormat.LogFormat.(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider_LogFormat_Text:
			fl.AccessLogFormat, formatters = buildFileAccessTextLogFormat(logFormat.Text)
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider_LogFormat_Labels:
			fl.AccessLogFormat, formatters = buildFileAccessJSONLogFormat(logFormat)
		}
	}
	if len(formatters) != 0 {
		fl.GetLogFormat().Formatters = formatters
	}
	return &accesslog.AccessLog{
		Name:       wellknown.FileAccessLog,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(fl)},
	}
}
// buildFileAccessTextLogFormat builds the Envoy substitution-format config for
// a text access log, plus any formatter extensions the format string requires.
func buildFileAccessTextLogFormat(logFormatText string) (*fileaccesslog.FileAccessLog_LogFormat, []*core.TypedExtensionConfig) {
	format := fileAccessLogFormat(logFormatText)
	logFormat := &fileaccesslog.FileAccessLog_LogFormat{
		LogFormat: &core.SubstitutionFormatString{
			Format: &core.SubstitutionFormatString_TextFormatSource{
				TextFormatSource: &core.DataSource{
					Specifier: &core.DataSource_InlineString{InlineString: format},
				},
			},
		},
	}
	return logFormat, accessLogTextFormatters(format)
}
// buildFileAccessJSONLogFormat builds the Envoy substitution-format config for
// a JSON (labels) access log plus any required formatter extensions. Missing or
// empty labels fall back to the default Istio JSON format.
func buildFileAccessJSONLogFormat(
	logFormat *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider_LogFormat_Labels,
) (*fileaccesslog.FileAccessLog_LogFormat, []*core.TypedExtensionConfig) {
	jsonLogStruct := EnvoyJSONLogFormatIstio
	// Only honor user-supplied labels when they are present and non-empty.
	if logFormat.Labels != nil && len(logFormat.Labels.Fields) > 0 {
		jsonLogStruct = logFormat.Labels
	}
	format := &fileaccesslog.FileAccessLog_LogFormat{
		LogFormat: &core.SubstitutionFormatString{
			Format: &core.SubstitutionFormatString_JsonFormat{
				JsonFormat: jsonLogStruct,
			},
			JsonFormatOptions: &core.JsonFormatOptions{SortProperties: true},
		},
	}
	return format, accessLogJSONFormatters(jsonLogStruct)
}
// accessLogJSONFormatters scans a JSON log-format struct and returns the extra
// Envoy formatter extensions required by any command operators it uses
// (REQ_WITHOUT_QUERY, METADATA, CEL).
//
// Fix: the early-exit previously broke out of the loop once reqWithoutQuery and
// metadata were both found, before CEL was necessarily detected, which could
// silently drop the CEL formatter. The loop now stops only when all three
// operators have been seen.
func accessLogJSONFormatters(jsonLogStruct *structpb.Struct) []*core.TypedExtensionConfig {
	reqWithoutQuery, metadata, cel := false, false, false
	for _, value := range jsonLogStruct.Fields {
		if reqWithoutQuery && metadata && cel {
			break
		}
		if !reqWithoutQuery && strings.Contains(value.GetStringValue(), reqWithoutQueryCommandOperator) {
			reqWithoutQuery = true
		}
		if !metadata && strings.Contains(value.GetStringValue(), metadataCommandOperator) {
			metadata = true
		}
		if !cel && strings.Contains(value.GetStringValue(), celCommandOperator) {
			cel = true
		}
	}
	formatters := make([]*core.TypedExtensionConfig, 0, 3)
	if reqWithoutQuery {
		formatters = append(formatters, reqWithoutQueryFormatter)
	}
	if metadata {
		formatters = append(formatters, metadataFormatter)
	}
	if cel {
		formatters = append(formatters, celFormatter)
	}
	return formatters
}
// accessLogTextFormatters returns the extra Envoy formatter extensions required
// by any command operators present in the given text format string.
func accessLogTextFormatters(text string) []*core.TypedExtensionConfig {
	formatters := make([]*core.TypedExtensionConfig, 0, 2)
	for _, candidate := range []struct {
		operator  string
		formatter *core.TypedExtensionConfig
	}{
		{reqWithoutQueryCommandOperator, reqWithoutQueryFormatter},
		{metadataCommandOperator, metadataFormatter},
		{celCommandOperator, celFormatter},
	} {
		if strings.Contains(text, candidate.operator) {
			formatters = append(formatters, candidate.formatter)
		}
	}
	return formatters
}
// httpGrpcAccessLogFromTelemetry builds an HTTP gRPC ALS access log
// configuration for the given provider, or nil when the provider's target
// service cannot be resolved to a cluster.
//
// Fix: the original guarded `prov != nil` for LogName but then dereferenced
// prov's fields unconditionally, which would panic on a nil provider. Use the
// nil-safe generated Get* accessors consistently instead.
func httpGrpcAccessLogFromTelemetry(push *PushContext, prov *meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpGrpcV3LogProvider) *accesslog.AccessLog {
	logName := prov.GetLogName()
	if logName == "" {
		logName = HTTPEnvoyAccessLogFriendlyName
	}
	// Default to propagating the metadata-exchange filter state unless the
	// provider specifies its own list.
	filterObjects := envoyWasmStateToLog
	if len(prov.GetFilterStateObjectsToLog()) != 0 {
		filterObjects = prov.GetFilterStateObjectsToLog()
	}
	hostname, cluster, err := clusterLookupFn(push, prov.GetService(), int(prov.GetPort()))
	if err != nil {
		IncLookupClusterFailures("envoyHTTPAls")
		log.Errorf("could not find cluster for http grpc provider %q: %v", prov, err)
		return nil
	}
	fl := &grpcaccesslog.HttpGrpcAccessLogConfig{
		CommonConfig: &grpcaccesslog.CommonGrpcAccessLogConfig{
			LogName: logName,
			GrpcService: &core.GrpcService{
				TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
						ClusterName: cluster,
						Authority:   hostname,
					},
				},
			},
			TransportApiVersion:     core.ApiVersion_V3,
			FilterStateObjectsToLog: filterObjects,
		},
		AdditionalRequestHeadersToLog:   prov.GetAdditionalRequestHeadersToLog(),
		AdditionalResponseHeadersToLog:  prov.GetAdditionalResponseHeadersToLog(),
		AdditionalResponseTrailersToLog: prov.GetAdditionalResponseTrailersToLog(),
	}
	return &accesslog.AccessLog{
		Name:       wellknown.HTTPGRPCAccessLog,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(fl)},
	}
}
// fileAccessLogFormat returns the text access-log format to use, falling back
// to the default Istio format when none is supplied.
// From the spec: "NOTE: Istio will insert a newline ('\n') on all formats (if missing)."
func fileAccessLogFormat(formatString string) string {
	if formatString == "" {
		return EnvoyTextLogFormat
	}
	if strings.HasSuffix(formatString, "\n") {
		return formatString
	}
	return formatString + "\n"
}
// FileAccessLogFromMeshConfig builds a file access log at the given path using
// the mesh-wide AccessLogEncoding and AccessLogFormat settings. An invalid
// user-provided JSON format falls back to the default Istio JSON format; an
// unknown encoding yields a log with no explicit format.
func FileAccessLogFromMeshConfig(path string, mesh *meshconfig.MeshConfig) *accesslog.AccessLog {
	// We need to build access log. This is needed either on first access or when mesh config changes.
	fl := &fileaccesslog.FileAccessLog{
		Path: path,
	}
	var formatters []*core.TypedExtensionConfig
	switch mesh.AccessLogEncoding {
	case meshconfig.MeshConfig_TEXT:
		formatString := fileAccessLogFormat(mesh.AccessLogFormat)
		formatters = accessLogTextFormatters(formatString)
		fl.AccessLogFormat = &fileaccesslog.FileAccessLog_LogFormat{
			LogFormat: &core.SubstitutionFormatString{
				Format: &core.SubstitutionFormatString_TextFormatSource{
					TextFormatSource: &core.DataSource{
						Specifier: &core.DataSource_InlineString{
							InlineString: formatString,
						},
					},
				},
			},
		}
	case meshconfig.MeshConfig_JSON:
		jsonLogStruct := EnvoyJSONLogFormatIstio
		if len(mesh.AccessLogFormat) > 0 {
			// A non-empty AccessLogFormat is interpreted as a JSON object of labels.
			parsedJSONLogStruct := structpb.Struct{}
			if err := protomarshal.UnmarshalAllowUnknown([]byte(mesh.AccessLogFormat), &parsedJSONLogStruct); err != nil {
				log.Errorf("error parsing provided json log format, default log format will be used: %v", err)
			} else {
				jsonLogStruct = &parsedJSONLogStruct
			}
		}
		formatters = accessLogJSONFormatters(jsonLogStruct)
		fl.AccessLogFormat = &fileaccesslog.FileAccessLog_LogFormat{
			LogFormat: &core.SubstitutionFormatString{
				Format: &core.SubstitutionFormatString_JsonFormat{
					JsonFormat: jsonLogStruct,
				},
				JsonFormatOptions: &core.JsonFormatOptions{SortProperties: true},
			},
		}
	default:
		log.Warnf("unsupported access log format %v", mesh.AccessLogEncoding)
	}
	if len(formatters) > 0 {
		fl.GetLogFormat().Formatters = formatters
	}
	al := &accesslog.AccessLog{
		Name:       wellknown.FileAccessLog,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(fl)},
	}
	return al
}
// openTelemetryLog builds an OpenTelemetry ALS access log for the given
// provider, or nil when the provider's service cannot be resolved to a cluster.
func openTelemetryLog(pushCtx *PushContext,
	provider *meshconfig.MeshConfig_ExtensionProvider_EnvoyOpenTelemetryLogProvider,
) *accesslog.AccessLog {
	hostname, cluster, err := clusterLookupFn(pushCtx, provider.Service, int(provider.Port))
	if err != nil {
		IncLookupClusterFailures("envoyOtelAls")
		log.Errorf("could not find cluster for open telemetry provider %q: %v", provider, err)
		return nil
	}
	logName := provider.LogName
	if logName == "" {
		logName = OtelEnvoyAccessLogFriendlyName
	}
	// Use the provider's text format and labels when present, falling back to
	// the default Istio text format.
	format := EnvoyTextLogFormat
	var labels *structpb.Struct
	if lf := provider.LogFormat; lf != nil {
		if lf.Text != "" {
			format = lf.Text
		}
		labels = lf.Labels
	}
	cfg := buildOpenTelemetryAccessLogConfig(logName, hostname, cluster, format, labels)
	return &accesslog.AccessLog{
		Name:       OtelEnvoyALSName,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
	}
}
// buildOpenTelemetryAccessLogConfig assembles the OpenTelemetry ALS
// configuration: the common gRPC log-stream settings, an optional log body
// built from the text format string, and optional attributes derived from the
// labels struct.
func buildOpenTelemetryAccessLogConfig(logName, hostname, clusterName, format string, labels *structpb.Struct) *otelaccesslog.OpenTelemetryAccessLogConfig {
	cfg := &otelaccesslog.OpenTelemetryAccessLogConfig{
		CommonConfig: &grpcaccesslog.CommonGrpcAccessLogConfig{
			LogName: logName,
			GrpcService: &core.GrpcService{
				TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
						ClusterName: clusterName,
						Authority:   hostname,
					},
				},
			},
			TransportApiVersion:     core.ApiVersion_V3,
			FilterStateObjectsToLog: envoyWasmStateToLog,
		},
		DisableBuiltinLabels: !features.EnableOTELBuiltinResourceLabels,
	}
	if format != "" {
		// The format string becomes the OTLP log record body.
		cfg.Body = &otlpcommon.AnyValue{
			Value: &otlpcommon.AnyValue_StringValue{
				StringValue: format,
			},
		}
	}
	if labels != nil && len(labels.Fields) != 0 {
		cfg.Attributes = &otlpcommon.KeyValueList{
			Values: ConvertStructToAttributeKeyValues(labels.Fields),
		}
	}
	return cfg
}
// ConvertStructToAttributeKeyValues converts a structpb label map into OTLP
// attribute key/values, sorted by key to ensure stable XDS generation.
func ConvertStructToAttributeKeyValues(labels map[string]*structpb.Value) []*otlpcommon.KeyValue {
	if len(labels) == 0 {
		return nil
	}
	attrs := make([]*otlpcommon.KeyValue, 0, len(labels))
	for _, key := range slices.Sort(maps.Keys(labels)) {
		attrs = append(attrs, &otlpcommon.KeyValue{
			Key:   key,
			Value: &otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: labels[key].GetStringValue()}},
		})
	}
	return attrs
}
// LookupCluster resolves the hostname and outbound cluster name for a service at the
// given port. The service may be qualified as "<Namespace>/<Hostname>"; a bare
// hostname only resolves when it exists in exactly one namespace.
func LookupCluster(push *PushContext, service string, port int) (hostname string, cluster string, err error) {
	if service == "" {
		return "", "", fmt.Errorf("service must not be empty")
	}
	// TODO(yangminzhu): Verify the service and its cluster is supported, e.g. resolution type is not OriginalDst.
	if parts := strings.Split(service, "/"); len(parts) == 2 {
		// Qualified form: look up the hostname inside the given namespace only.
		ns, name := parts[0], parts[1]
		if svc := push.ServiceIndex.HostnameAndNamespace[host.Name(name)][ns]; svc != nil {
			return string(svc.Hostname), BuildSubsetKey(TrafficDirectionOutbound, "", svc.Hostname, port), nil
		}
		return "", "", fmt.Errorf("could not find service %s in Istio service registry", service)
	}
	// Bare hostname: collect every namespace declaring this host.
	namespaceToServices := push.ServiceIndex.HostnameAndNamespace[host.Name(service)]
	var namespaces []string
	for ns := range namespaceToServices {
		namespaces = append(namespaces, ns)
	}
	switch {
	case len(namespaces) == 1:
		// Unambiguous: exactly one namespace declares this hostname.
		svc := namespaceToServices[namespaces[0]]
		return string(svc.Hostname), BuildSubsetKey(TrafficDirectionOutbound, "", svc.Hostname, port), nil
	case len(namespaces) > 1:
		return "", "", fmt.Errorf("found %s in multiple namespaces %v, specify the namespace explicitly in "+
			"the format of <Namespace>/<Hostname>", service, namespaces)
	}
	return "", "", fmt.Errorf("could not find service %s in Istio service registry", service)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"sync"
"time"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/golang-lru/v2/simplelru"
"google.golang.org/protobuf/testing/protocmp"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// enableStats gates all cache metrics below on the EnableXDSCacheMetrics feature flag.
var enableStats = func() bool {
	return features.EnableXDSCacheMetrics
}

var (
	// xdsCacheReads counts cache lookups, split into "hit"/"miss" via typeTag below.
	xdsCacheReads = monitoring.NewSum(
		"xds_cache_reads",
		"Total number of xds cache xdsCacheReads.",
		monitoring.WithEnabled(enableStats),
	)
	// xdsCacheEvictions counts evictions, split into "clear"/"size" via typeTag below.
	xdsCacheEvictions = monitoring.NewSum(
		"xds_cache_evictions",
		"Total number of xds cache evictions.",
		monitoring.WithEnabled(enableStats),
	)
	// xdsCacheSize tracks the current number of entries in the cache.
	xdsCacheSize = monitoring.NewGauge(
		"xds_cache_size",
		"Current size of xds cache",
		monitoring.WithEnabled(enableStats),
	)
	// dependentConfigSize tracks the total number of key references held in the
	// dependent-config index (see lruCache.configIndex).
	dependentConfigSize = monitoring.NewGauge(
		"xds_cache_dependent_config_size",
		"Current size of dependent configs",
		monitoring.WithEnabled(enableStats),
	)

	xdsCacheHits             = xdsCacheReads.With(typeTag.Value("hit"))
	xdsCacheMisses           = xdsCacheReads.With(typeTag.Value("miss"))
	xdsCacheEvictionsOnClear = xdsCacheEvictions.With(typeTag.Value("clear"))
	xdsCacheEvictionsOnSize  = xdsCacheEvictions.With(typeTag.Value("size"))
)

// hit records a cache hit.
func hit() {
	xdsCacheHits.Increment()
}

// miss records a cache miss.
func miss() {
	xdsCacheMisses.Increment()
}

// size records the current number of cache entries.
func size(cs int) {
	xdsCacheSize.Record(float64(cs))
}
// CacheToken orders cache writes; in this package it is derived from a push's start
// time in unix nanoseconds (see lruCache.Add), so larger tokens are newer pushes.
type CacheToken uint64

// dependents exposes the config hashes a cache entry was generated from, so the
// entry can be invalidated when any of those configs change.
type dependents interface {
	DependentConfigs() []ConfigHash
}

// typedXdsCache interface defines a store for caching XDS responses.
// All operations are thread safe.
type typedXdsCache[K comparable] interface {
	// Flush clears the evicted indexes.
	Flush()
	// Add adds the given key with the value and its dependents for the given pushContext to the cache.
	// If the cache has been updated to a newer push context, the write will be dropped silently.
	// This ensures stale data does not overwrite fresh data when dealing with concurrent
	// writers.
	Add(key K, entry dependents, pushRequest *PushRequest, value *discovery.Resource)
	// Get retrieves the cached value if it exists.
	Get(key K) *discovery.Resource
	// Clear removes the cache entries that are dependent on the configs passed.
	Clear(sets.Set[ConfigKey])
	// ClearAll clears the entire cache.
	ClearAll()
	// Keys returns all currently configured keys. This is for testing/debug only
	Keys() []K
	// Snapshot returns a snapshot of all keys and values. This is for testing/debug only
	Snapshot() []*discovery.Resource
}
// newTypedXdsCache returns an instance of a cache.
func newTypedXdsCache[K comparable]() typedXdsCache[K] {
	c := &lruCache[K]{
		enableAssertions: features.EnableUnsafeAssertions,
		configIndex:      make(map[ConfigHash]sets.Set[K]),
		evictQueue:       make([]evictKeyConfigs[K], 0, 1000),
	}
	// The LRU eviction callback queues index cleanup on the cache itself.
	c.store = newLru(c.onEvict)
	return c
}
// evictKeyConfigs records an evicted key together with the dependent configs it was
// indexed under, so configIndex can be cleaned up later in Flush.
type evictKeyConfigs[K comparable] struct {
	key              K
	dependentConfigs []ConfigHash
}

// lruCache is the LRU-backed implementation of typedXdsCache.
type lruCache[K comparable] struct {
	// enableAssertions turns on assertUnchanged checks on overwrite (unsafe assertions feature).
	enableAssertions bool
	store            simplelru.LRUCache[K, cacheValue]
	// token stores the latest token of the store, used to prevent stale data overwrite.
	// It is refreshed when Clear or ClearAll are called
	token CacheToken
	mu    sync.RWMutex
	// configIndex maps a config hash to the set of cache keys whose entries depend on it.
	configIndex map[ConfigHash]sets.Set[K]
	// evictQueue holds keys evicted from the LRU whose index cleanup is deferred to Flush.
	evictQueue []evictKeyConfigs[K]
	// mark whether a key is evicted on Clear call, passively.
	evictedOnClear bool
}

var _ typedXdsCache[uint64] = &lruCache[uint64]{}
// newLru builds the backing LRU store with capacity features.XDSCacheMaxSize,
// falling back to 20000 when the configured size is not positive, and wires in
// the given eviction callback.
func newLru[K comparable](evictCallback simplelru.EvictCallback[K, cacheValue]) simplelru.LRUCache[K, cacheValue] {
	capacity := features.XDSCacheMaxSize
	if capacity <= 0 {
		capacity = 20000
	}
	store, err := simplelru.NewLRU(capacity, evictCallback)
	if err != nil {
		// NewLRU only fails on a non-positive size, which is guarded above.
		panic(fmt.Errorf("invalid lru configuration: %v", err))
	}
	return store
}
// Flush drains the evict queue, removing each evicted key from the config index,
// and records the resulting index size metric.
func (l *lruCache[K]) Flush() {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, evicted := range l.evictQueue {
		l.clearConfigIndex(evicted.key, evicted.dependentConfigs)
	}
	// Zero the elements so the backing array releases references for GC, then
	// reset length while capping capacity at the original 1000.
	clear(l.evictQueue)
	l.evictQueue = l.evictQueue[:0:1000]
	l.recordDependentConfigSize()
}
// recordDependentConfigSize publishes the total number of key references held in
// configIndex. It is a no-op when cache metrics are disabled.
func (l *lruCache[K]) recordDependentConfigSize() {
	if !enableStats() {
		return
	}
	total := 0
	for _, keys := range l.configIndex {
		total += len(keys)
	}
	dependentConfigSize.Record(float64(total))
}
// This is the callback passed to LRU, it will be called whenever a key is removed.
// All store mutations in this file happen while holding l.mu, so this callback runs
// under that lock too; it therefore only bumps metrics and queues index cleanup,
// which Flush performs later.
func (l *lruCache[K]) onEvict(k K, v cacheValue) {
	// evictedOnClear is set only for the duration of Clear, letting us attribute
	// the eviction to an explicit clear vs. LRU size pressure.
	if l.evictedOnClear {
		xdsCacheEvictionsOnClear.Increment()
	} else {
		xdsCacheEvictionsOnSize.Increment()
	}
	// async clearing indexes
	l.evictQueue = append(l.evictQueue, evictKeyConfigs[K]{k, v.dependentConfigs})
}
// updateConfigIndex records k under each of its dependent config hashes so a change
// to any of those configs can invalidate the entry. Caller must hold l.mu.
func (l *lruCache[K]) updateConfigIndex(k K, dependentConfigs []ConfigHash) {
	for _, cfg := range dependentConfigs {
		sets.InsertOrNew(l.configIndex, cfg, k)
	}
}

// clearConfigIndex removes k from the index entries for the given dependent configs.
// If the key is still live in the store (an old value was evicted but the key was
// re-added with a new dependent set), only the configs no longer referenced by the
// current value are removed. Caller must hold l.mu.
func (l *lruCache[K]) clearConfigIndex(k K, dependentConfigs []ConfigHash) {
	c, exists := l.store.Get(k)
	if exists {
		newDependents := c.dependentConfigs
		// we only need to clear configs {old difference new}
		dependents := sets.New(dependentConfigs...).Difference(sets.New(newDependents...))
		for cfg := range dependents {
			sets.DeleteCleanupLast(l.configIndex, cfg, k)
		}
		return
	}
	// Key fully gone: drop it from every dependent config's index entry.
	for _, cfg := range dependentConfigs {
		sets.DeleteCleanupLast(l.configIndex, cfg, k)
	}
}
// assertUnchanged checks that a cache entry is not changed. This helps catch bad cache invalidation
// We should never have a case where we overwrite an existing item with a new change. Instead, when
// config sources change, Clear/ClearAll should be called. At this point, we may get multiple writes
// because multiple writers may get cache misses concurrently, but they ought to generate identical
// configuration. This also checks that our XDS config generation is deterministic, which is a very
// important property.
// Only active when enableAssertions is set; the comparison itself panics (test-only tooling).
func (l *lruCache[K]) assertUnchanged(key K, existing *discovery.Resource, replacement *discovery.Resource) {
	if l.enableAssertions {
		if existing == nil {
			// This is a new addition, not an update
			return
		}
		// Record time so that we can correlate when the error actually happened, since the async reporting
		// may be delayed
		t0 := time.Now()
		// This operation is really slow, which makes tests fail for unrelated reasons, so we process it async.
		go func() {
			if !cmp.Equal(existing, replacement, protocmp.Transform()) {
				warning := fmt.Errorf("assertion failed at %v, cache entry changed but not cleared for key %v: %v\n%v\n%v",
					t0, key, cmp.Diff(existing, replacement, protocmp.Transform()), existing, replacement)
				panic(warning)
			}
		}()
	}
}
// Add inserts value under k, tagged with the push's start time as its token.
// Writes from pushes older than the cache token (refreshed by Clear/ClearAll) or
// older than the existing entry for k are dropped silently, so stale generators
// can never overwrite fresher data.
func (l *lruCache[K]) Add(k K, entry dependents, pushReq *PushRequest, value *discovery.Resource) {
	// A push without a start time cannot be ordered against other writes; skip caching.
	if pushReq == nil || pushReq.Start.Equal(time.Time{}) {
		return
	}
	// It will not overflow until year 2262
	token := CacheToken(pushReq.Start.UnixNano())
	l.mu.Lock()
	defer l.mu.Unlock()
	if token < l.token {
		// entry may be stale, we need to drop it. This can happen when the cache is invalidated
		// after we call Clear or ClearAll.
		return
	}
	cur, f := l.store.Get(k)
	if f {
		// This is the stale or same resource
		if token <= cur.token {
			return
		}
		if l.enableAssertions {
			l.assertUnchanged(k, cur.value, value)
		}
	}
	dependentConfigs := entry.DependentConfigs()
	toWrite := cacheValue{value: value, token: token, dependentConfigs: dependentConfigs}
	l.store.Add(k, toWrite)
	l.token = token
	l.updateConfigIndex(k, dependentConfigs)
	// we have to make sure we evict old entries with the same key
	// to prevent leaking in the index maps
	if f {
		l.evictQueue = append(l.evictQueue, evictKeyConfigs[K]{k, cur.dependentConfigs})
	}
	size(l.store.Len())
}
// cacheValue is the stored entry: the resource plus the push token that produced it
// and the configs it depends on (for invalidation via configIndex).
type cacheValue struct {
	value            *discovery.Resource
	token            CacheToken
	dependentConfigs []ConfigHash
}

// Get retrieves the cached value if it exists. Token 0 accepts any entry.
func (l *lruCache[K]) Get(key K) *discovery.Resource {
	return l.get(key, 0)
}

// get return the cached value if it exists.
// A value counts as a hit only when its token is at least as new as the requested token.
func (l *lruCache[K]) get(key K, token CacheToken) *discovery.Resource {
	// Full write lock (not RLock): the underlying LRU's Get updates recency state.
	l.mu.Lock()
	defer l.mu.Unlock()
	cv, ok := l.store.Get(key)
	if !ok || cv.value == nil {
		miss()
		return nil
	}
	if cv.token >= token {
		hit()
		return cv.value
	}
	// Entry predates the requested token: treat as a miss.
	miss()
	return nil
}
// Clear removes every entry that depends on any of the given configs and bumps the
// cache token so in-flight writes from older pushes are rejected by Add.
func (l *lruCache[K]) Clear(configs sets.Set[ConfigKey]) {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.token = CacheToken(time.Now().UnixNano())
	// Flag the evictions below so onEvict attributes them to "clear" rather than
	// LRU size pressure.
	l.evictedOnClear = true
	defer func() {
		l.evictedOnClear = false
	}()
	for ckey := range configs {
		hc := ckey.HashCode()
		referenced := l.configIndex[hc]
		delete(l.configIndex, hc)
		for key := range referenced {
			// Remove triggers onEvict, which queues the key's remaining index cleanup for Flush.
			l.store.Remove(key)
		}
	}
	size(l.store.Len())
}
// ClearAll drops every entry and resets the config index, bumping the token so
// writes from pushes that started before this call are rejected by Add.
func (l *lruCache[K]) ClearAll() {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.token = CacheToken(time.Now().UnixNano())
	// Purge with an evict function would turn up to be pretty slow since
	// it runs the function for every key in the store, might be better to just
	// create a new store.
	l.store = newLru(l.onEvict)
	l.configIndex = map[ConfigHash]sets.Set[K]{}
	// The underlying array releases references to elements so that they can be garbage collected.
	clear(l.evictQueue)
	l.evictQueue = l.evictQueue[:0:1000]
	size(l.store.Len())
}
// Keys returns a copy of all currently stored keys. This is for testing/debug only.
func (l *lruCache[K]) Keys() []K {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return slices.Clone(l.store.Keys())
}

// Snapshot returns the value for every current key (in Keys order; a slot stays nil
// if its key disappears mid-iteration). This is for testing/debug only.
// NOTE(review): store.Get is called under RLock here while get() above takes the
// full lock for the same call — confirm the underlying LRU's Get is safe for
// concurrent readers, or this is a (debug-only) race.
func (l *lruCache[K]) Snapshot() []*discovery.Resource {
	l.mu.RLock()
	defer l.mu.RUnlock()
	iKeys := l.store.Keys()
	res := make([]*discovery.Resource, len(iKeys))
	for i, ik := range iKeys {
		v, ok := l.store.Get(ik)
		if !ok {
			continue
		}
		res[i] = v.value
	}
	return res
}
// indexLength reports the number of distinct config hashes currently indexed.
// Used for tests/debugging.
func (l *lruCache[K]) indexLength() int {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return len(l.configIndex)
}

// configIndexSnapshot returns a shallow copy of the config index; the key sets are
// shared with the live index, not copied. Used for tests/debugging.
func (l *lruCache[K]) configIndexSnapshot() map[ConfigHash]sets.Set[K] {
	l.mu.RLock()
	defer l.mu.RUnlock()
	res := make(map[ConfigHash]sets.Set[K], len(l.configIndex))
	for k, v := range l.configIndex {
		res[k] = v
	}
	return res
}
// disabledCache is a cache that is always empty
// It satisfies typedXdsCache so callers need no special-casing when caching is off.
type disabledCache[K comparable] struct{}

var _ typedXdsCache[uint64] = &disabledCache[uint64]{}

// Flush is a no-op.
func (d disabledCache[K]) Flush() {
}

// Add is a no-op; nothing is ever stored.
func (d disabledCache[K]) Add(k K, entry dependents, pushReq *PushRequest, value *discovery.Resource) {
}

// Get always misses.
func (d disabledCache[K]) Get(k K) *discovery.Resource {
	return nil
}

func (d disabledCache[K]) Clear(configsUpdated sets.Set[ConfigKey]) {}

func (d disabledCache[K]) ClearAll() {}

func (d disabledCache[K]) Keys() []K { return nil }

func (d disabledCache[K]) Snapshot() []*discovery.Resource { return nil }
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
"github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/validation"
)
// UnixAddressPrefix is the prefix used to indicate an address is for a Unix Domain socket. It is used in
// ServiceEntry.Endpoint.Address message.
const (
	UnixAddressPrefix = "unix://"
	// PodIPAddressPrefix is the IPv4 wildcard address (bind to the pod IP / all interfaces).
	PodIPAddressPrefix = "0.0.0.0"
	// LocalhostAddressPrefix is the IPv4 loopback address.
	LocalhostAddressPrefix = "127.0.0.1"
	// PodIPv6AddressPrefix is the IPv6 wildcard address.
	PodIPv6AddressPrefix = "::"
	// LocalhostIPv6AddressPrefix is the IPv6 loopback address.
	LocalhostIPv6AddressPrefix = "::1"
)
// Validate ensures that the service object is well-defined: the hostname is made of
// DNS-1123 labels, at least one port is declared, port names are valid (and present
// whenever more than one port exists), and every port number is in range. All
// problems are accumulated into a single multierror.
func (s *Service) Validate() error {
	var errs error
	if len(s.Hostname) == 0 {
		errs = multierror.Append(errs, fmt.Errorf("invalid empty hostname"))
	}
	for _, segment := range strings.Split(string(s.Hostname), ".") {
		if !labels.IsDNS1123Label(segment) {
			errs = multierror.Append(errs, fmt.Errorf("invalid hostname part: %q", segment))
		}
	}
	// Require at least one port.
	if len(s.Ports) == 0 {
		errs = multierror.Append(errs, fmt.Errorf("service must have at least one declared port"))
	}
	// Port names can be empty only when there is a single port.
	for _, p := range s.Ports {
		switch {
		case p.Name == "" && len(s.Ports) > 1:
			errs = multierror.Append(errs,
				fmt.Errorf("empty port names are not allowed for services with multiple ports"))
		case p.Name != "" && !labels.IsDNS1123Label(p.Name):
			errs = multierror.Append(errs, fmt.Errorf("invalid name: %q", p.Name))
		}
		if err := validation.ValidatePort(p.Port); err != nil {
			errs = multierror.Append(errs,
				fmt.Errorf("invalid service port value %d for %q: %v", p.Port, p.Name, err))
		}
	}
	return errs
}
// Validate ensures that the service instance is well-defined: its service and
// endpoint validate, and its service port is declared (by name) on the service with
// matching port number and protocol. All problems are accumulated into one multierror.
func (instance *ServiceInstance) Validate() error {
	var errs error
	if instance.Service == nil {
		errs = multierror.Append(errs, fmt.Errorf("missing service in the instance"))
	} else if err := instance.Service.Validate(); err != nil {
		errs = multierror.Append(errs, err)
	}
	if instance.Endpoint != nil {
		if err := instance.Endpoint.Labels.Validate(); err != nil {
			errs = multierror.Append(errs, err)
		}
		if err := validation.ValidatePort(int(instance.Endpoint.EndpointPort)); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	port := instance.ServicePort
	if port == nil {
		errs = multierror.Append(errs, fmt.Errorf("missing service port"))
	} else if instance.Service != nil {
		// The instance's port must be declared by the service and agree on both
		// port number and protocol.
		expected, ok := instance.Service.Ports.Get(port.Name)
		if !ok {
			errs = multierror.Append(errs, fmt.Errorf("missing service port %q", port.Name))
		} else {
			if expected.Port != port.Port {
				errs = multierror.Append(errs,
					fmt.Errorf("unexpected service port value %d, expected %d", port.Port, expected.Port))
			}
			if expected.Protocol != port.Protocol {
				errs = multierror.Append(errs,
					fmt.Errorf("unexpected service protocol %s, expected %s", port.Protocol, expected.Protocol))
			}
		}
	}
	return errs
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"k8s.io/apimachinery/pkg/types"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
// SelectVirtualServices selects the virtual services by matching given services' host names.
// This function is used by sidecar converter.
// hostsByNamespace maps an importing namespace (or the wildcard namespace) to the
// classified set of hosts that namespace imports.
func SelectVirtualServices(vsidx virtualServiceIndex, configNamespace string, hostsByNamespace map[string]hostClassification) []config.Config {
	importedVirtualServices := make([]config.Config, 0)
	// vsset tracks already-selected virtual services so each is added at most once,
	// even when it matches both a namespace-scoped and a wildcard import.
	vsset := sets.New[types.NamespacedName]()
	addVirtualService := func(vs config.Config, hosts hostClassification) {
		key := vs.NamespacedName()
		if vsset.Contains(key) {
			return
		}
		rule := vs.Spec.(*networking.VirtualService)
		for _, vh := range rule.Hosts {
			// first, check exactHosts
			if hosts.exactHosts.Contains(host.Name(vh)) {
				importedVirtualServices = append(importedVirtualServices, vs)
				vsset.Insert(key)
				return
			}
			// exactHosts not found, fallback to loop allHosts
			vhIsWildCard := host.Name(vh).IsWildCarded()
			for _, ah := range hosts.allHosts {
				// If both are exact hosts, then fallback is not needed.
				// In this scenario it should be determined by exact lookup.
				if !vhIsWildCard && !ah.IsWildCarded() {
					continue
				}
				if vsHostMatches(vh, ah, vs) {
					importedVirtualServices = append(importedVirtualServices, vs)
					vsset.Insert(key)
					// break both loops
					return
				}
			}
		}
	}
	wnsImportedHosts, wnsFound := hostsByNamespace[wildcardNamespace]
	loopAndAdd := func(vses []config.Config) {
		for _, c := range vses {
			// Deliberately shadows the outer configNamespace: here it means the
			// namespace the VirtualService itself lives in.
			configNamespace := c.Namespace
			// Selection algorithm:
			// virtualservices have a list of hosts in the API spec
			// if any host in the list matches one service hostname, select the virtual service
			// and break out of the loop.
			// Check if there is an explicit import of form ns/* or ns/host
			if importedHosts, nsFound := hostsByNamespace[configNamespace]; nsFound {
				addVirtualService(c, importedHosts)
			}
			// Check if there is an import of form */host or */*
			if wnsFound {
				addVirtualService(c, wnsImportedHosts)
			}
		}
	}
	// Consider, in order: VSes private to this namespace on the mesh gateway, VSes
	// exported to this namespace, then publicly exported VSes on the mesh gateway.
	n := types.NamespacedName{Namespace: configNamespace, Name: constants.IstioMeshGateway}
	loopAndAdd(vsidx.privateByNamespaceAndGateway[n])
	loopAndAdd(vsidx.exportedToNamespaceByGateway[n])
	loopAndAdd(vsidx.publicByGateway[constants.IstioMeshGateway])
	return importedVirtualServices
}
// vsHostMatches checks if the given VirtualService host matches the importedHost (from Sidecar).
func vsHostMatches(vsHost string, importedHost host.Name, vs config.Config) bool {
	vh := host.Name(vsHost)
	if UseGatewaySemantics(vs) {
		// The new way. Matching logic exactly mirrors Service matching:
		// if a route defines `*.com` and we import `a.com`, it will not match.
		return vh.SubsetOf(importedHost)
	}
	// The old way. Matches is bi-directional; kept for backwards compatibility.
	return vh.Matches(importedHost)
}
// resolveVirtualServiceShortnames rewrites, in place, every short name in the given
// VirtualService into its fully qualified form using the config's metadata: the
// top-level hosts, the gateway references (top-level and per-match), and the
// destination hosts of HTTP, TCP, and TLS routes, including HTTP mirrors.
// VirtualServices using Kubernetes Gateway API semantics are left untouched, since
// those semantics support short names natively.
//
// The per-route work was previously triplicated across HTTP/TCP/TLS; it is factored
// into the two helpers below, with identical behavior.
func resolveVirtualServiceShortnames(rule *networking.VirtualService, meta config.Meta) {
	// Kubernetes Gateway API semantics support shortnames
	if UseGatewaySemantics(config.Config{Meta: meta}) {
		return
	}
	// resolve top level hosts
	for i, h := range rule.Hosts {
		rule.Hosts[i] = string(ResolveShortnameToFQDN(h, meta))
	}
	// resolve gateways to bind to
	resolveGatewayShortnames(rule.Gateways, meta)
	// resolve gateways and hosts in http route.destination, route.mirror, route.mirrors
	for _, d := range rule.Http {
		for _, m := range d.Match {
			resolveGatewayShortnames(m.Gateways, meta)
		}
		for _, w := range d.Route {
			resolveDestinationShortname(w.Destination, meta)
		}
		resolveDestinationShortname(d.Mirror, meta)
		for _, m := range d.Mirrors {
			resolveDestinationShortname(m.Destination, meta)
		}
	}
	// resolve gateways and hosts in tcp route.destination
	for _, d := range rule.Tcp {
		for _, m := range d.Match {
			resolveGatewayShortnames(m.Gateways, meta)
		}
		for _, w := range d.Route {
			resolveDestinationShortname(w.Destination, meta)
		}
	}
	// resolve gateways and hosts in tls route.destination
	for _, t := range rule.Tls {
		for _, m := range t.Match {
			resolveGatewayShortnames(m.Gateways, meta)
		}
		for _, w := range t.Route {
			resolveDestinationShortname(w.Destination, meta)
		}
	}
}

// resolveGatewayShortnames qualifies each gateway reference in place, leaving the
// reserved mesh gateway name untouched.
func resolveGatewayShortnames(gateways []string, meta config.Meta) {
	for i, g := range gateways {
		if g != constants.IstioMeshGateway {
			gateways[i] = resolveGatewayName(g, meta)
		}
	}
}

// resolveDestinationShortname qualifies a route destination's host in place.
// Nil destinations are ignored.
func resolveDestinationShortname(d *networking.Destination, meta config.Meta) {
	if d == nil {
		return
	}
	d.Host = string(ResolveShortnameToFQDN(d.Host, meta))
}
// Return merged virtual services and the root->delegate vs map
// The input is classified into three kinds: delegates (no hosts), roots (at least
// one HTTP route with Delegate set), and plain virtual services. Each root's
// delegated routes are merged in place; delegates themselves are never emitted
// directly, only via their roots.
func mergeVirtualServicesIfNeeded(
	vServices []config.Config,
	defaultExportTo sets.Set[visibility.Instance],
) ([]config.Config, map[ConfigKey][]ConfigKey) {
	out := make([]config.Config, 0, len(vServices))
	delegatesMap := map[types.NamespacedName]config.Config{}
	delegatesExportToMap := make(map[types.NamespacedName]sets.Set[visibility.Instance])
	// root virtualservices with delegate
	var rootVses []config.Config
	// 1. classify virtualservices
	for _, vs := range vServices {
		rule := vs.Spec.(*networking.VirtualService)
		// it is delegate, add it to the indexer cache along with the exportTo for the delegate
		if len(rule.Hosts) == 0 {
			delegatesMap[vs.NamespacedName()] = vs
			var exportToSet sets.Set[visibility.Instance]
			if len(rule.ExportTo) == 0 {
				// No exportTo in virtualService. Use the global default
				// "Private" visibility is rewritten to the delegate's own namespace.
				exportToSet = sets.NewWithLength[visibility.Instance](defaultExportTo.Len())
				for v := range defaultExportTo {
					if v == visibility.Private {
						exportToSet.Insert(visibility.Instance(vs.Namespace))
					} else {
						exportToSet.Insert(v)
					}
				}
			} else {
				exportToSet = sets.NewWithLength[visibility.Instance](len(rule.ExportTo))
				for _, e := range rule.ExportTo {
					if e == string(visibility.Private) {
						exportToSet.Insert(visibility.Instance(vs.Namespace))
					} else {
						exportToSet.Insert(visibility.Instance(e))
					}
				}
			}
			delegatesExportToMap[vs.NamespacedName()] = exportToSet
			continue
		}
		// root vs
		if isRootVs(rule) {
			rootVses = append(rootVses, vs)
			continue
		}
		// the others are normal vs without delegate
		out = append(out, vs)
	}
	delegatesByRoot := make(map[ConfigKey][]ConfigKey, len(rootVses))
	// 2. merge delegates and root
	for _, root := range rootVses {
		rootConfigKey := ConfigKey{Kind: kind.VirtualService, Name: root.Name, Namespace: root.Namespace}
		rootVs := root.Spec.(*networking.VirtualService)
		mergedRoutes := []*networking.HTTPRoute{}
		for _, route := range rootVs.Http {
			// it is root vs with delegate
			if delegate := route.Delegate; delegate != nil {
				// A delegate reference without a namespace defaults to the root's namespace.
				delegateNamespace := delegate.Namespace
				if delegateNamespace == "" {
					delegateNamespace = root.Namespace
				}
				// Record the dependency even if the delegate is missing or not visible,
				// so a later change to the delegate re-triggers processing of this root.
				delegateConfigKey := ConfigKey{Kind: kind.VirtualService, Name: delegate.Name, Namespace: delegateNamespace}
				delegatesByRoot[rootConfigKey] = append(delegatesByRoot[rootConfigKey], delegateConfigKey)
				delegateVS, ok := delegatesMap[types.NamespacedName{Namespace: delegateNamespace, Name: delegate.Name}]
				if !ok {
					log.Debugf("delegate virtual service %s/%s of %s/%s not found",
						delegateNamespace, delegate.Name, root.Namespace, root.Name)
					// delegate not found, ignore only the current HTTP route
					continue
				}
				// make sure that the delegate is visible to root virtual service's namespace
				exportTo := delegatesExportToMap[types.NamespacedName{Namespace: delegateNamespace, Name: delegate.Name}]
				if !exportTo.Contains(visibility.Public) && !exportTo.Contains(visibility.Instance(root.Namespace)) {
					log.Debugf("delegate virtual service %s/%s of %s/%s is not exported to %s",
						delegateNamespace, delegate.Name, root.Namespace, root.Name, root.Namespace)
					continue
				}
				// DeepCopy to prevent mutate the original delegate, it can conflict
				// when multiple routes delegate to one single VS.
				copiedDelegate := config.DeepCopy(delegateVS.Spec)
				vs := copiedDelegate.(*networking.VirtualService)
				merged := mergeHTTPRoutes(route, vs.Http)
				mergedRoutes = append(mergedRoutes, merged...)
			} else {
				mergedRoutes = append(mergedRoutes, route)
			}
		}
		rootVs.Http = mergedRoutes
		if log.DebugEnabled() {
			vsString, _ := protomarshal.ToJSONWithIndent(rootVs, "  ")
			log.Debugf("merged virtualService: %s", vsString)
		}
		out = append(out, root)
	}
	return out, delegatesByRoot
}
// merge root's route with delegate's; the merged route count is at most the
// delegate's, since routes that conflict with the root are dropped. Clears the
// root's Delegate field as a side effect.
func mergeHTTPRoutes(root *networking.HTTPRoute, delegate []*networking.HTTPRoute) []*networking.HTTPRoute {
	root.Delegate = nil
	merged := make([]*networking.HTTPRoute, 0, len(delegate))
	for _, d := range delegate {
		if m := mergeHTTPRoute(root, d); m != nil {
			merged = append(merged, m)
		}
	}
	return merged
}
// merge the two HTTPRoutes, if there is a conflict with root, the delegate route is ignored
// On success the delegate route is mutated in place: its match conditions become the
// merged set, its name is prefixed with the root's, and any unset policy fields
// (rewrite, timeout, retries, fault, mirroring, CORS, headers, ...) inherit the
// root's values. Returns nil when the match conditions conflict.
func mergeHTTPRoute(root *networking.HTTPRoute, delegate *networking.HTTPRoute) *networking.HTTPRoute {
	// suppose there are N1 match conditions in root, N2 match conditions in delegate
	// if match condition of N2 is a subset of anyone in N1, this is a valid matching conditions
	merged, conflict := mergeHTTPMatchRequests(root.Match, delegate.Match)
	if conflict {
		log.Debugf("HTTPMatchRequests conflict: root route %s, delegate route %s", root.Name, delegate.Name)
		return nil
	}
	delegate.Match = merged
	// Compose the route name as "<root>-<delegate>" when both are set.
	if delegate.Name == "" {
		delegate.Name = root.Name
	} else if root.Name != "" {
		delegate.Name = root.Name + "-" + delegate.Name
	}
	// For each policy field below, the delegate's own setting wins; the root only
	// fills in fields the delegate left unset.
	if delegate.Rewrite == nil {
		delegate.Rewrite = root.Rewrite
	}
	if delegate.DirectResponse == nil {
		delegate.DirectResponse = root.DirectResponse
	}
	if delegate.Timeout == nil {
		delegate.Timeout = root.Timeout
	}
	if delegate.Retries == nil {
		delegate.Retries = root.Retries
	}
	if delegate.Fault == nil {
		delegate.Fault = root.Fault
	}
	if delegate.Mirror == nil {
		delegate.Mirror = root.Mirror
	}
	// nolint: staticcheck
	if delegate.MirrorPercent == nil {
		delegate.MirrorPercent = root.MirrorPercent
	}
	if delegate.MirrorPercentage == nil {
		delegate.MirrorPercentage = root.MirrorPercentage
	}
	if delegate.CorsPolicy == nil {
		delegate.CorsPolicy = root.CorsPolicy
	}
	if delegate.Mirrors == nil {
		delegate.Mirrors = root.Mirrors
	}
	if delegate.Headers == nil {
		delegate.Headers = root.Headers
	}
	return delegate
}
// return merged match conditions if not conflicts
// Every delegate match must be compatible with (a subset of) at least one root
// match; the output is the merge of each compatible (root, delegate) pair. When
// either side has no matches, the other side's matches are used as-is. A delegate
// match with no compatible root match — or an empty merged result — is a conflict.
func mergeHTTPMatchRequests(root, delegate []*networking.HTTPMatchRequest) (out []*networking.HTTPMatchRequest, conflict bool) {
	if len(root) == 0 {
		return delegate, false
	}
	if len(delegate) == 0 {
		return root, false
	}
	// each HTTPMatchRequest of delegate must find a superset in root.
	// otherwise it conflicts
	for _, subMatch := range delegate {
		foundMatch := false
		for _, rootMatch := range root {
			if hasConflict(rootMatch, subMatch) {
				log.Debugf("HTTPMatchRequests conflict: root %v, delegate %v", rootMatch, subMatch)
				continue
			}
			// merge HTTPMatchRequest
			out = append(out, mergeHTTPMatchRequest(rootMatch, subMatch))
			foundMatch = true
		}
		if !foundMatch {
			return nil, true
		}
	}
	if len(out) == 0 {
		conflict = true
	}
	return
}
// mergeHTTPMatchRequest combines a root and a delegate match into one: delegate
// fields win when set, root fields fill in the gaps, and map-valued fields
// (headers, query params, source labels) are merged with delegate entries taking
// precedence. The inputs are not mutated; a new value is returned.
func mergeHTTPMatchRequest(root, delegate *networking.HTTPMatchRequest) *networking.HTTPMatchRequest {
	// nolint: govet
	out := *delegate
	// Compose the match name as "<root>-<delegate>" when both are set.
	if out.Name == "" {
		out.Name = root.Name
	} else if root.Name != "" {
		out.Name = root.Name + "-" + out.Name
	}
	if out.Uri == nil {
		out.Uri = root.Uri
	}
	if out.Scheme == nil {
		out.Scheme = root.Scheme
	}
	if out.Method == nil {
		out.Method = root.Method
	}
	if out.Authority == nil {
		out.Authority = root.Authority
	}
	// headers
	out.Headers = maps.MergeCopy(root.Headers, delegate.Headers)
	// withoutheaders
	out.WithoutHeaders = maps.MergeCopy(root.WithoutHeaders, delegate.WithoutHeaders)
	// queryparams
	out.QueryParams = maps.MergeCopy(root.QueryParams, delegate.QueryParams)
	if out.Port == 0 {
		out.Port = root.Port
	}
	// SourceLabels
	out.SourceLabels = maps.MergeCopy(root.SourceLabels, delegate.SourceLabels)
	if out.SourceNamespace == "" {
		out.SourceNamespace = root.SourceNamespace
	}
	if len(out.Gateways) == 0 {
		out.Gateways = root.Gateways
	}
	if len(out.StatPrefix) == 0 {
		out.StatPrefix = root.StatPrefix
	}
	return &out
}
// hasConflict reports whether the leaf (delegate) HTTPMatchRequest is incompatible
// with the root match — i.e. the leaf is not a subset of the root for any of:
// URI/scheme/method/authority string matches, headers, without-headers, query
// params, IgnoreUriCase, port, source namespace/labels, or gateways.
func hasConflict(root, leaf *networking.HTTPMatchRequest) bool {
	roots := []*networking.StringMatch{root.Uri, root.Scheme, root.Method, root.Authority}
	leaves := []*networking.StringMatch{leaf.Uri, leaf.Scheme, leaf.Method, leaf.Authority}
	for i := range roots {
		if stringMatchConflict(roots[i], leaves[i]) {
			return true
		}
	}
	// header conflicts
	for key, leafHeader := range leaf.Headers {
		if stringMatchConflict(root.Headers[key], leafHeader) {
			return true
		}
	}
	// without headers
	for key, leafValue := range leaf.WithoutHeaders {
		if stringMatchConflict(root.WithoutHeaders[key], leafValue) {
			return true
		}
	}
	// query params conflict
	for key, value := range leaf.QueryParams {
		if stringMatchConflict(root.QueryParams[key], value) {
			return true
		}
	}
	// Case-sensitivity must agree exactly.
	if root.IgnoreUriCase != leaf.IgnoreUriCase {
		return true
	}
	// Ports conflict only when both are set and differ.
	if root.Port > 0 && leaf.Port > 0 && root.Port != leaf.Port {
		return true
	}
	// sourceNamespace
	if root.SourceNamespace != "" && leaf.SourceNamespace != root.SourceNamespace {
		return true
	}
	// sourceLabels should not conflict, root should have superset of sourceLabels.
	for key, leafValue := range leaf.SourceLabels {
		if v, ok := root.SourceLabels[key]; ok && v != leafValue {
			return true
		}
	}
	// gateways should not conflict, root should have superset of gateways.
	if len(root.Gateways) > 0 && len(leaf.Gateways) > 0 {
		if len(root.Gateways) < len(leaf.Gateways) {
			return true
		}
		rootGateway := sets.New(root.Gateways...)
		for _, gw := range leaf.Gateways {
			if !rootGateway.Contains(gw) {
				return true
			}
		}
	}
	return false
}
// stringMatchConflict reports whether the delegate's (leaf) string match cannot be
// satisfied within the root's match, i.e. the leaf is not a subset of the root.
//
// Rules:
//   - an unspecified (nil) root or leaf never conflicts;
//   - a regex on either side conflicts with any match set on the other side
//     (regex subset relations are not computed);
//   - root exact: the leaf must be the identical exact match;
//   - root prefix: a leaf prefix/exact must extend the root's prefix;
//   - any combination not ruled a conflict above is compatible.
func stringMatchConflict(root, leaf *networking.StringMatch) bool {
	// no conflict when root or leaf is not specified
	if root == nil || leaf == nil {
		return false
	}
	// If root regex match is specified, delegate should not have other matches.
	if root.GetRegex() != "" {
		if leaf.GetRegex() != "" || leaf.GetPrefix() != "" || leaf.GetExact() != "" {
			return true
		}
	}
	// If delegate regex match is specified, root should not have other matches.
	if leaf.GetRegex() != "" {
		if root.GetRegex() != "" || root.GetPrefix() != "" || root.GetExact() != "" {
			return true
		}
	}
	// root is exact match
	if exact := root.GetExact(); exact != "" {
		// leaf is prefix match, conflict
		if leaf.GetPrefix() != "" {
			return true
		}
		// both exact, but not equal
		if leaf.GetExact() != exact {
			return true
		}
		return false
	}
	// root is prefix match
	if prefix := root.GetPrefix(); prefix != "" {
		// leaf is prefix match
		if leaf.GetPrefix() != "" {
			// leaf(`/a`) is not subset of root(`/a/b`)
			return !strings.HasPrefix(leaf.GetPrefix(), prefix)
		}
		// leaf is exact match
		if leaf.GetExact() != "" {
			// leaf(`/a`) is not subset of root(`/a/b`)
			return !strings.HasPrefix(leaf.GetExact(), prefix)
		}
	}
	// FIX: this previously returned true, so any pair surviving every explicit
	// conflict check above (e.g. a non-nil match with no field set on either side,
	// or a root regex/prefix paired with a leaf that sets no field) was treated as
	// a conflict and the delegate route was silently dropped. All genuinely
	// conflicting combinations are handled explicitly above; reaching this point
	// means there is no conflict.
	return false
}
// isRootVs reports whether the VirtualService contains at least one HTTP route
// that delegates to another VirtualService.
func isRootVs(vs *networking.VirtualService) bool {
	for _, r := range vs.Http {
		// it is root vs with delegate
		if r.Delegate != nil {
			return true
		}
	}
	return false
}
// UseIngressSemantics determines which logic we should use for VirtualService
// This allows ingress and VS to both be represented by VirtualService, but have different
// semantics.
// The decision is driven by the internal route-semantics annotation stamped on the config.
func UseIngressSemantics(cfg config.Config) bool {
	return cfg.Annotations[constants.InternalRouteSemantics] == constants.RouteSemanticsIngress
}

// UseGatewaySemantics determines which logic we should use for VirtualService
// This allows gateway-api and VS to both be represented by VirtualService, but have different
// semantics.
// The decision is driven by the internal route-semantics annotation stamped on the config.
func UseGatewaySemantics(cfg config.Config) bool {
	return cfg.Annotations[constants.InternalRouteSemantics] == constants.RouteSemanticsGateway
}
// VirtualServiceDependencies returns dependent configs of the vs,
// for internal vs generated from gateway-api routes, it returns the parent routes,
// otherwise it just returns the vs as is.
func VirtualServiceDependencies(vs config.Config) []ConfigKey {
	if !UseGatewaySemantics(vs) {
		// A plain VirtualService depends only on itself.
		return []ConfigKey{{
			Kind:      kind.VirtualService,
			Namespace: vs.Namespace,
			Name:      vs.Name,
		}}
	}
	// Synthetic VS generated from gateway-api routes: its parents are encoded in an
	// annotation as a comma-separated list of "kind/name.namespace".
	parents := strings.Split(vs.Annotations[constants.InternalParentNames], ",")
	deps := make([]ConfigKey, 0, len(parents))
	for _, parent := range parents {
		kindName, nsname, ok := strings.Cut(parent, "/")
		if !ok {
			log.Errorf("invalid InternalParentName parts: %s", parent)
			continue
		}
		var parentKind kind.Kind
		switch kindName {
		case kind.HTTPRoute.String():
			parentKind = kind.HTTPRoute
		case kind.TCPRoute.String():
			parentKind = kind.TCPRoute
		case kind.TLSRoute.String():
			parentKind = kind.TLSRoute
		case kind.GRPCRoute.String():
			parentKind = kind.GRPCRoute
		case kind.UDPRoute.String():
			parentKind = kind.UDPRoute
		default:
			// shouldn't happen
			continue
		}
		name, ns, ok := strings.Cut(nsname, ".")
		if !ok {
			log.Errorf("invalid InternalParentName name: %s", nsname)
			continue
		}
		deps = append(deps, ConfigKey{
			Kind:      parentKind,
			Name:      name,
			Namespace: ns,
		})
	}
	return deps
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
wasm "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3"
)
// ConstructVMConfig constructs a VM config for the null (built-in) wasm
// runtime, with the plugin identified by name via an inline-string data source.
func ConstructVMConfig(name string) *wasm.PluginConfig_VmConfig {
	code := &core.AsyncDataSource{
		Specifier: &core.AsyncDataSource_Local{
			Local: &core.DataSource{
				Specifier: &core.DataSource_InlineString{InlineString: name},
			},
		},
	}
	return &wasm.PluginConfig_VmConfig{
		VmConfig: &wasm.VmConfig{
			Runtime: "envoy.wasm.runtime.null",
			Code:    code,
		},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"time"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// XdsCacheImpl is the default XdsCache implementation, holding one typed
// sub-cache per XDS resource type. cds and rds may be replaced by a disabled
// cache depending on feature flags (see NewXdsCache).
type XdsCacheImpl struct {
	cds typedXdsCache[uint64]
	eds typedXdsCache[uint64]
	rds typedXdsCache[uint64]
	sds typedXdsCache[string]
}
// XdsCache interface defines a store for caching XDS responses.
// All operations are thread safe.
type XdsCache interface {
	// Run starts a background thread to flush evicted indexes periodically.
	Run(stop <-chan struct{})
	// Add adds the given XdsCacheEntry with the value for the given pushContext to the cache.
	// If the cache has been updated to a newer push context, the write will be dropped silently.
	// This ensures stale data does not overwrite fresh data when dealing with concurrent
	// writers.
	Add(entry XdsCacheEntry, pushRequest *PushRequest, value *discovery.Resource)
	// Get retrieves the cached value if it exists.
	Get(entry XdsCacheEntry) *discovery.Resource
	// Clear removes the cache entries that are dependent on the configs passed.
	Clear(sets.Set[ConfigKey])
	// ClearAll clears the entire cache.
	ClearAll()
	// Keys returns all currently configured keys for the type. This is for testing/debug only.
	Keys(t string) []any
	// Snapshot returns a snapshot of all values. This is for testing/debug only.
	Snapshot() []*discovery.Resource
}
// XdsCacheEntry interface defines functions that should be implemented by
// resources that can be cached.
type XdsCacheEntry interface {
	// Type indicates the type of Xds resource being cached like CDS.
	Type() string
	// Key is the key to be used in cache. For CDS/EDS/RDS this is a uint64
	// hash; for SDS it is a string (see XdsCacheImpl.Add type assertions).
	Key() any
	// DependentConfigs is config items that this cache key is dependent on.
	// Whenever these configs change, we should invalidate this cache entry.
	DependentConfigs() []ConfigHash
	// Cacheable indicates whether this entry is valid for cache. For example
	// for EDS to be cacheable, the Endpoint should have corresponding service.
	Cacheable() bool
}
// Valid values for XdsCacheEntry.Type, also used as the type selector
// argument to XdsCacheImpl.Keys.
const (
	CDSType = "cds"
	EDSType = "eds"
	RDSType = "rds"
	SDSType = "sds"
)
// NewXdsCache returns an instance of a cache. EDS and SDS caches are always
// enabled; CDS and RDS caches are replaced by no-op disabled caches when the
// corresponding feature flag is off.
func NewXdsCache() XdsCache {
	impl := XdsCacheImpl{
		eds: newTypedXdsCache[uint64](),
		sds: newTypedXdsCache[string](),
	}
	if !features.EnableCDSCaching {
		impl.cds = disabledCache[uint64]{}
	} else {
		impl.cds = newTypedXdsCache[uint64]()
	}
	if !features.EnableRDSCaching {
		impl.rds = disabledCache[uint64]{}
	} else {
		impl.rds = newTypedXdsCache[uint64]()
	}
	return impl
}
// Run starts a background goroutine that flushes evicted indexes from every
// sub-cache on a fixed interval, terminating when stop is closed.
func (x XdsCacheImpl) Run(stop <-chan struct{}) {
	go func() {
		ticker := time.NewTicker(features.XDSCacheIndexClearInterval)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-ticker.C:
				x.cds.Flush()
				x.eds.Flush()
				x.rds.Flush()
				x.sds.Flush()
			}
		}
	}()
}
// Add stores value in the sub-cache matching the entry's type.
// Entries that report themselves as non-cacheable are ignored.
func (x XdsCacheImpl) Add(entry XdsCacheEntry, pushRequest *PushRequest, value *discovery.Resource) {
	if !entry.Cacheable() {
		return
	}
	switch entry.Type() {
	case CDSType:
		x.cds.Add(entry.Key().(uint64), entry, pushRequest, value)
	case EDSType:
		x.eds.Add(entry.Key().(uint64), entry, pushRequest, value)
	case RDSType:
		x.rds.Add(entry.Key().(uint64), entry, pushRequest, value)
	case SDSType:
		x.sds.Add(entry.Key().(string), entry, pushRequest, value)
	default:
		log.Errorf("unknown type %s", entry.Type())
	}
}
// Get returns the cached resource for the entry, or nil on a miss,
// an unknown type, or a non-cacheable entry.
func (x XdsCacheImpl) Get(entry XdsCacheEntry) *discovery.Resource {
	if !entry.Cacheable() {
		return nil
	}
	switch entry.Type() {
	case CDSType:
		return x.cds.Get(entry.Key().(uint64))
	case EDSType:
		return x.eds.Get(entry.Key().(uint64))
	case RDSType:
		return x.rds.Get(entry.Key().(uint64))
	case SDSType:
		return x.sds.Get(entry.Key().(string))
	}
	log.Errorf("unknown type %s", entry.Type())
	return nil
}
// Clear invalidates entries in every sub-cache that depend on any of the
// given configs.
func (x XdsCacheImpl) Clear(s sets.Set[ConfigKey]) {
	// PeerAuthentication changes invalidate the whole EDS cache.
	wipeEDS := HasConfigsOfKind(s, kind.PeerAuthentication)
	x.cds.Clear(s)
	if wipeEDS {
		x.eds.ClearAll()
	} else {
		x.eds.Clear(s)
	}
	x.rds.Clear(s)
	x.sds.Clear(s)
}
// ClearAll drops every entry from all sub-caches.
func (x XdsCacheImpl) ClearAll() {
	for _, wipe := range []func(){x.cds.ClearAll, x.eds.ClearAll, x.rds.ClearAll, x.sds.ClearAll} {
		wipe()
	}
}
// Keys returns the keys of the sub-cache selected by t, widened to []any.
// Unknown types yield nil. Testing/debug only.
func (x XdsCacheImpl) Keys(t string) []any {
	switch t {
	case CDSType:
		return convertToAnySlices(x.cds.Keys())
	case EDSType:
		return convertToAnySlices(x.eds.Keys())
	case RDSType:
		return convertToAnySlices(x.rds.Keys())
	case SDSType:
		return convertToAnySlices(x.sds.Keys())
	}
	return nil
}
// convertToAnySlices widens a slice of any comparable key type into []any,
// preserving order.
func convertToAnySlices[K comparable](in []K) []any {
	result := make([]any, 0, len(in))
	for _, key := range in {
		result = append(result, key)
	}
	return result
}
// Snapshot returns the values of all sub-caches concatenated in
// cds, eds, rds, sds order. Testing/debug only.
func (x XdsCacheImpl) Snapshot() []*discovery.Resource {
	var all []*discovery.Resource
	for _, snap := range [][]*discovery.Resource{
		x.cds.Snapshot(),
		x.eds.Snapshot(),
		x.rds.Snapshot(),
		x.sds.Snapshot(),
	} {
		all = append(all, snap...)
	}
	return all
}
// DisabledCache is a cache that is always empty
type DisabledCache struct{}

// Run is a no-op: a disabled cache has no indexes to flush.
func (d DisabledCache) Run(stop <-chan struct{}) {
}

// Add is a no-op: nothing is ever stored.
func (d DisabledCache) Add(entry XdsCacheEntry, pushRequest *PushRequest, value *discovery.Resource) {
}

// Get always reports a cache miss.
func (d DisabledCache) Get(entry XdsCacheEntry) *discovery.Resource {
	return nil
}

// Clear is a no-op: there are no entries to invalidate.
func (d DisabledCache) Clear(s sets.Set[ConfigKey]) {
}

// ClearAll is a no-op.
func (d DisabledCache) ClearAll() {
}

// Keys always returns nil: the cache holds no keys.
func (d DisabledCache) Keys(t string) []any {
	return nil
}

// Snapshot always returns nil: the cache holds no values.
func (d DisabledCache) Snapshot() []*discovery.Resource {
	return nil
}

// Compile-time assertion that DisabledCache satisfies XdsCache.
var _ XdsCache = &DisabledCache{}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apigen
import (
"strings"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/log"
)
// APIGenerator supports generation of high-level API resources, similar with the MCP
// protocol. This is a replacement for MCP, using XDS (and in future UDPA) as a transport.
// Based on lessons from MCP, the protocol allows incremental updates by
// default, using the same mechanism that EDS is using, i.e. sending only changed resources
// in a push. Incremental deletes are sent as a resource with empty body.
//
// Example: networking.istio.io/v1alpha3/VirtualService
//
// TODO: we can also add a special marker in the header.
type APIGenerator struct {
	// ConfigStore interface for listing istio api resources.
	store model.ConfigStore
}
// NewGenerator creates an APIGenerator that serves resources out of the
// given config store.
func NewGenerator(store model.ConfigStore) *APIGenerator {
	g := &APIGenerator{}
	g.store = store
	return g
}
// TODO: take 'updates' into account, don't send pushes for resources that haven't changed
// TODO: support WorkloadEntry - to generate endpoints (equivalent with EDS)
// TODO: based on lessons from MCP, we want to send 'chunked' responses, like apiserver does.
// A first attempt added a 'sync' record at the end. Based on feedback and common use, a
// different approach can be used - for large responses, we can mark the last one as 'hasMore'
// by adding a field to the envelope.

// Generate implements the generate method for high level APIs, like Istio config types.
// This provides similar functionality with MCP and :8080/debug/configz.
//
// Names are based on the current resource naming in istiod stores.
func (g *APIGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	resp := model.Resources{}

	// Note: this is the style used by MCP and its config. Pilot is using 'Group/Version/Kind' as the
	// key, which is similar.
	//
	// The actual type in the Any should be a real proto - which is based on the generated package name.
	// For example: type is for Any is 'type.googlepis.com/istio.networking.v1alpha3.EnvoyFilter
	// We use: networking.istio.io/v1alpha3/EnvoyFilter
	parts := strings.SplitN(w.TypeUrl, "/", 3)
	if len(parts) != 3 {
		log.Warnf("ADS: Unknown watched resources %s", w.TypeUrl)
		// Still return an empty response - to not break waiting code. It is fine to not know about some resource.
		return resp, model.DefaultXdsLogDetails, nil
	}

	// TODO: extra validation may be needed - at least logging that a resource
	// of unknown type was requested. This should not be an error - maybe client asks
	// for a valid CRD we just don't know about. An empty set indicates we have no such config.
	rgvk := config.GroupVersionKind{
		Group:   parts[0],
		Version: parts[1],
		Kind:    parts[2],
	}
	if w.TypeUrl == gvk.MeshConfig.String() {
		resp = append(resp, &discovery.Resource{
			Resource: protoconv.MessageToAny(req.Push.Mesh),
		})
		return resp, model.DefaultXdsLogDetails, nil
	}

	for _, c := range g.store.List(rgvk, "") {
		// Right now model.Config is not a proto - until we change it, mcp.Resource.
		// This also helps migrating MCP users.
		body, err := config.PilotConfigToResource(&c)
		if err != nil {
			log.WithLabels("resource", c.NamespacedName()).Warnf("resource error: %v", err)
			continue
		}
		resp = append(resp, &discovery.Resource{
			Name:     c.Namespace + "/" + c.Name,
			Resource: protoconv.MessageToAny(body),
		})
	}

	// TODO: MeshConfig, current dynamic ProxyConfig (for this proxy), Networks
	if w.TypeUrl == gvk.ServiceEntry.String() {
		// Include 'synthetic' SE - but without the endpoints. Used to generate CDS, LDS.
		// EDS is pass-through.
		for _, s := range proxy.SidecarScope.Services() {
			// Ignore services that are result of conversion from ServiceEntry.
			if s.Attributes.ServiceRegistry == provider.External {
				continue
			}
			se := serviceentry.ServiceToServiceEntry(s, proxy)
			body, err := config.PilotConfigToResource(se)
			if err != nil {
				log.WithLabels("resource", se.NamespacedName()).Warnf("resource error: %v", err)
				continue
			}
			resp = append(resp, &discovery.Resource{
				Name:     se.Namespace + "/" + se.Name,
				Resource: protoconv.MessageToAny(body),
			})
		}
	}
	return resp, model.DefaultXdsLogDetails, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"sync"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
cel "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/filters/cel/v3"
grpcaccesslog "github.com/envoyproxy/go-control-plane/envoy/extensions/access_loggers/grpc/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/wellknown"
)
const (
	// EnvoyServerName for istio's envoy
	EnvoyServerName = "istio-envoy"

	// celFilter is the name used for the Envoy CEL extension access log filter
	// (see buildAccessLogFilterFromTelemetry).
	celFilter = "envoy.access_loggers.extension_filters.cel"

	// listenerEnvoyAccessLogFriendlyName is the ALS log name used for
	// listener-level access logs (see tcpGrpcAccessLog).
	listenerEnvoyAccessLogFriendlyName = "listener_envoy_accesslog"

	// EnvoyAccessLogCluster is the cluster name that has details for server implementing Envoy ALS.
	// This cluster is created in bootstrap.
	EnvoyAccessLogCluster = "envoy_accesslog_service"
)
var (
	// State logged by the metadata exchange filter about the upstream and downstream service instances
	// We need to propagate these as part of access log service stream
	// Logging them by default on the console may be an issue as the base64 encoded string is bound to be a big one.
	// But end users can certainly configure it on their own via the meshConfig using the %FILTER_STATE% macro.
	envoyWasmStateToLog = []string{"wasm.upstream_peer", "wasm.upstream_peer_id", "wasm.downstream_peer", "wasm.downstream_peer_id"}

	// accessLogBuilder is used to set accessLog to filters
	accessLogBuilder = newAccessLogBuilder()
)
// AccessLogBuilder assembles Envoy access log configuration for TCP proxies,
// HTTP connection managers, and listeners, caching the file-based logs built
// from mesh config (invalidated via reset()).
type AccessLogBuilder struct {
	// tcpGrpcAccessLog is used when access log service is enabled in mesh config.
	tcpGrpcAccessLog *accesslog.AccessLog
	// httpGrpcAccessLog is used when access log service is enabled in mesh config.
	httpGrpcAccessLog *accesslog.AccessLog
	// tcpGrpcListenerAccessLog is used when access log service is enabled in mesh config.
	tcpGrpcListenerAccessLog *accesslog.AccessLog

	// file accessLog which is cached and reset on MeshConfig change.
	// mutex guards the two cached fields below.
	mutex sync.RWMutex
	fileAccesslog *accesslog.AccessLog
	listenerFileAccessLog *accesslog.AccessLog
}
// newAccessLogBuilder creates an AccessLogBuilder with the gRPC ALS
// configurations pre-built; file-based logs are built lazily on demand.
func newAccessLogBuilder() *AccessLogBuilder {
	b := &AccessLogBuilder{}
	b.tcpGrpcAccessLog = tcpGrpcAccessLog(false)
	b.tcpGrpcListenerAccessLog = tcpGrpcAccessLog(true)
	b.httpGrpcAccessLog = httpGrpcAccessLog()
	return b
}
// setTCPAccessLog appends access logs to the TCP proxy filter. Telemetry API
// configuration takes precedence; otherwise legacy mesh config settings apply.
func (b *AccessLogBuilder) setTCPAccessLog(push *model.PushContext, proxy *model.Proxy, tcp *tcp.TcpProxy, class networking.ListenerClass) {
	cfgs := push.Telemetry.AccessLogging(push, proxy, class)
	if len(cfgs) > 0 {
		if logs := buildAccessLogFromTelemetry(cfgs, false); len(logs) > 0 {
			tcp.AccessLog = append(tcp.AccessLog, logs...)
		}
		return
	}
	// No Telemetry API configured, fall back to legacy mesh config setting
	mesh := push.Mesh
	if mesh.AccessLogFile != "" {
		tcp.AccessLog = append(tcp.AccessLog, b.buildFileAccessLog(mesh))
	}
	if mesh.EnableEnvoyAccessLogService {
		// Setting it to TCP as the low level one.
		tcp.AccessLog = append(tcp.AccessLog, b.tcpGrpcAccessLog)
	}
}
// buildAccessLogFromTelemetry converts Telemetry API logging configs into
// Envoy access log entries, skipping disabled ones. When forListener is set,
// each log additionally carries the NR response-flag filter.
func buildAccessLogFromTelemetry(cfgs []model.LoggingConfig, forListener bool) []*accesslog.AccessLog {
	result := make([]*accesslog.AccessLog, 0, len(cfgs))
	for _, cfg := range cfgs {
		if cfg.Disabled {
			continue
		}
		filters := make([]*accesslog.AccessLogFilter, 0, 2)
		if forListener {
			filters = append(filters, addAccessLogFilter())
		}
		if telFilter := buildAccessLogFilterFromTelemetry(cfg); telFilter != nil {
			filters = append(filters, telFilter)
		}
		result = append(result, &accesslog.AccessLog{
			Name:       cfg.AccessLog.Name,
			ConfigType: cfg.AccessLog.ConfigType,
			Filter:     buildAccessLogFilter(filters...),
		})
	}
	return result
}
// buildAccessLogFilterFromTelemetry wraps the Telemetry API filter expression
// in an Envoy CEL extension filter. Returns nil when no filter is configured.
func buildAccessLogFilterFromTelemetry(spec model.LoggingConfig) *accesslog.AccessLogFilter {
	if spec.Filter == nil {
		return nil
	}
	expr := &cel.ExpressionFilter{Expression: spec.Filter.Expression}
	extension := &accesslog.ExtensionFilter{
		Name:       celFilter,
		ConfigType: &accesslog.ExtensionFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(expr)},
	}
	return &accesslog.AccessLogFilter{
		FilterSpecifier: &accesslog.AccessLogFilter_ExtensionFilter{ExtensionFilter: extension},
	}
}
// setHTTPAccessLog appends access logs to the HTTP connection manager.
// Telemetry API configuration takes precedence; otherwise legacy mesh config
// settings apply.
func (b *AccessLogBuilder) setHTTPAccessLog(push *model.PushContext, proxy *model.Proxy,
	connectionManager *hcm.HttpConnectionManager, class networking.ListenerClass,
) {
	cfgs := push.Telemetry.AccessLogging(push, proxy, class)
	if len(cfgs) > 0 {
		if logs := buildAccessLogFromTelemetry(cfgs, false); len(logs) > 0 {
			connectionManager.AccessLog = append(connectionManager.AccessLog, logs...)
		}
		return
	}
	// No Telemetry API configured, fall back to legacy mesh config setting
	mesh := push.Mesh
	if mesh.AccessLogFile != "" {
		connectionManager.AccessLog = append(connectionManager.AccessLog, b.buildFileAccessLog(mesh))
	}
	if mesh.EnableEnvoyAccessLogService {
		connectionManager.AccessLog = append(connectionManager.AccessLog, b.httpGrpcAccessLog)
	}
}
// setListenerAccessLog appends listener-level access logs, unless disabled
// via mesh config. Telemetry API configuration takes precedence; otherwise
// legacy mesh config settings apply.
func (b *AccessLogBuilder) setListenerAccessLog(push *model.PushContext, proxy *model.Proxy,
	listener *listener.Listener, class networking.ListenerClass,
) {
	mesh := push.Mesh
	if mesh.DisableEnvoyListenerLog {
		return
	}
	cfgs := push.Telemetry.AccessLogging(push, proxy, class)
	if len(cfgs) > 0 {
		// forListener=true adds the NR response-flag filter to each log.
		if logs := buildAccessLogFromTelemetry(cfgs, true); len(logs) > 0 {
			listener.AccessLog = append(listener.AccessLog, logs...)
		}
		return
	}
	// No Telemetry API configured, fall back to legacy mesh config setting
	if mesh.AccessLogFile != "" {
		listener.AccessLog = append(listener.AccessLog, b.buildListenerFileAccessLog(mesh))
	}
	if mesh.EnableEnvoyAccessLogService {
		// Setting it to TCP as the low level one.
		listener.AccessLog = append(listener.AccessLog, b.tcpGrpcListenerAccessLog)
	}
}
// buildFileAccessLog returns the file access log for the mesh config,
// building and caching it on a miss (first use, or after reset()).
func (b *AccessLogBuilder) buildFileAccessLog(mesh *meshconfig.MeshConfig) *accesslog.AccessLog {
	if cached := b.cachedFileAccessLog(); cached != nil {
		return cached
	}
	// Cache miss: build from mesh config and store for later callers.
	al := model.FileAccessLogFromMeshConfig(mesh.AccessLogFile, mesh)
	b.mutex.Lock()
	b.fileAccesslog = al
	b.mutex.Unlock()
	return al
}
// addAccessLogFilter builds a filter that only matches responses carrying the
// NR (no route) response flag.
func addAccessLogFilter() *accesslog.AccessLogFilter {
	flagFilter := &accesslog.ResponseFlagFilter{Flags: []string{"NR"}}
	return &accesslog.AccessLogFilter{
		FilterSpecifier: &accesslog.AccessLogFilter_ResponseFlagFilter{ResponseFlagFilter: flagFilter},
	}
}
// buildAccessLogFilter combines filters: nil for none, the filter itself for
// one, and an AND composite for several.
func buildAccessLogFilter(f ...*accesslog.AccessLogFilter) *accesslog.AccessLogFilter {
	switch len(f) {
	case 0:
		return nil
	case 1:
		return f[0]
	default:
		return &accesslog.AccessLogFilter{
			FilterSpecifier: &accesslog.AccessLogFilter_AndFilter{
				AndFilter: &accesslog.AndFilter{Filters: f},
			},
		}
	}
}
// buildListenerFileAccessLog returns the listener-level file access log,
// building and caching it on a miss (first use, or after reset()).
func (b *AccessLogBuilder) buildListenerFileAccessLog(mesh *meshconfig.MeshConfig) *accesslog.AccessLog {
	if cached := b.cachedListenerFileAccessLog(); cached != nil {
		return cached
	}
	// Cache miss: build from mesh config.
	lal := model.FileAccessLogFromMeshConfig(mesh.AccessLogFile, mesh)
	// We add ResponseFlagFilter here, as we want to get listener access logs only on scenarios where we might
	// not get filter Access Logs like in cases like NR to upstream.
	lal.Filter = addAccessLogFilter()
	b.mutex.Lock()
	b.listenerFileAccessLog = lal
	b.mutex.Unlock()
	return lal
}
// cachedFileAccessLog returns the cached file access log (nil if not built).
func (b *AccessLogBuilder) cachedFileAccessLog() *accesslog.AccessLog {
	b.mutex.RLock()
	al := b.fileAccesslog
	b.mutex.RUnlock()
	return al
}
// cachedListenerFileAccessLog returns the cached listener-level file access
// log (nil if not built).
func (b *AccessLogBuilder) cachedListenerFileAccessLog() *accesslog.AccessLog {
	b.mutex.RLock()
	al := b.listenerFileAccessLog
	b.mutex.RUnlock()
	return al
}
// tcpGrpcAccessLog builds the gRPC ALS access log used for TCP traffic. When
// isListener is true, it uses the listener log name and attaches the NR
// response-flag filter so listener logs only fire on no-route scenarios.
func tcpGrpcAccessLog(isListener bool) *accesslog.AccessLog {
	logName := model.TCPEnvoyAccessLogFriendlyName
	var filter *accesslog.AccessLogFilter
	if isListener {
		logName = listenerEnvoyAccessLogFriendlyName
		filter = addAccessLogFilter()
	}
	cfg := &grpcaccesslog.TcpGrpcAccessLogConfig{
		CommonConfig: &grpcaccesslog.CommonGrpcAccessLogConfig{
			LogName: logName,
			GrpcService: &core.GrpcService{
				TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
						ClusterName: EnvoyAccessLogCluster,
					},
				},
			},
			TransportApiVersion:     core.ApiVersion_V3,
			FilterStateObjectsToLog: envoyWasmStateToLog,
		},
	}
	return &accesslog.AccessLog{
		Name:       model.TCPEnvoyALSName,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
		Filter:     filter,
	}
}
// httpGrpcAccessLog builds the gRPC ALS access log used for HTTP traffic.
func httpGrpcAccessLog() *accesslog.AccessLog {
	common := &grpcaccesslog.CommonGrpcAccessLogConfig{
		LogName: model.HTTPEnvoyAccessLogFriendlyName,
		GrpcService: &core.GrpcService{
			TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
				EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
					ClusterName: EnvoyAccessLogCluster,
				},
			},
		},
		TransportApiVersion:     core.ApiVersion_V3,
		FilterStateObjectsToLog: envoyWasmStateToLog,
	}
	cfg := &grpcaccesslog.HttpGrpcAccessLogConfig{CommonConfig: common}
	return &accesslog.AccessLog{
		Name:       wellknown.HTTPGRPCAccessLog,
		ConfigType: &accesslog.AccessLog_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
	}
}
// reset clears the cached file access logs so they are rebuilt from the
// current mesh config on next use.
func (b *AccessLogBuilder) reset() {
	b.mutex.Lock()
	defer b.mutex.Unlock()
	b.fileAccesslog = nil
	b.listenerFileAccessLog = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"net"
"strconv"
"strings"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/protobuf/types/known/structpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/endpoints"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/slices"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
)
// deltaConfigTypes are used to detect changes and trigger delta calculations. When config updates has ONLY entries
// in this map, then delta calculation is triggered. Any other config kind in
// the update forces a full cluster rebuild.
var deltaConfigTypes = sets.New(kind.ServiceEntry.String(), kind.DestinationRule.String())
// BuildClusters returns the list of clusters for the given proxy. This is the CDS output
// For outbound: Cluster for each service/subset hostname or cidr with SNI set to service hostname
// Cluster type based on resolution
// For inbound (sidecar only): Cluster for each inbound endpoint port and for each service port
func (configgen *ConfigGeneratorImpl) BuildClusters(proxy *model.Proxy, req *model.PushRequest) ([]*discovery.Resource, model.XdsLogDetails) {
	var services []*model.Service
	if proxy.Type == model.Router && features.FilterGatewayClusterConfig {
		// Gateways may be restricted to the services they actually route to.
		services = req.Push.GatewayServices(proxy)
	} else {
		// In Sotw, we care about all services.
		services = proxy.SidecarScope.Services()
	}
	return configgen.buildClusters(proxy, req, services)
}
// BuildDeltaClusters generates the deltas (add and delete) for a given proxy. Currently, only service changes are reflected with deltas.
// Otherwise, we fall back onto generating everything.
func (configgen *ConfigGeneratorImpl) BuildDeltaClusters(proxy *model.Proxy, updates *model.PushRequest,
	watched *model.WatchedResource,
) ([]*discovery.Resource, []string, model.XdsLogDetails, bool) {
	// if we can't use delta, fall back to generate all
	if !shouldUseDelta(updates) {
		cl, lg := configgen.BuildClusters(proxy, updates)
		return cl, nil, lg, false
	}

	var deletedClusters []string
	var services []*model.Service
	// Holds clusters per service, keyed by hostname.
	serviceClusters := make(map[string]sets.String)
	// Holds service ports, keyed by hostname. The inner map is port -> cluster name.
	// This is mainly used when service is updated and a port has been removed.
	servicePortClusters := make(map[string]map[int]string)
	// Holds subset clusters per service, keyed by hostname.
	subsetClusters := make(map[string]sets.String)

	for _, cluster := range watched.ResourceNames {
		// WatchedResources.ResourceNames will contain the names of the clusters it is subscribed to. We can
		// check with the name of our service (cluster names are in the format outbound|<port>|<subset>|<hostname>).
		_, subset, svcHost, port := model.ParseSubsetKey(cluster)
		if subset == "" {
			sets.InsertOrNew(serviceClusters, string(svcHost), cluster)
		} else {
			sets.InsertOrNew(subsetClusters, string(svcHost), cluster)
		}
		if servicePortClusters[string(svcHost)] == nil {
			servicePortClusters[string(svcHost)] = make(map[int]string)
		}
		servicePortClusters[string(svcHost)][port] = cluster
	}

	for key := range updates.ConfigsUpdated {
		// deleted clusters for this config.
		var deleted []string
		var svcs []*model.Service
		switch key.Kind {
		case kind.ServiceEntry:
			svcs, deleted = configgen.deltaFromServices(key, proxy, updates.Push, serviceClusters,
				servicePortClusters, subsetClusters)
		case kind.DestinationRule:
			svcs, deleted = configgen.deltaFromDestinationRules(key, proxy, subsetClusters)
		}
		services = append(services, svcs...)
		deletedClusters = append(deletedClusters, deleted...)
	}
	// Named logDetails rather than log to avoid shadowing the imported log package.
	clusters, logDetails := configgen.buildClusters(proxy, updates, services)
	// DeletedClusters contains list of all subset clusters for the deleted DR or updated DR.
	// When clusters are rebuilt, it rebuilt the subset clusters as well. So, we know what
	// subset clusters are really needed. So if deleted cluster is not rebuilt, then it is really deleted.
	builtClusters := sets.New[string]()
	for _, c := range clusters {
		builtClusters.Insert(c.Name)
	}
	finalDeletedClusters := slices.FilterInPlace(deletedClusters, func(cluster string) bool {
		return !builtClusters.Contains(cluster)
	})
	return clusters, finalDeletedClusters, logDetails, true
}
// deltaFromServices computes the delta clusters from the updated services.
// It returns the services whose clusters must be rebuilt, plus the cluster
// names that should be deleted (service removed, or a service port removed).
func (configgen *ConfigGeneratorImpl) deltaFromServices(key model.ConfigKey, proxy *model.Proxy, push *model.PushContext,
	serviceClusters map[string]sets.String, servicePortClusters map[string]map[int]string, subsetClusters map[string]sets.String,
) ([]*model.Service, []string) {
	var deletedClusters []string
	var services []*model.Service
	service := push.ServiceForHostname(proxy, host.Name(key.Name))
	// push.ServiceForHostname will return nil if the proxy doesn't care about the service OR it was deleted.
	// we can cross-reference with WatchedResources to figure out which services were deleted.
	if service == nil {
		// We assume a service was deleted and delete all clusters for that service.
		deletedClusters = append(deletedClusters, serviceClusters[key.Name].UnsortedList()...)
		deletedClusters = append(deletedClusters, subsetClusters[key.Name].UnsortedList()...)
	} else {
		// Service exists. If the service update removed a port, delete the corresponding port clusters.
		services = append(services, service)
		for port, cluster := range servicePortClusters[service.Hostname.String()] {
			// if this service port is removed, we can conclude that it is a removed cluster.
			if _, exists := service.Ports.GetByPort(port); !exists {
				deletedClusters = append(deletedClusters, cluster)
			}
		}
	}
	return services, deletedClusters
}
// deltaFromDestinationRules computes the delta clusters from the updated destination rules.
// It returns the services whose clusters must be rebuilt and the subset clusters that are
// candidates for deletion (the caller reconciles them against the rebuilt set).
func (configgen *ConfigGeneratorImpl) deltaFromDestinationRules(updatedDr model.ConfigKey, proxy *model.Proxy,
	subsetClusters map[string]sets.String,
) ([]*model.Service, []string) {
	var deletedClusters []string
	var services []*model.Service
	cfg := proxy.SidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
	if cfg == nil {
		// Destinationrule was deleted. Find matching services from previous destinationrule.
		prevCfg := proxy.PrevSidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
		if prevCfg == nil {
			// Fixed typo in log message: "form" -> "from".
			log.Debugf("Prev DestinationRule from PrevSidecarScope is missing for %s/%s", updatedDr.Namespace, updatedDr.Name)
			return nil, nil
		}
		dr := prevCfg.Spec.(*networking.DestinationRule)
		services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(dr.Host))...)
	} else {
		dr := cfg.Spec.(*networking.DestinationRule)
		// Destinationrule was updated. Find matching services from updated destinationrule.
		services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(dr.Host))...)
		// Check if destination rule host is changed, if yes, then we need to add previous host matching services.
		prevCfg := proxy.PrevSidecarScope.DestinationRuleByName(updatedDr.Name, updatedDr.Namespace)
		if prevCfg != nil {
			prevDr := prevCfg.Spec.(*networking.DestinationRule)
			if dr.Host != prevDr.Host {
				services = append(services, proxy.SidecarScope.ServicesForHostname(host.Name(prevDr.Host))...)
			}
		}
	}
	// Remove all matched service subsets. When we rebuild clusters, we will rebuild the subset clusters as well.
	// We can reconcile the actual subsets that are needed when we rebuild the clusters.
	for _, matchedSvc := range services {
		// Single map lookup instead of the previous double lookup.
		if subsets, ok := subsetClusters[matchedSvc.Hostname.String()]; ok {
			deletedClusters = append(deletedClusters, subsets.UnsortedList()...)
		}
	}
	return services, deletedClusters
}
// buildClusters builds clusters for the proxy with the services passed.
// The cluster set depends on the proxy type: sidecars get outbound + inbound +
// passthrough/blackhole clusters, waypoints get outbound + waypoint inbound,
// and gateways get outbound + blackhole (plus SNI-DNAT for auto-passthrough).
// Returns the clusters as discovery resources along with cache-hit log details.
func (configgen *ConfigGeneratorImpl) buildClusters(proxy *model.Proxy, req *model.PushRequest,
	services []*model.Service,
) ([]*discovery.Resource, model.XdsLogDetails) {
	clusters := make([]*cluster.Cluster, 0)
	resources := model.Resources{}
	envoyFilterPatches := req.Push.EnvoyFilters(proxy)
	cb := NewClusterBuilder(proxy, req, configgen.Cache)
	instances := proxy.ServiceTargets
	cacheStats := cacheStats{}
	switch proxy.Type {
	case model.SidecarProxy:
		// Setup outbound clusters
		outboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_OUTBOUND}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, outboundPatcher, services)
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Add a blackhole and passthrough cluster for catching traffic to unresolved routes
		clusters = outboundPatcher.conditionallyAppend(clusters, nil, cb.buildBlackHoleCluster(), cb.buildDefaultPassthroughCluster())
		clusters = append(clusters, outboundPatcher.insertedClusters()...)
		// Setup inbound clusters
		inboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_INBOUND}
		clusters = append(clusters, configgen.buildInboundClusters(cb, proxy, instances, inboundPatcher)...)
		if proxy.EnableHBONE() {
			clusters = append(clusters, configgen.buildInboundHBONEClusters())
		}
		// Pass through clusters for inbound traffic. These cluster bind loopback-ish src address to access node local service.
		clusters = inboundPatcher.conditionallyAppend(clusters, nil, cb.buildInboundPassthroughClusters()...)
		clusters = append(clusters, inboundPatcher.insertedClusters()...)
	case model.Waypoint:
		_, wps := findWaypointResources(proxy, req.Push)
		// Waypoint proxies do not need outbound clusters in most cases, unless we have a route pointing to something
		outboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_OUTBOUND}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, outboundPatcher, filterWaypointOutboundServices(
			req.Push.ServicesAttachedToMesh(), wps.services, services))
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Setup inbound clusters
		inboundPatcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_SIDECAR_INBOUND}
		clusters = append(clusters, configgen.buildWaypointInboundClusters(cb, proxy, req.Push, wps.services)...)
		clusters = append(clusters, inboundPatcher.insertedClusters()...)
	default: // Gateways
		patcher := clusterPatcher{efw: envoyFilterPatches, pctx: networking.EnvoyFilter_GATEWAY}
		ob, cs := configgen.buildOutboundClusters(cb, proxy, patcher, services)
		cacheStats = cacheStats.merge(cs)
		resources = append(resources, ob...)
		// Gateways do not require the default passthrough cluster as they do not have original dst listeners.
		clusters = patcher.conditionallyAppend(clusters, nil, cb.buildBlackHoleCluster())
		if proxy.Type == model.Router && proxy.MergedGateway != nil && proxy.MergedGateway.ContainsAutoPassthroughGateways {
			clusters = append(clusters, configgen.buildOutboundSniDnatClusters(proxy, req, patcher)...)
		}
		clusters = append(clusters, patcher.insertedClusters()...)
	}
	// OutboundTunnel cluster is needed for sidecar and gateway.
	if proxy.EnableHBONE() {
		clusters = append(clusters, cb.buildConnectOriginate(proxy, req.Push, nil))
	}
	// if credential socket exists, create a cluster for it
	if proxy.Metadata != nil && proxy.Metadata.Raw[security.CredentialMetaDataName] == "true" {
		clusters = append(clusters, cb.buildExternalSDSCluster(security.CredentialNameSocketPath))
	}
	// Wrap the non-cached clusters into discovery resources; cached outbound
	// clusters were already appended to resources in serialized form.
	for _, c := range clusters {
		resources = append(resources, &discovery.Resource{Name: c.Name, Resource: protoconv.MessageToAny(c)})
	}
	// Deduplicate clusters by name (first one wins).
	resources = cb.normalizeClusters(resources)
	if cacheStats.empty() {
		return resources, model.DefaultXdsLogDetails
	}
	return resources, model.XdsLogDetails{AdditionalInfo: fmt.Sprintf("cached:%v/%v", cacheStats.hits, cacheStats.hits+cacheStats.miss)}
}
// shouldUseDelta reports whether an incremental (delta) CDS computation can be
// used for this push: the request must be non-nil with a non-empty set of
// updated configs, all of which are delta-aware kinds.
func shouldUseDelta(updates *model.PushRequest) bool {
	// Check the cheap length condition before scanning the whole config set.
	return updates != nil && len(updates.ConfigsUpdated) > 0 && deltaAwareConfigTypes(updates.ConfigsUpdated)
}
// deltaAwareConfigTypes returns true if every updated config kind supports
// delta processing (vacuously true for an empty set).
func deltaAwareConfigTypes(cfgs sets.Set[model.ConfigKey]) bool {
	for key := range cfgs {
		if deltaConfigTypes.Contains(key.Kind.String()) {
			continue
		}
		// At least one updated config is not delta enabled.
		return false
	}
	return true
}
// buildOutboundClusters generates all outbound (including subsets) clusters for a given proxy.
// Cached clusters are returned as-is; on a cache miss the default cluster and its subset
// clusters are regenerated, patched by EnvoyFilters, and stored back in the cache.
// Returns the clusters plus cache hit/miss statistics for logging.
func (configgen *ConfigGeneratorImpl) buildOutboundClusters(cb *ClusterBuilder, proxy *model.Proxy, cp clusterPatcher,
	services []*model.Service,
) ([]*discovery.Resource, cacheStats) {
	resources := make([]*discovery.Resource, 0)
	// EnvoyFilter keys are part of the cache key so patched clusters are cached correctly.
	efKeys := cp.efw.KeysApplyingTo(networking.EnvoyFilter_CLUSTER)
	hit, miss := 0, 0
	for _, service := range services {
		// Alias services have no clusters of their own.
		if service.Resolution == model.Alias {
			continue
		}
		for _, port := range service.Ports {
			// Envoy clusters are TCP/HTTP only; UDP ports are skipped.
			if port.Protocol == protocol.UDP {
				continue
			}
			clusterKey := buildClusterKey(service, port, cb, proxy, efKeys)
			cached, allFound := cb.getAllCachedSubsetClusters(clusterKey)
			// With unsafe assertions enabled we always regenerate to validate the cache.
			if allFound && !features.EnableUnsafeAssertions {
				hit += len(cached)
				resources = append(resources, cached...)
				continue
			}
			miss += len(cached)
			// We have a cache miss, so we will re-generate the cluster and later store it in the cache.
			var lbEndpoints []*endpoint.LocalityLbEndpoints
			if clusterKey.endpointBuilder != nil {
				lbEndpoints = clusterKey.endpointBuilder.FromServiceEndpoints()
			}
			// create default cluster
			discoveryType := convertResolution(cb.proxyType, service)
			defaultCluster := cb.buildCluster(clusterKey.clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, port, service, nil)
			if defaultCluster == nil {
				continue
			}
			// if the service uses persistent sessions, override status allows
			// DRAINING endpoints to be kept as 'UNHEALTHY' coarse status in envoy.
			// Will not be used for normal traffic, only when explicit override.
			if service.Attributes.Labels[features.PersistentSessionLabel] != "" {
				// Default is UNKNOWN, HEALTHY, DEGRADED. Without this change, Envoy will drop endpoints with any other
				// status received in EDS. With this setting, the DRAINING and UNHEALTHY endpoints are kept - both marked
				// as UNHEALTHY ('coarse state'), which is what will show in config dumps.
				// DRAINING/UNHEALTHY will not be used normally for new requests. They will be used if cookie/header
				// selects them.
				defaultCluster.cluster.CommonLbConfig.OverrideHostStatus = &core.HealthStatusSet{
					Statuses: []core.HealthStatus{
						core.HealthStatus_HEALTHY,
						core.HealthStatus_DRAINING, core.HealthStatus_UNKNOWN, core.HealthStatus_DEGRADED,
					},
				}
			}
			// Apply DestinationRule traffic policy; this also produces subset clusters.
			subsetClusters := cb.applyDestinationRule(defaultCluster, DefaultClusterMode, service, port,
				clusterKey.endpointBuilder, clusterKey.destinationRule.GetRule(), clusterKey.serviceAccounts)
			if patched := cp.patch(nil, defaultCluster.build()); patched != nil {
				resources = append(resources, patched)
				if features.EnableCDSCaching {
					cb.cache.Add(&clusterKey, cb.req, patched)
				}
			}
			for _, ss := range subsetClusters {
				if patched := cp.patch(nil, ss); patched != nil {
					resources = append(resources, patched)
					if features.EnableCDSCaching {
						// Subset clusters are cached under the same key with the subset cluster name.
						nk := clusterKey
						nk.clusterName = ss.Name
						cb.cache.Add(&nk, cb.req, patched)
					}
				}
			}
		}
	}
	return resources, cacheStats{hits: hit, miss: miss}
}
// clusterPatcher applies EnvoyFilter CLUSTER patches to generated clusters
// for a specific patch context (sidecar inbound/outbound or gateway).
type clusterPatcher struct {
	// efw is the merged EnvoyFilter configuration applicable to the proxy; may be nil.
	efw *model.EnvoyFilterWrapper
	// pctx is the patch context the patches are evaluated against.
	pctx networking.EnvoyFilter_PatchContext
}
// patch applies EnvoyFilter patches to c and wraps the result in a discovery
// resource. Returns nil when the cluster is removed by a patch.
func (p clusterPatcher) patch(hosts []host.Name, c *cluster.Cluster) *discovery.Resource {
	// Named "patched" rather than "cluster" to avoid shadowing the imported
	// "cluster" package.
	patched := p.doPatch(hosts, c)
	if patched == nil {
		return nil
	}
	return &discovery.Resource{Name: patched.Name, Resource: protoconv.MessageToAny(patched)}
}
// doPatch evaluates EnvoyFilter patches for c: it returns nil if a patch
// removes the cluster, otherwise the (possibly merged) cluster.
func (p clusterPatcher) doPatch(hosts []host.Name, c *cluster.Cluster) *cluster.Cluster {
	if envoyfilter.ShouldKeepCluster(p.pctx, p.efw, c, hosts) {
		return envoyfilter.ApplyClusterMerge(p.pctx, p.efw, c, hosts)
	}
	return nil
}
// conditionallyAppend appends clusters to l, running each through the
// EnvoyFilter patches first when any CLUSTER patches exist. Clusters removed
// by a patch are dropped.
func (p clusterPatcher) conditionallyAppend(l []*cluster.Cluster, hosts []host.Name, clusters ...*cluster.Cluster) []*cluster.Cluster {
	// Fast path: nothing to patch, append everything unchanged.
	if !p.hasPatches() {
		return append(l, clusters...)
	}
	for _, c := range clusters {
		patched := p.doPatch(hosts, c)
		if patched == nil {
			continue
		}
		l = append(l, patched)
	}
	return l
}
// insertedClusters returns clusters added (not merged/removed) by EnvoyFilter
// ADD operations for this patch context.
func (p clusterPatcher) insertedClusters() []*cluster.Cluster {
	return envoyfilter.InsertedClusters(p.pctx, p.efw)
}
// hasPatches reports whether any CLUSTER-targeting EnvoyFilter patches exist.
func (p clusterPatcher) hasPatches() bool {
	if p.efw == nil {
		return false
	}
	return len(p.efw.Patches[networking.EnvoyFilter_CLUSTER]) > 0
}
// SniDnat clusters do not have any TLS setting, as they simply forward traffic to upstream
// All SniDnat clusters are internal services in the mesh.
// TODO enable cache - there is no blockers here, skipped to simplify the original caching implementation
func (configgen *ConfigGeneratorImpl) buildOutboundSniDnatClusters(proxy *model.Proxy, req *model.PushRequest,
	cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	// No cache passed: SNI-DNAT clusters are not cached (see TODO above).
	cb := NewClusterBuilder(proxy, req, nil)
	for _, service := range proxy.SidecarScope.Services() {
		// Only in-mesh services get SNI-DNAT clusters.
		if service.MeshExternal {
			continue
		}
		destRule := proxy.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, proxy, service.Hostname)
		for _, port := range service.Ports {
			// Envoy clusters are TCP/HTTP only; skip UDP ports.
			if port.Protocol == protocol.UDP {
				continue
			}
			// create default cluster
			discoveryType := convertResolution(cb.proxyType, service)
			clusterName := model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "",
				service.Hostname, port.Port)
			var lbEndpoints []*endpoint.LocalityLbEndpoints
			var endpointBuilder *endpoints.EndpointBuilder
			// DNS-resolved services carry their endpoints inline in the cluster
			// (STRICT_DNS / LOGICAL_DNS); EDS services get endpoints separately.
			if service.Resolution == model.DNSLB || service.Resolution == model.DNSRoundRobinLB {
				endpointBuilder = endpoints.NewCDSEndpointBuilder(proxy, cb.req.Push,
					clusterName, model.TrafficDirectionOutbound, "", service.Hostname, port.Port,
					service, destRule,
				)
				lbEndpoints = endpointBuilder.FromServiceEndpoints()
			}
			defaultCluster := cb.buildCluster(clusterName, discoveryType, lbEndpoints, model.TrafficDirectionOutbound, port, service, nil)
			if defaultCluster == nil {
				continue
			}
			// SniDnatClusterMode skips mTLS/SNI setup since traffic is simply forwarded.
			subsetClusters := cb.applyDestinationRule(defaultCluster, SniDnatClusterMode, service, port, endpointBuilder, destRule.GetRule(), nil)
			clusters = cp.conditionallyAppend(clusters, nil, defaultCluster.build())
			clusters = cp.conditionallyAppend(clusters, nil, subsetClusters...)
		}
	}
	return clusters
}
// buildInboundLocalityLbEndpoints builds a single-endpoint locality list for
// an inbound cluster bound to the given address and port. Returns nil when
// bind is empty (passthrough mode, no static endpoint).
func buildInboundLocalityLbEndpoints(bind string, port uint32) []*endpoint.LocalityLbEndpoints {
	if bind == "" {
		return nil
	}
	ep := &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: util.BuildAddress(bind, port),
			},
		},
	}
	return []*endpoint.LocalityLbEndpoints{
		{LbEndpoints: []*endpoint.LbEndpoint{ep}},
	}
}
// buildInboundClustersFromServiceInstances builds one inbound cluster per workload
// (target) port from the proxy's service instances. When
// enableSidecarServiceInboundListenerMerge is set and the sidecar has ingress
// listeners, ports already declared in the sidecar ingress are skipped so the
// sidecar-defined cluster takes precedence.
func buildInboundClustersFromServiceInstances(cb *ClusterBuilder, proxy *model.Proxy,
	instances []model.ServiceTarget, cp clusterPatcher,
	enableSidecarServiceInboundListenerMerge bool,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	_, actualLocalHosts := getWildcardsAndLocalHost(proxy.GetIPMode())
	clustersToBuild := make(map[int][]model.ServiceTarget)
	ingressPortListSet := sets.New[int]()
	sidecarScope := proxy.SidecarScope
	if enableSidecarServiceInboundListenerMerge && sidecarScope.HasIngressListener() {
		ingressPortListSet = getSidecarIngressPortList(proxy)
	}
	for _, instance := range instances {
		// For service instances with the same port,
		// we still need to capture all the instances on this port, as its required to populate telemetry metadata
		// The first instance will be used as the "primary" instance; this means if we have an conflicts between
		// Services the first one wins
		port := int(instance.Port.TargetPort)
		clustersToBuild[port] = append(clustersToBuild[port], instance)
	}
	// Default bind is the node-local address (e.g. 127.0.0.1); in PASSTHROUGH
	// inbound mode no static endpoint is bound and ORIGINAL_DST is used instead.
	bind := actualLocalHosts[0]
	if cb.req.Push.Mesh.GetInboundTrafficPolicy().GetMode() == meshconfig.MeshConfig_InboundTrafficPolicy_PASSTHROUGH {
		bind = ""
	}
	// For each workload port, we will construct a cluster
	for epPort, instances := range clustersToBuild {
		if ingressPortListSet.Contains(int(instances[0].Port.TargetPort)) {
			// here if port is declared in service and sidecar ingress both, we continue to take the one on sidecar + other service ports
			// e.g. 1,2, 3 in service and 3,4 in sidecar ingress,
			// this will still generate listeners for 1,2,3,4 where 3 is picked from sidecar ingress
			// port present in sidecarIngress listener so let sidecar take precedence
			continue
		}
		// All instances on the port are passed along so telemetry metadata covers every service.
		localCluster := cb.buildInboundCluster(epPort, bind, proxy, instances[0], instances)
		// If inbound cluster match has service, we should see if it matches with any host name across all instances.
		hosts := make([]host.Name, 0, len(instances))
		for _, si := range instances {
			hosts = append(hosts, si.Service.Hostname)
		}
		clusters = cp.conditionallyAppend(clusters, hosts, localCluster.build())
	}
	return clusters
}
// buildInboundClusters generates the inbound clusters for the proxy.
// Without sidecar ingress listeners, clusters are derived from service
// instances (or skipped entirely in NONE interception mode, where no inbound
// listeners exist). With ingress listeners, clusters come from the sidecar
// config, optionally merged with service-instance clusters when
// EnableSidecarServiceInboundListenerMerge is set.
func (configgen *ConfigGeneratorImpl) buildInboundClusters(cb *ClusterBuilder, proxy *model.Proxy, instances []model.ServiceTarget,
	cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	if !proxy.SidecarScope.HasIngressListener() {
		// No user supplied sidecar scope, or it has no ingress listeners.
		// In NONE interception mode we must not create inbound clusters from
		// service instances: the corresponding listeners would collide with the
		// workload's own ports and there are no inbound listeners anyway.
		if proxy.GetInterceptionMode() == model.InterceptionNone {
			return nil
		}
		return buildInboundClustersFromServiceInstances(cb, proxy, instances, cp, false)
	}
	// Sidecar has ingress listeners: merge in service-instance clusters only when
	// the pilot-wide merge feature is enabled.
	if features.EnableSidecarServiceInboundListenerMerge {
		clusters = buildInboundClustersFromServiceInstances(cb, proxy, instances, cp, true)
	}
	clusters = append(clusters, buildInboundClustersFromSidecar(cb, proxy, instances, cp)...)
	return clusters
}
// buildInboundClustersFromSidecar builds one inbound cluster per sidecar
// ingress listener. The cluster endpoint is resolved from the listener's
// defaultEndpoint (UDS path, pod IP, or localhost variants); an empty endpoint
// results in ORIGINAL_DST passthrough.
func buildInboundClustersFromSidecar(cb *ClusterBuilder, proxy *model.Proxy,
	instances []model.ServiceTarget, cp clusterPatcher,
) []*cluster.Cluster {
	clusters := make([]*cluster.Cluster, 0)
	_, actualLocalHosts := getWildcardsAndLocalHost(proxy.GetIPMode())
	sidecarScope := proxy.SidecarScope
	for _, ingressListener := range sidecarScope.Sidecar.Ingress {
		// LDS would have setup the inbound clusters
		// as inbound|portNumber|portName|Hostname[or]SidecarScopeID
		listenPort := &model.Port{
			Port:     int(ingressListener.Port.Number),
			Protocol: protocol.Parse(ingressListener.Port.Protocol),
			Name:     ingressListener.Port.Name,
		}
		// Set up the endpoint. By default, we set this empty which will use ORIGINAL_DST passthrough.
		// This can be overridden by ingress.defaultEndpoint.
		// * 127.0.0.1: send to localhost
		// * 0.0.0.0: send to INSTANCE_IP
		// * unix:///...: send to configured unix domain socket
		endpointAddress := ""
		port := 0
		if strings.HasPrefix(ingressListener.DefaultEndpoint, model.UnixAddressPrefix) {
			// this is a UDS endpoint. assign it as is
			endpointAddress = ingressListener.DefaultEndpoint
		} else if len(ingressListener.DefaultEndpoint) > 0 {
			// parse the ip, port. Validation guarantees presence of :
			hostIP, hostPort, hostErr := net.SplitHostPort(ingressListener.DefaultEndpoint)
			if hostPort == "" || hostErr != nil {
				// Malformed endpoint: skip this listener entirely.
				continue
			}
			var err error
			if port, err = strconv.Atoi(hostPort); err != nil {
				continue
			}
			if hostIP == model.PodIPAddressPrefix {
				// 0.0.0.0: use the proxy's first IPv4 address.
				for _, proxyIPAddr := range cb.proxyIPAddresses {
					if netutil.IsIPv4Address(proxyIPAddr) {
						endpointAddress = proxyIPAddr
						break
					}
				}
				// if there is no any IPv4 address in proxyIPAddresses
				if endpointAddress == "" {
					endpointAddress = model.LocalhostAddressPrefix
				}
			} else if hostIP == model.PodIPv6AddressPrefix {
				// ::: use the proxy's first IPv6 address.
				for _, proxyIPAddr := range cb.proxyIPAddresses {
					if netutil.IsIPv6Address(proxyIPAddr) {
						endpointAddress = proxyIPAddr
						break
					}
				}
				// if there is no any IPv6 address in proxyIPAddresses
				if endpointAddress == "" {
					endpointAddress = model.LocalhostIPv6AddressPrefix
				}
			} else if hostIP == model.LocalhostAddressPrefix {
				// prefer 127.0.0.1 to ::1, but if given no option choose ::1
				ipV6EndpointAddress := ""
				for _, host := range actualLocalHosts {
					if netutil.IsIPv4Address(host) {
						endpointAddress = host
						break
					}
					if netutil.IsIPv6Address(host) {
						ipV6EndpointAddress = host
					}
				}
				if endpointAddress == "" {
					endpointAddress = ipV6EndpointAddress
				}
			} else if hostIP == model.LocalhostIPv6AddressPrefix {
				// prefer ::1 to 127.0.0.1, but if given no option choose 127.0.0.1
				ipV4EndpointAddress := ""
				for _, host := range actualLocalHosts {
					if netutil.IsIPv6Address(host) {
						endpointAddress = host
						break
					}
					if netutil.IsIPv4Address(host) {
						ipV4EndpointAddress = host
					}
				}
				if endpointAddress == "" {
					endpointAddress = ipV4EndpointAddress
				}
			}
		}
		// Find the service instance that corresponds to this ingress listener by looking
		// for a service instance that matches this ingress port as this will allow us
		// to generate the right cluster name that LDS expects inbound|portNumber|portName|Hostname
		svc := findOrCreateService(instances, ingressListener, sidecarScope.Name, sidecarScope.Namespace)
		endpoint := model.ServiceTarget{
			Service: svc,
			Port: model.ServiceInstancePort{
				ServicePort: listenPort,
				TargetPort:  uint32(port),
			},
		}
		localCluster := cb.buildInboundCluster(int(ingressListener.Port.Number), endpointAddress, proxy, endpoint, nil)
		clusters = cp.conditionallyAppend(clusters, []host.Name{endpoint.Service.Hostname}, localCluster.build())
	}
	return clusters
}
// findOrCreateService returns the service of the first instance whose target
// port matches the ingress listener port. If none matches, it synthesizes a
// placeholder service named after the sidecar scope so the generated cluster
// name (inbound|portNumber|portName|SidecarScopeID) still lines up with LDS.
func findOrCreateService(instances []model.ServiceTarget,
	ingressListener *networking.IstioIngressListener, sidecar string, sidecarns string,
) *model.Service {
	for i := range instances {
		if instances[i].Port.TargetPort == ingressListener.Port.Number {
			return instances[i].Service
		}
	}
	// No matching instance: build a dummy service carrying the right identifiers.
	return &model.Service{
		Hostname: host.Name(sidecar + "." + sidecarns),
		Attributes: model.ServiceAttributes{
			Name: sidecar,
			// This ensures that the right AuthN policies are selected.
			Namespace: sidecarns,
		},
	}
}
// convertResolution maps an Istio service resolution to the Envoy cluster
// discovery type, taking the proxy type into account for passthrough services.
func convertResolution(proxyType model.NodeType, service *model.Service) cluster.Cluster_DiscoveryType {
	switch service.Resolution {
	case model.ClientSideLB:
		return cluster.Cluster_EDS
	case model.DNSLB:
		return cluster.Cluster_STRICT_DNS
	case model.DNSRoundRobinLB:
		return cluster.Cluster_LOGICAL_DNS
	case model.Passthrough:
		// Gateways cannot use passthrough clusters, and headless Kubernetes
		// services may be forced onto EDS by feature flag; both fall back to EDS.
		gatewayProxy := proxyType == model.Router
		headlessEDS := service.Attributes.ServiceRegistry == provider.Kubernetes && features.EnableEDSForHeadless
		if gatewayProxy || headlessEDS {
			return cluster.Cluster_EDS
		}
		return cluster.Cluster_ORIGINAL_DST
	default:
		return cluster.Cluster_EDS
	}
}
// ClusterMode defines whether the cluster is being built for SNI-DNATing (sni passthrough) or not
type ClusterMode string

const (
	// SniDnatClusterMode indicates cluster is being built for SNI dnat mode;
	// such clusters skip TLS settings and simply forward traffic upstream.
	SniDnatClusterMode ClusterMode = "sni-dnat"
	// DefaultClusterMode indicates usual cluster with mTLS et al
	DefaultClusterMode ClusterMode = "outbound"
)
// buildClusterOpts carries all inputs needed to apply traffic policy and TLS
// settings to a cluster being built.
type buildClusterOpts struct {
	// mesh is the mesh-wide configuration (connection pool defaults, TLS, etc.).
	mesh *meshconfig.MeshConfig
	// mutable is the cluster being built, wrapped with its HTTP protocol options.
	mutable *clusterWrapper
	// policy is the (possibly port-level merged) DestinationRule traffic policy.
	policy *networking.TrafficPolicy
	// port is the service port the cluster targets.
	port *model.Port
	// serviceAccounts are the accounts used for SAN verification in mTLS.
	serviceAccounts []string
	// serviceTargets are the proxy's own service targets.
	serviceTargets []model.ServiceTarget
	// Used for traffic across multiple network clusters
	// the east-west gateway in a remote cluster will use this value to route
	// traffic to the appropriate service
	istioMtlsSni string
	// clusterMode distinguishes the default outbound mode from SNI-DNAT mode.
	clusterMode ClusterMode
	// direction is the traffic direction the cluster serves.
	direction model.TrafficDirection
	// meshExternal indicates the target service is outside the mesh.
	meshExternal bool
	// serviceMTLSMode is the inferred mTLS mode toward the service.
	serviceMTLSMode model.MutualTLSMode
	// Indicates the service registry of the cluster being built.
	serviceRegistry provider.ID
	// Indicates if the destinationRule has a workloadSelector
	isDrWithSelector bool
}
// applyTCPKeepalive configures TCP keepalive on the cluster: mesh-wide
// settings are applied first, then DestinationRule settings override/extend
// them when present.
func applyTCPKeepalive(mesh *meshconfig.MeshConfig, c *cluster.Cluster, tcp *networking.ConnectionPoolSettings_TCPSettings) {
	// Mesh-wide keepalive forms the baseline.
	setKeepAliveSettings(c, mesh.TcpKeepalive)
	if tcp == nil {
		return
	}
	// DestinationRule-level keepalive overrides individual attributes.
	setKeepAliveSettings(c, tcp.TcpKeepalive)
}
// setKeepAliveSettings copies the provided keepalive attributes onto the
// cluster's upstream connection options. Only attributes explicitly set in
// keepalive are written; a no-op when keepalive is nil.
func setKeepAliveSettings(c *cluster.Cluster, ka *networking.ConnectionPoolSettings_TCPSettings_TcpKeepalive) {
	if ka == nil {
		return
	}
	// Start with an empty tcp_keepalive, which sets SO_KEEPALIVE on the socket
	// with OS default values.
	if c.UpstreamConnectionOptions == nil {
		c.UpstreamConnectionOptions = &cluster.UpstreamConnectionOptions{
			TcpKeepalive: &core.TcpKeepalive{},
		}
	}
	tk := c.UpstreamConnectionOptions.TcpKeepalive
	if ka.Probes > 0 {
		tk.KeepaliveProbes = &wrappers.UInt32Value{Value: ka.Probes}
	}
	if ka.Time != nil {
		tk.KeepaliveTime = &wrappers.UInt32Value{Value: uint32(ka.Time.Seconds)}
	}
	if ka.Interval != nil {
		tk.KeepaliveInterval = &wrappers.UInt32Value{Value: uint32(ka.Interval.Seconds)}
	}
}
// buildServiceMetadata builds a struct value holding the service's FQDN, short
// name and namespace, for inclusion in the cluster's Istio metadata.
func buildServiceMetadata(svc *model.Service) *structpb.Value {
	// Small helper to avoid repeating the verbose string-value wrapping.
	str := func(s string) *structpb.Value {
		return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: s}}
	}
	return &structpb.Value{
		Kind: &structpb.Value_StructValue{
			StructValue: &structpb.Struct{
				Fields: map[string]*structpb.Value{
					// service fqdn
					"host": str(string(svc.Hostname)),
					// short name of the service
					"name": str(svc.Attributes.Name),
					// namespace of the service
					"namespace": str(svc.Attributes.Namespace),
				},
			},
		},
	}
}
// getOrCreateIstioMetadata returns the Istio filter-metadata struct of the
// cluster, creating the metadata containers on first use.
// The parameter is named "c" rather than "cluster" so it does not shadow the
// imported "cluster" package.
func getOrCreateIstioMetadata(c *cluster.Cluster) *structpb.Struct {
	if c.Metadata == nil {
		c.Metadata = &core.Metadata{
			FilterMetadata: map[string]*structpb.Struct{},
		}
	}
	// Create Istio metadata if it does not exist yet.
	md, ok := c.Metadata.FilterMetadata[util.IstioMetadataKey]
	if !ok {
		md = &structpb.Struct{
			Fields: map[string]*structpb.Value{},
		}
		c.Metadata.FilterMetadata[util.IstioMetadataKey] = md
	}
	return md
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"time"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/protobuf/proto"
anypb "google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
networkutil "istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/endpoints"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/sets"
)
// passthroughHttpProtocolOptions are http protocol options used for pass through clusters.
// Upstream protocol mirrors the downstream protocol (HTTP/1.1 or HTTP/2), and
// idle connections are closed after 5 minutes. Marshaled once at init since the
// options are identical for every passthrough cluster.
// nolint
// revive:disable-next-line
var passthroughHttpProtocolOptions = protoconv.MessageToAny(&http.HttpProtocolOptions{
	CommonHttpProtocolOptions: &core.HttpProtocolOptions{
		IdleTimeout: durationpb.New(5 * time.Minute),
	},
	UpstreamProtocolOptions: &http.HttpProtocolOptions_UseDownstreamProtocolConfig{
		UseDownstreamProtocolConfig: &http.HttpProtocolOptions_UseDownstreamHttpConfig{
			HttpProtocolOptions:  &core.Http1ProtocolOptions{},
			Http2ProtocolOptions: http2ProtocolOptions(),
		},
	},
})
// clusterWrapper wraps Cluster object along with upstream protocol options.
type clusterWrapper struct {
	// cluster is the Envoy cluster being assembled.
	cluster *cluster.Cluster
	// httpProtocolOptions stores the HttpProtocolOptions which will be marshaled when build is called.
	httpProtocolOptions *http.HttpProtocolOptions
}
// metadataCerts hosts client certificate related metadata specified in proxy metadata.
// All paths are absolute and come from the proxy's bootstrap metadata.
type metadataCerts struct {
	// tlsClientCertChain is the absolute path to client cert-chain file
	tlsClientCertChain string
	// tlsClientKey is the absolute path to client private key file
	tlsClientKey string
	// tlsClientRootCert is the absolute path to client root cert file
	tlsClientRootCert string
}
// ClusterBuilder provides an abstraction for building Envoy Clusters.
// It snapshots all per-proxy state needed during cluster generation so the
// build methods do not need to reach back into the Proxy object.
type ClusterBuilder struct {
	// Proxy related information used to build clusters.
	serviceTargets     []model.ServiceTarget // Service targets of Proxy.
	metadataCerts      *metadataCerts        // Client certificates specified in metadata.
	clusterID          string                // Cluster in which proxy is running.
	proxyID            string                // Identifier that uniquely identifies a proxy.
	proxyVersion       string                // Version of Proxy.
	proxyType          model.NodeType        // Indicates whether the proxy is sidecar or gateway.
	sidecarScope       *model.SidecarScope   // Computed sidecar for the proxy.
	passThroughBindIPs []string              // Passthrough IPs to be used while building clusters.
	supportsIPv4       bool                  // Whether Proxy IPs has IPv4 address.
	supportsIPv6       bool                  // Whether Proxy IPs has IPv6 address.
	hbone              bool                  // Does the proxy support HBONE
	locality           *core.Locality        // Locality information of proxy.
	proxyLabels        map[string]string     // Proxy labels.
	proxyView          model.ProxyView       // Proxy view of endpoints.
	proxyIPAddresses   []string              // IP addresses on which proxy is listening on.
	configNamespace    string                // Proxy config namespace.
	// PushRequest to look for updates.
	req *model.PushRequest
	// cache is the CDS cluster cache; may be nil when caching is not desired (e.g. SNI-DNAT).
	cache model.XdsCache
	// credentialSocketExist indicates the workload exposes an SDS credential socket.
	credentialSocketExist bool
}
// NewClusterBuilder builds an instance of ClusterBuilder, snapshotting the
// proxy state needed for cluster generation. cache may be nil to disable CDS
// caching for the built clusters.
func NewClusterBuilder(proxy *model.Proxy, req *model.PushRequest, cache model.XdsCache) *ClusterBuilder {
	cb := &ClusterBuilder{
		serviceTargets:     proxy.ServiceTargets,
		proxyID:            proxy.ID,
		proxyType:          proxy.Type,
		proxyVersion:       proxy.Metadata.IstioVersion,
		sidecarScope:       proxy.SidecarScope,
		passThroughBindIPs: getPassthroughBindIPs(proxy.GetIPMode()),
		supportsIPv4:       proxy.SupportsIPv4(),
		supportsIPv6:       proxy.SupportsIPv6(),
		hbone:              proxy.EnableHBONE() || proxy.IsWaypointProxy(),
		locality:           proxy.Locality,
		proxyLabels:        proxy.Labels,
		proxyView:          proxy.GetView(),
		proxyIPAddresses:   proxy.IPAddresses,
		configNamespace:    proxy.ConfigNamespace,
		req:                req,
		cache:              cache,
	}
	if proxy.Metadata != nil {
		// Client cert paths are only recorded when a cert chain is configured.
		if proxy.Metadata.TLSClientCertChain != "" {
			cb.metadataCerts = &metadataCerts{
				tlsClientCertChain: proxy.Metadata.TLSClientCertChain,
				tlsClientKey:       proxy.Metadata.TLSClientKey,
				tlsClientRootCert:  proxy.Metadata.TLSClientRootCert,
			}
		}
		cb.clusterID = string(proxy.Metadata.ClusterID)
		// Record whether the workload exposes an SDS credential socket.
		if proxy.Metadata.Raw[security.CredentialMetaDataName] == "true" {
			cb.credentialSocketExist = true
		}
	}
	return cb
}
// String returns a "~"-joined representation of the client certificate paths,
// usable as a cache key component.
func (m *metadataCerts) String() string {
	return fmt.Sprintf("%s~%s~%s", m.tlsClientCertChain, m.tlsClientKey, m.tlsClientRootCert)
}
// newClusterWrapper wraps the given cluster; protocol options start unset and
// are populated later as traffic policy is applied.
func newClusterWrapper(c *cluster.Cluster) *clusterWrapper {
	return &clusterWrapper{cluster: c}
}
// sidecarProxy returns true if the clusters are being built for sidecar proxy otherwise false.
func (cb *ClusterBuilder) sidecarProxy() bool {
	return cb.proxyType == model.SidecarProxy
}
// buildSubsetCluster builds a cluster for one DestinationRule subset, applying
// the merged (rule + subset) traffic policy, metadata exchange, and config
// metadata. Returns nil when the underlying cluster cannot be built.
// NOTE(review): destRule.Meta is read unconditionally below; callers appear to
// only invoke this when destRule is non-nil (subsets come from the rule) — confirm.
func (cb *ClusterBuilder) buildSubsetCluster(
	opts buildClusterOpts, destRule *config.Config, subset *networking.Subset, service *model.Service,
	endpointBuilder *endpoints.EndpointBuilder,
) *cluster.Cluster {
	opts.serviceMTLSMode = cb.req.Push.BestEffortInferServiceMTLSMode(subset.GetTrafficPolicy(), service, opts.port)
	var subsetClusterName string
	var defaultSni string
	// In default mode the SNI used for cross-network mTLS is also derived here;
	// SNI-DNAT mode uses the DNS-srv-style name as the cluster name itself.
	if opts.clusterMode == DefaultClusterMode {
		subsetClusterName = model.BuildSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
		defaultSni = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
	} else {
		subsetClusterName = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, opts.port.Port)
	}
	// clusters with discovery type STATIC, STRICT_DNS rely on cluster.LoadAssignment field.
	// ServiceEntry's need to filter hosts based on subset.labels in order to perform weighted routing
	var lbEndpoints []*endpoint.LocalityLbEndpoints
	isPassthrough := subset.GetTrafficPolicy().GetLoadBalancer().GetSimple() == networking.LoadBalancerSettings_PASSTHROUGH
	clusterType := opts.mutable.cluster.GetType()
	if isPassthrough {
		// A PASSTHROUGH load balancer forces ORIGINAL_DST regardless of resolution.
		clusterType = cluster.Cluster_ORIGINAL_DST
	}
	if !(isPassthrough || clusterType == cluster.Cluster_EDS) {
		// Inline endpoints are needed; filter them by the subset's labels.
		lbEndpoints = endpointBuilder.WithSubset(subset.Name).FromServiceEndpoints()
	}
	subsetCluster := cb.buildCluster(subsetClusterName, clusterType, lbEndpoints, model.TrafficDirectionOutbound, opts.port, service, nil)
	if subsetCluster == nil {
		return nil
	}
	// Apply traffic policy for subset cluster with the destination rule traffic policy.
	opts.mutable = subsetCluster
	opts.istioMtlsSni = defaultSni
	// If subset has a traffic policy, apply it so that it overrides the destination rule traffic policy.
	opts.policy = util.MergeSubsetTrafficPolicy(opts.policy, subset.TrafficPolicy, opts.port)
	if destRule != nil {
		destinationRule := CastDestinationRule(destRule)
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
	}
	// Apply traffic policy for the subset cluster.
	cb.applyTrafficPolicy(opts)
	// Must run after traffic policy, which may switch the cluster to EDS.
	maybeApplyEdsConfig(subsetCluster.cluster)
	cb.applyMetadataExchange(opts.mutable.cluster)
	// Add the DestinationRule+subsets metadata. Metadata here is generated on a per-cluster
	// basis in buildCluster, so we can just insert without a copy.
	subsetCluster.cluster.Metadata = util.AddConfigInfoMetadata(subsetCluster.cluster.Metadata, destRule.Meta)
	util.AddSubsetToMetadata(subsetCluster.cluster.Metadata, subset.Name)
	subsetCluster.cluster.Metadata = util.AddALPNOverrideToMetadata(subsetCluster.cluster.Metadata, opts.policy.GetTls().GetMode())
	return subsetCluster.build()
}
// applyDestinationRule applies the destination rule if it exists for the Service.
// It returns the subset clusters if any created as it applies the destination rule.
// The main cluster mc is mutated in place (traffic policy, EDS config, metadata
// exchange, config metadata); one additional cluster is built per subset.
func (cb *ClusterBuilder) applyDestinationRule(mc *clusterWrapper, clusterMode ClusterMode, service *model.Service,
	port *model.Port, eb *endpoints.EndpointBuilder, destRule *config.Config, serviceAccounts []string,
) []*cluster.Cluster {
	// CastDestinationRule tolerates a nil destRule and yields a nil rule.
	destinationRule := CastDestinationRule(destRule)
	// merge applicable port level traffic policy settings
	trafficPolicy, _ := util.GetPortLevelTrafficPolicy(destinationRule.GetTrafficPolicy(), port)
	opts := buildClusterOpts{
		mesh:           cb.req.Push.Mesh,
		serviceTargets: cb.serviceTargets,
		mutable:        mc,
		policy:         trafficPolicy,
		port:           port,
		clusterMode:    clusterMode,
		direction:      model.TrafficDirectionOutbound,
	}
	// mTLS-related options apply only in default mode; SNI-DNAT clusters carry no TLS.
	if clusterMode == DefaultClusterMode {
		opts.serviceAccounts = serviceAccounts
		opts.istioMtlsSni = model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
		opts.meshExternal = service.MeshExternal
		opts.serviceRegistry = service.Attributes.ServiceRegistry
		opts.serviceMTLSMode = cb.req.Push.BestEffortInferServiceMTLSMode(destinationRule.GetTrafficPolicy(), service, port)
	}
	if destRule != nil {
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
	}
	// Apply traffic policy for the main default cluster.
	cb.applyTrafficPolicy(opts)
	// Apply EdsConfig if needed. This should be called after traffic policy is applied because, traffic policy might change
	// discovery type.
	maybeApplyEdsConfig(mc.cluster)
	cb.applyMetadataExchange(opts.mutable.cluster)
	// Mark mesh-external services in the Istio metadata.
	if service.MeshExternal {
		im := getOrCreateIstioMetadata(mc.cluster)
		im.Fields["external"] = &structpb.Value{
			Kind: &structpb.Value_BoolValue{
				BoolValue: true,
			},
		}
	}
	if destRule != nil {
		mc.cluster.Metadata = util.AddConfigInfoMetadata(mc.cluster.Metadata, destRule.Meta)
		mc.cluster.Metadata = util.AddALPNOverrideToMetadata(mc.cluster.Metadata, opts.policy.GetTls().GetMode())
	}
	// Build one cluster per subset defined by the rule (none when destRule is nil).
	subsetClusters := make([]*cluster.Cluster, 0)
	for _, subset := range destinationRule.GetSubsets() {
		subsetCluster := cb.buildSubsetCluster(opts, destRule, subset, service, eb)
		if subsetCluster != nil {
			subsetClusters = append(subsetClusters, subsetCluster)
		}
	}
	return subsetClusters
}
// applyMetadataExchange appends the TCP metadata-exchange cluster filter when the
// MetadataExchange feature is enabled; otherwise the cluster is left untouched.
func (cb *ClusterBuilder) applyMetadataExchange(c *cluster.Cluster) {
	if !features.MetadataExchange {
		return
	}
	c.Filters = append(c.Filters, xdsfilters.TCPClusterMx)
}
// buildCluster builds the default cluster and also applies global options.
// It is used for building both inbound and outbound cluster.
func (cb *ClusterBuilder) buildCluster(name string, discoveryType cluster.Cluster_DiscoveryType,
	localityLbEndpoints []*endpoint.LocalityLbEndpoints, direction model.TrafficDirection,
	port *model.Port, service *model.Service, inboundServices []model.ServiceTarget,
) *clusterWrapper {
	c := &cluster.Cluster{
		Name:                 name,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: discoveryType},
		CommonLbConfig:       &cluster.Cluster_CommonLbConfig{},
	}
	switch discoveryType {
	case cluster.Cluster_STRICT_DNS, cluster.Cluster_LOGICAL_DNS:
		// Pick the DNS lookup family matching the proxy's own IP stack.
		if networkutil.AllIPv4(cb.proxyIPAddresses) {
			// IPv4 only
			c.DnsLookupFamily = cluster.Cluster_V4_ONLY
		} else if networkutil.AllIPv6(cb.proxyIPAddresses) {
			// IPv6 only
			c.DnsLookupFamily = cluster.Cluster_V6_ONLY
		} else {
			// Dual Stack
			if features.EnableDualStack {
				// using Cluster_ALL to enable Happy Eyeballs for upstream connections
				c.DnsLookupFamily = cluster.Cluster_ALL
			} else {
				// keep the original logic if Dual Stack is disabled
				c.DnsLookupFamily = cluster.Cluster_V4_ONLY
			}
		}
		c.DnsRefreshRate = cb.req.Push.Mesh.DnsRefreshRate
		c.RespectDnsTtl = true
		// DNS clusters also carry an inline load assignment; fall through to the STATIC case.
		fallthrough
	case cluster.Cluster_STATIC:
		// STATIC/DNS clusters must list their endpoints inline; with none, the cluster is unusable.
		if len(localityLbEndpoints) == 0 {
			log.Debugf("locality endpoints missing for cluster %s", c.Name)
			cb.req.Push.AddMetric(model.DNSNoEndpointClusters, c.Name, cb.proxyID,
				fmt.Sprintf("%s cluster without endpoints %s found while pushing CDS", discoveryType.String(), c.Name))
			return nil
		}
		c.LoadAssignment = &endpoint.ClusterLoadAssignment{
			ClusterName: name,
			Endpoints:   localityLbEndpoints,
		}
	}
	ec := newClusterWrapper(c)
	cb.setUpstreamProtocol(ec, port)
	addTelemetryMetadata(c, port, service, direction, inboundServices)
	if direction == model.TrafficDirectionOutbound {
		// If stat name is configured, build the alternate stats name.
		if len(cb.req.Push.Mesh.OutboundClusterStatName) != 0 {
			ec.cluster.AltStatName = telemetry.BuildStatPrefix(cb.req.Push.Mesh.OutboundClusterStatName,
				string(service.Hostname), "", port, 0, &service.Attributes)
		}
	}
	return ec
}
// buildInboundCluster constructs a single inbound cluster. The cluster will be bound to
// `inbound|clusterPort||`, and send traffic to <bind>:<instance.Endpoint.EndpointPort>. A workload
// will have a single inbound cluster per port. In general this works properly, with the exception of
// the Service-oriented DestinationRule, and upstream protocol selection. Our documentation currently
// requires a single protocol per port, and the DestinationRule issue is slated to move to Sidecar.
// Note: clusterPort and instance.Endpoint.EndpointPort are identical for standard Services; however,
// Sidecar.Ingress allows these to be different.
func (cb *ClusterBuilder) buildInboundCluster(clusterPort int, bind string,
	proxy *model.Proxy, instance model.ServiceTarget, inboundServices []model.ServiceTarget,
) *clusterWrapper {
	clusterName := model.BuildInboundSubsetKey(clusterPort)
	localityLbEndpoints := buildInboundLocalityLbEndpoints(bind, instance.Port.TargetPort)
	// With no explicit endpoints, fall back to ORIGINAL_DST so Envoy forwards to the original target.
	clusterType := cluster.Cluster_ORIGINAL_DST
	if len(localityLbEndpoints) > 0 {
		clusterType = cluster.Cluster_STATIC
	}
	localCluster := cb.buildCluster(clusterName, clusterType, localityLbEndpoints,
		model.TrafficDirectionInbound, instance.Port.ServicePort, instance.Service, inboundServices)
	// If stat name is configured, build the alt statname.
	if len(cb.req.Push.Mesh.InboundClusterStatName) != 0 {
		localCluster.cluster.AltStatName = telemetry.BuildStatPrefix(cb.req.Push.Mesh.InboundClusterStatName,
			string(instance.Service.Hostname), "", instance.Port.ServicePort, clusterPort, &instance.Service.Attributes)
	}
	opts := buildClusterOpts{
		mesh:            cb.req.Push.Mesh,
		mutable:         localCluster,
		policy:          nil,
		port:            instance.Port.ServicePort,
		serviceAccounts: nil,
		serviceTargets:  cb.serviceTargets,
		istioMtlsSni:    "",
		clusterMode:     DefaultClusterMode,
		direction:       model.TrafficDirectionInbound,
	}
	// When users specify circuit breakers, they need to be set on the receiver end
	// (server side) as well as client side, so that the server has enough capacity
	// (not the defaults) to handle the increased traffic volume
	// TODO: This is not foolproof - if instance is part of multiple services listening on same port,
	// choice of inbound cluster is arbitrary. So the connection pool settings may not apply cleanly.
	cfg := proxy.SidecarScope.DestinationRule(model.TrafficDirectionInbound, proxy, instance.Service.Hostname).GetRule()
	if cfg != nil {
		destinationRule := CastDestinationRule(cfg)
		opts.isDrWithSelector = destinationRule.GetWorkloadSelector() != nil
		if destinationRule.TrafficPolicy != nil {
			opts.policy, _ = util.GetPortLevelTrafficPolicy(destinationRule.TrafficPolicy, instance.Port.ServicePort)
			util.AddConfigInfoMetadata(localCluster.cluster.Metadata, cfg.Meta)
		}
	}
	// If there's a connection pool set on the Sidecar then override any settings derived from the DestinationRule
	// with those set by Sidecar resource. This allows the user to resolve any ambiguity, e.g. in the case that
	// multiple services are listening on the same port.
	if sidecarConnPool := proxy.SidecarScope.InboundConnectionPoolForPort(clusterPort); sidecarConnPool != nil {
		if opts.policy == nil {
			// There was no destination rule, so no inbound traffic policy; we'll create a default
			opts.policy = &networking.TrafficPolicy{}
		} else {
			// copy policy to prevent mutating the original destinationRule trafficPolicy
			opts.policy = util.ShallowcopyTrafficPolicy(opts.policy)
		}
		opts.policy.ConnectionPool = sidecarConnPool
	}
	cb.applyTrafficPolicy(opts)
	if bind != LocalhostAddress && bind != LocalhostIPv6Address {
		// iptables will redirect our own traffic to localhost back to us if we do not use the "magic" upstream bind
		// config which will be skipped.
		localCluster.cluster.UpstreamBindConfig = &core.BindConfig{
			SourceAddress: &core.SocketAddress{
				Address: cb.passThroughBindIPs[0],
				PortSpecifier: &core.SocketAddress_PortValue{
					// Port 0 lets the OS pick an ephemeral source port.
					PortValue: uint32(0),
				},
			},
		}
		// There is a usage doc here:
		// https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/core/v3/address.proto#config-core-v3-bindconfig
		// to support Dual Stack via Envoy BindConfig, and below is the related issue/PR in Envoy:
		// https://github.com/envoyproxy/envoy/issues/9811
		// https://github.com/envoyproxy/envoy/pull/22639.
		// The extra source address for UpstreamBindConfig should be added if dual stack is enabled and there is
		// more than one IP for the proxy.
		if features.EnableDualStack && len(cb.passThroughBindIPs) > 1 {
			// add extra source addresses to cluster builder
			var extraSrcAddrs []*core.ExtraSourceAddress
			for _, extraBdIP := range cb.passThroughBindIPs[1:] {
				extraSrcAddr := &core.ExtraSourceAddress{
					Address: &core.SocketAddress{
						Address: extraBdIP,
						PortSpecifier: &core.SocketAddress_PortValue{
							PortValue: uint32(0),
						},
					},
				}
				extraSrcAddrs = append(extraSrcAddrs, extraSrcAddr)
			}
			localCluster.cluster.UpstreamBindConfig.ExtraSourceAddresses = extraSrcAddrs
		}
	}
	return localCluster
}
// buildInboundPassthroughClusters builds passthrough clusters for inbound.
func (cb *ClusterBuilder) buildInboundPassthroughClusters() []*cluster.Cluster {
	// newPassthrough derives a named inbound passthrough cluster bound to the given
	// source address from the default passthrough template.
	newPassthrough := func(name, bindAddr string) *cluster.Cluster {
		c := cb.buildDefaultPassthroughCluster()
		c.Name = name
		c.Filters = nil
		c.UpstreamBindConfig = &core.BindConfig{
			SourceAddress: &core.SocketAddress{
				Address: bindAddr,
				PortSpecifier: &core.SocketAddress_PortValue{
					PortValue: uint32(0),
				},
			},
		}
		return c
	}
	// ipv4 and ipv6 feature detection. Envoy cannot ignore a config where the ip version is not supported,
	// so only emit a passthrough cluster for each family the proxy actually supports.
	clusters := make([]*cluster.Cluster, 0, 2)
	if cb.supportsIPv4 {
		clusters = append(clusters, newPassthrough(util.InboundPassthroughClusterIpv4, InboundPassthroughBindIpv4))
	}
	if cb.supportsIPv6 {
		clusters = append(clusters, newPassthrough(util.InboundPassthroughClusterIpv6, InboundPassthroughBindIpv6))
	}
	return clusters
}
// buildBlackHoleCluster generates a static cluster with no endpoints, sending traffic
// to dummy local port 0. It is used to catch all traffic to unresolved destinations
// in virtual services; such traffic is effectively dropped.
func (cb *ClusterBuilder) buildBlackHoleCluster() *cluster.Cluster {
	timeout := proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration)
	return &cluster.Cluster{
		Name:                 util.BlackHoleCluster,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_STATIC},
		ConnectTimeout:       timeout,
		LbPolicy:             cluster.Cluster_ROUND_ROBIN,
	}
}
// buildDefaultPassthroughCluster generates an ORIGINAL_DST cluster that sends traffic
// to the original destination. It is used to catch all traffic to unknown listener ports.
func (cb *ClusterBuilder) buildDefaultPassthroughCluster() *cluster.Cluster {
	// Local is named `c` (not `cluster`) to avoid shadowing the envoy cluster package.
	c := &cluster.Cluster{
		Name:                 util.PassthroughCluster,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_ORIGINAL_DST},
		ConnectTimeout:       proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration),
		LbPolicy:             cluster.Cluster_CLUSTER_PROVIDED,
		TypedExtensionProtocolOptions: map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: passthroughHttpProtocolOptions,
		},
	}
	// Apply mesh-default connection pool settings and the metadata-exchange filter.
	cb.applyConnectionPool(cb.req.Push.Mesh, newClusterWrapper(c), &networking.ConnectionPoolSettings{})
	cb.applyMetadataExchange(c)
	return c
}
// setH2Options make the cluster an h2 cluster by setting http2ProtocolOptions.
// No-op when mc is nil. Lazily initializes mc.httpProtocolOptions.
func setH2Options(mc *clusterWrapper) {
	if mc == nil {
		return
	}
	if mc.httpProtocolOptions == nil {
		mc.httpProtocolOptions = &http.HttpProtocolOptions{}
	}
	options := mc.httpProtocolOptions
	// NOTE(review): this guards on UpstreamHttpProtocolOptions but assigns
	// UpstreamProtocolOptions — two distinct fields. Presumably the intent is to avoid
	// clobbering an existing explicit protocol config; confirm the field being checked
	// is the one intended.
	if options.UpstreamHttpProtocolOptions == nil {
		options.UpstreamProtocolOptions = &http.HttpProtocolOptions_ExplicitHttpConfig_{
			ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
				ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
					Http2ProtocolOptions: http2ProtocolOptions(),
				},
			},
		}
	}
}
// mtlsContextType records how an ISTIO_MUTUAL TLS context was selected: explicitly
// by the user, or automatically inferred (auto-mTLS).
type mtlsContextType int

const (
	// userSupplied indicates the TLS settings were explicitly configured by the user.
	userSupplied mtlsContextType = iota
	// autoDetected indicates the TLS mode was inferred automatically.
	autoDetected
)
// setUseDownstreamProtocol configures the cluster to mirror the protocol of the
// downstream connection: HTTP/1.1 in means HTTP/1.1 out, HTTP/2 in means HTTP/2 out.
// Lazily initializes mc.httpProtocolOptions.
func (cb *ClusterBuilder) setUseDownstreamProtocol(mc *clusterWrapper) {
	opts := mc.httpProtocolOptions
	if opts == nil {
		opts = &http.HttpProtocolOptions{}
		mc.httpProtocolOptions = opts
	}
	opts.UpstreamProtocolOptions = &http.HttpProtocolOptions_UseDownstreamProtocolConfig{
		UseDownstreamProtocolConfig: &http.HttpProtocolOptions_UseDownstreamHttpConfig{
			HttpProtocolOptions:  &core.Http1ProtocolOptions{},
			Http2ProtocolOptions: http2ProtocolOptions(),
		},
	}
}
// http2ProtocolOptions returns the HTTP/2 protocol options used for h2 upstream
// configurations; all fields are currently left at Envoy defaults.
func http2ProtocolOptions() *core.Http2ProtocolOptions {
	return &core.Http2ProtocolOptions{}
}
// nolint
// revive:disable-next-line
// isHttp2Cluster reports whether the wrapper carries an explicit HTTP/2 upstream config.
func (cb *ClusterBuilder) isHttp2Cluster(mc *clusterWrapper) bool {
	opts := mc.httpProtocolOptions
	if opts == nil {
		return false
	}
	return opts.GetExplicitHttpConfig().GetHttp2ProtocolOptions() != nil
}
// setUpstreamProtocol selects the upstream HTTP protocol for the cluster.
// This is called after traffic policy applied.
func (cb *ClusterBuilder) setUpstreamProtocol(cluster *clusterWrapper, port *model.Port) {
	if port.Protocol.IsHTTP2() {
		setH2Options(cluster)
		return
	}
	// Add use_downstream_protocol for sidecar proxy only if protocol sniffing is enabled. Since
	// protocol detection is disabled for gateway and use_downstream_protocol is used under protocol
	// detection for cluster to select upstream connection protocol when the service port is unnamed.
	// use_downstream_protocol should be disabled for gateway; while it sort of makes sense there, even
	// without sniffing, a concern is that clients will do ALPN negotiation, and we always advertise
	// h2. Clients would then connect with h2, while the upstream may not support it. This is not a
	// concern for plaintext, but we do not have a way to distinguish https vs http here. If users of
	// gateway want this behavior, they can configure UseClientProtocol explicitly.
	if !cb.sidecarProxy() || !port.Protocol.IsUnsupported() {
		return
	}
	// Use downstream protocol. If the incoming traffic use HTTP 1.1, the
	// upstream cluster will use HTTP 1.1, if incoming traffic use HTTP2,
	// the upstream cluster will use HTTP2.
	cb.setUseDownstreamProtocol(cluster)
}
// normalizeClusters normalizes clusters to avoid duplicate clusters. This should be called
// at the end before adding the cluster to list of clusters.
func (cb *ClusterBuilder) normalizeClusters(clusters []*discovery.Resource) []*discovery.Resource {
	// resolve cluster name conflicts. there can be duplicate cluster names if there are conflicting service definitions.
	// for any clusters that share the same name the first cluster is kept and the others are discarded.
	seen := sets.String{}
	deduped := make([]*discovery.Resource, 0, len(clusters))
	for _, res := range clusters {
		if seen.InsertContains(res.Name) {
			// Name already emitted: drop this one and record a metric for visibility.
			cb.req.Push.AddMetric(model.DuplicatedClusters, res.Name, cb.proxyID,
				fmt.Sprintf("Duplicate cluster %s found while pushing CDS", res.Name))
			continue
		}
		deduped = append(deduped, res)
	}
	return deduped
}
// getAllCachedSubsetClusters either fetches all cached clusters for a given key (there may be multiple due to subsets)
// and returns them along with allFound=True, or returns allFound=False indicating a cache miss. In either case,
// the cache tokens are returned to allow future writes to the cache.
// This code will only trigger a cache hit if all subset clusters are present. This simplifies the code a bit,
// as the non-subset and subset cluster generation are tightly coupled, in exchange for a likely trivial cache hit rate impact.
func (cb *ClusterBuilder) getAllCachedSubsetClusters(clusterKey clusterCache) ([]*discovery.Resource, bool) {
	if !features.EnableCDSCaching {
		return nil, false
	}
	dr := CastDestinationRule(clusterKey.destinationRule.GetRule())
	subsets := dr.GetSubsets()
	res := make([]*discovery.Resource, 0, 1+len(subsets))
	allFound := true
	// lookup fetches the entry for the current clusterKey, appending it (possibly nil)
	// and clearing allFound on a miss.
	lookup := func() {
		cached := cb.cache.Get(&clusterKey)
		if cached == nil {
			allFound = false
		}
		res = append(res, cached)
	}
	// Main (non-subset) cluster first, then one entry per subset.
	lookup()
	dir, _, host, port := model.ParseSubsetKey(clusterKey.clusterName)
	for _, ss := range subsets {
		clusterKey.clusterName = model.BuildSubsetKey(dir, ss.Name, host, port)
		lookup()
	}
	return res, allFound
}
// build does any final build operations needed, like marshaling etc.
// Returns nil for a nil receiver.
func (mc *clusterWrapper) build() *cluster.Cluster {
	if mc == nil {
		return nil
	}
	// Marshal the HTTP protocol options into the cluster, if any were accumulated.
	if opts := mc.httpProtocolOptions; opts != nil {
		if opts.UpstreamProtocolOptions == nil {
			// UpstreamProtocolOptions is a required field in Envoy; default to
			// explicit plain-HTTP protocol options when nothing was set earlier.
			opts.UpstreamProtocolOptions = &http.HttpProtocolOptions_ExplicitHttpConfig_{
				ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
					ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_HttpProtocolOptions{},
				},
			}
		}
		mc.cluster.TypedExtensionProtocolOptions = map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: protoconv.MessageToAny(opts),
		}
	}
	return mc.cluster
}
// CastDestinationRule returns the destination rule enclosed by the config, if not null.
// Otherwise, return nil.
func CastDestinationRule(config *config.Config) *networking.DestinationRule {
	if config == nil {
		return nil
	}
	return config.Spec.(*networking.DestinationRule)
}
// maybeApplyEdsConfig applies EdsClusterConfig on the passed in cluster if it is an EDS type of cluster.
// Non-EDS clusters are left unchanged.
func maybeApplyEdsConfig(c *cluster.Cluster) {
	if c.GetType() == cluster.Cluster_EDS {
		c.EdsClusterConfig = &cluster.Cluster_EdsClusterConfig{
			ServiceName: c.Name,
			EdsConfig: &core.ConfigSource{
				ConfigSourceSpecifier: &core.ConfigSource_Ads{
					Ads: &core.AggregatedConfigSource{},
				},
				// Do not block cluster warming on the initial EDS fetch.
				InitialFetchTimeout: durationpb.New(0),
				ResourceApiVersion:  core.ApiVersion_V3,
			},
		}
	}
}
// buildExternalSDSCluster generates a cluster that acts as external SDS server,
// reached over a unix domain socket (pipe) at addr.
func (cb *ClusterBuilder) buildExternalSDSCluster(addr string) *cluster.Cluster {
	pipeEndpoint := &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: &core.Address{
					Address: &core.Address_Pipe{
						Pipe: &core.Pipe{Path: addr},
					},
				},
			},
		},
	}
	// Force an explicit HTTP/2 upstream configuration for the SDS connection.
	h2Opts := &http.HttpProtocolOptions{
		UpstreamProtocolOptions: &http.HttpProtocolOptions_ExplicitHttpConfig_{
			ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
				ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
					Http2ProtocolOptions: http2ProtocolOptions(),
				},
			},
		},
	}
	return &cluster.Cluster{
		Name:                 security.SDSExternalClusterName,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_STATIC},
		ConnectTimeout:       proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration),
		LoadAssignment: &endpoint.ClusterLoadAssignment{
			ClusterName: security.SDSExternalClusterName,
			Endpoints: []*endpoint.LocalityLbEndpoints{
				{
					LbEndpoints: []*endpoint.LbEndpoint{pipeEndpoint},
				},
			},
		},
		TypedExtensionProtocolOptions: map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: protoconv.MessageToAny(h2Opts),
		},
	}
}
// addTelemetryMetadata attaches a "services" list to the cluster's istio metadata.
// It is consumed by the telemetry v2 filter for metric labels. Inbound clusters list
// every co-located service on the cluster port (deduplicated by hostname); outbound
// clusters list the single service the cluster was built for.
func addTelemetryMetadata(cluster *cluster.Cluster,
	port *model.Port, service *model.Service,
	direction model.TrafficDirection, inboundServices []model.ServiceTarget,
) {
	if !features.EnableTelemetryLabel || cluster == nil {
		return
	}
	switch direction {
	case model.TrafficDirectionInbound:
		// At inbound, port and local service instance has to be provided.
		if len(inboundServices) == 0 || port == nil {
			return
		}
	case model.TrafficDirectionOutbound:
		// At outbound, the service corresponding to the cluster has to be provided.
		if service == nil {
			return
		}
	}
	im := getOrCreateIstioMetadata(cluster)
	// Add services field into istio metadata, starting from an empty list.
	im.Fields["services"] = &structpb.Value{
		Kind: &structpb.Value_ListValue{
			ListValue: &structpb.ListValue{
				Values: []*structpb.Value{},
			},
		},
	}
	svcList := im.Fields["services"].GetListValue()
	switch direction {
	case model.TrafficDirectionInbound:
		// For inbound cluster, add all services on the cluster port. A service may
		// appear multiple times (e.g. multiple IPs); dedupe by hostname.
		seen := sets.New[host.Name]()
		for _, svc := range inboundServices {
			if svc.Port.Port != port.Port {
				// Not the port this cluster is being built for; skip.
				continue
			}
			if seen.InsertContains(svc.Service.Hostname) {
				continue
			}
			svcList.Values = append(svcList.Values, buildServiceMetadata(svc.Service))
		}
	case model.TrafficDirectionOutbound:
		// For outbound cluster, add telemetry metadata based on the service that the cluster is built for.
		svcList.Values = append(svcList.Values, buildServiceMetadata(service))
	}
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/xds/endpoints"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/hash"
)
var (
	// Separator delimits the individual fields folded into a clusterCache key hash.
	Separator = []byte{'~'}
	// Slash delimits name/namespace pairs within a single hashed field.
	Slash = []byte{'/'}
)
// clusterCache includes the variables that can influence a Cluster Configuration.
// Implements XdsCacheEntry interface.
type clusterCache struct {
	clusterName string
	// proxy related cache fields
	proxyVersion    string         // will be matched by envoyfilter patches
	locality        *core.Locality // identifies the locality the cluster is generated for
	proxyClusterID  string         // identifies the kubernetes cluster a proxy is in
	proxySidecar    bool           // identifies if this proxy is a Sidecar
	hbone           bool           // copied from ClusterBuilder.hbone; folded into the key hash
	proxyView       model.ProxyView
	metadataCerts   *metadataCerts // metadata certificates of proxy
	endpointBuilder *endpoints.EndpointBuilder // set only for DNS-resolved services; contributes to the key hash
	// service attributes
	http2          bool // http2 identifies if the cluster is for an http2 service
	downstreamAuto bool // true when the sidecar selects the upstream protocol from the downstream connection
	supportsIPv4   bool
	// dependent configs
	service         *model.Service
	destinationRule *model.ConsolidatedDestRule
	envoyFilterKeys []string
	peerAuthVersion string   // identifies the versions of all peer authentications
	serviceAccounts []string // contains all the service accounts associated with the service
}
// Type implements XdsCacheEntry; cluster cache entries are CDS resources.
func (t *clusterCache) Type() string {
	return model.CDSType
}
// Key implements XdsCacheEntry. It folds every field that can influence the generated
// cluster into a single 64-bit hash. The write order is the hash contract: reordering
// any field invalidates all existing cache keys. A Separator is written even when an
// optional field is absent, so adjacent fields cannot alias each other.
func (t *clusterCache) Key() any {
	// nolint: gosec
	// Not security sensitive code
	h := hash.New()
	h.WriteString(t.clusterName)
	h.Write(Separator)
	h.WriteString(t.proxyVersion)
	h.Write(Separator)
	h.WriteString(util.LocalityToString(t.locality))
	h.Write(Separator)
	h.WriteString(t.proxyClusterID)
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(t.proxySidecar))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(t.http2))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(t.downstreamAuto))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(t.supportsIPv4))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(t.hbone))
	h.Write(Separator)
	// Optional proxy-scoped fields: hash their string form when present.
	if t.proxyView != nil {
		h.WriteString(t.proxyView.String())
	}
	h.Write(Separator)
	if t.metadataCerts != nil {
		h.WriteString(t.metadataCerts.String())
	}
	h.Write(Separator)
	if t.service != nil {
		h.WriteString(string(t.service.Hostname))
		h.Write(Slash)
		h.WriteString(t.service.Attributes.Namespace)
	}
	h.Write(Separator)
	// Every contributing DestinationRule, as name/namespace pairs.
	for _, dr := range t.destinationRule.GetFrom() {
		h.WriteString(dr.Name)
		h.Write(Slash)
		h.WriteString(dr.Namespace)
	}
	h.Write(Separator)
	for _, efk := range t.envoyFilterKeys {
		h.WriteString(efk)
		h.Write(Separator)
	}
	h.Write(Separator)
	h.WriteString(t.peerAuthVersion)
	h.Write(Separator)
	for _, sa := range t.serviceAccounts {
		h.WriteString(sa)
		h.Write(Separator)
	}
	h.Write(Separator)
	// DNS-resolved services embed endpoint state in the cluster; include it when set.
	if t.endpointBuilder != nil {
		t.endpointBuilder.WriteHash(h)
	}
	return h.Sum64()
}
// DependentConfigs implements XdsCacheEntry. It returns the config hashes this entry
// depends on: every contributing DestinationRule, the ServiceEntry for the service
// (when set), and each EnvoyFilter key (formatted "namespace/name").
func (t *clusterCache) DependentConfigs() []model.ConfigHash {
	// GetFrom is nil-safe, so no separate nil check of t.destinationRule is needed
	// (the original guarded the loop redundantly after already sizing from drs).
	drs := t.destinationRule.GetFrom()
	configs := make([]model.ConfigHash, 0, len(drs)+1+len(t.envoyFilterKeys))
	for _, dr := range drs {
		configs = append(configs, model.ConfigKey{Kind: kind.DestinationRule, Name: dr.Name, Namespace: dr.Namespace}.HashCode())
	}
	if t.service != nil {
		configs = append(configs, model.ConfigKey{Kind: kind.ServiceEntry, Name: string(t.service.Hostname), Namespace: t.service.Attributes.Namespace}.HashCode())
	}
	for _, efKey := range t.envoyFilterKeys {
		// Keys are "namespace/name"; index 0 is the namespace, index 1 the name.
		items := strings.Split(efKey, "/")
		configs = append(configs, model.ConfigKey{Kind: kind.EnvoyFilter, Name: items[1], Namespace: items[0]}.HashCode())
	}
	// For now, this matches EndpointBuilder's DependentConfigs. No need to duplicate them.
	return configs
}
// Cacheable implements XdsCacheEntry; cluster entries are always eligible for caching.
func (t *clusterCache) Cacheable() bool {
	return true
}
// cacheStats keeps track of cache usage stats (hit and miss counts).
type cacheStats struct {
	hits, miss int
}

// empty reports whether no hits or misses have been recorded.
func (s cacheStats) empty() bool {
	return s.hits == 0 && s.miss == 0
}

// merge returns the element-wise sum of s and other.
func (s cacheStats) merge(other cacheStats) cacheStats {
	return cacheStats{
		hits: s.hits + other.hits,
		miss: s.miss + other.miss,
	}
}
// buildClusterKey assembles the cache key for the outbound cluster of service:port,
// capturing every proxy- and config-dependent input that can alter the generated cluster.
func buildClusterKey(service *model.Service, port *model.Port, cb *ClusterBuilder, proxy *model.Proxy, efKeys []string) clusterCache {
	clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
	dr := proxy.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, proxy, service.Hostname)
	var eb *endpoints.EndpointBuilder
	// An EndpointBuilder is only part of the key for DNS-resolved services,
	// whose endpoints are built at CDS time (NewCDSEndpointBuilder).
	if service.Resolution == model.DNSLB || service.Resolution == model.DNSRoundRobinLB {
		eb = endpoints.NewCDSEndpointBuilder(
			proxy,
			cb.req.Push,
			clusterName,
			model.TrafficDirectionOutbound, "", service.Hostname, port.Port,
			service, dr,
		)
	}
	return clusterCache{
		clusterName:     clusterName,
		proxyVersion:    cb.proxyVersion,
		locality:        cb.locality,
		proxyClusterID:  cb.clusterID,
		proxySidecar:    cb.sidecarProxy(),
		proxyView:       cb.proxyView,
		hbone:           cb.hbone,
		http2:           port.Protocol.IsHTTP2(),
		downstreamAuto:  cb.sidecarProxy() && port.Protocol.IsUnsupported(),
		supportsIPv4:    cb.supportsIPv4,
		service:         service,
		destinationRule: dr,
		envoyFilterKeys: efKeys,
		metadataCerts:   cb.metadataCerts,
		peerAuthVersion: cb.req.Push.AuthnPolicies.GetVersion(),
		serviceAccounts: cb.req.Push.ServiceAccounts(service.Hostname, service.Attributes.Namespace),
		endpointBuilder: eb,
	}
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
internalupstream "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/internal_upstream/v3"
tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
metadata "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
"google.golang.org/protobuf/types/known/structpb"
"istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
sec_model "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/wellknown"
)
// istioMtlsTransportSocketMatch is the match criteria selecting endpoints whose
// metadata marks them with the Istio mutual-TLS mode label.
var istioMtlsTransportSocketMatch = &structpb.Struct{
	Fields: map[string]*structpb.Value{
		model.TLSModeLabelShortname: {Kind: &structpb.Value_StringValue{StringValue: model.IstioMutualTLSModeLabel}},
	},
}
// internalUpstreamSocket is an internal_upstream transport socket that passes
// original-destination, cluster, and host "istio" metadata through Envoy internal
// listeners; the wrapped transport is a raw_buffer socket.
var internalUpstreamSocket = &core.TransportSocket{
	Name: "envoy.transport_sockets.internal_upstream",
	ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(&internalupstream.InternalUpstreamTransport{
		PassthroughMetadata: []*internalupstream.InternalUpstreamTransport_MetadataValueSource{
			{
				Kind: &metadata.MetadataKind{Kind: &metadata.MetadataKind_Host_{}},
				Name: util.OriginalDstMetadataKey,
			},
			{
				Kind: &metadata.MetadataKind{Kind: &metadata.MetadataKind_Cluster_{
					Cluster: &metadata.MetadataKind_Cluster{},
				}},
				Name: "istio",
			},
			{
				Kind: &metadata.MetadataKind{Kind: &metadata.MetadataKind_Host_{
					Host: &metadata.MetadataKind_Host{},
				}},
				Name: "istio",
			},
		},
		TransportSocket: xdsfilters.RawBufferTransportSocket,
	})},
}
// hboneTransportSocket selects the internal-upstream transport for endpoints whose
// metadata marks them as reachable over an HTTP tunnel (HBONE).
var hboneTransportSocket = &cluster.Cluster_TransportSocketMatch{
	Name: "hbone",
	Match: &structpb.Struct{
		Fields: map[string]*structpb.Value{
			model.TunnelLabelShortName: {Kind: &structpb.Value_StringValue{StringValue: model.TunnelHTTP}},
		},
	},
	TransportSocket: internalUpstreamSocket,
}
// hboneOrPlaintextSocket prefers the HBONE transport when the endpoint supports
// tunneling and otherwise falls back to the default transport socket match.
var hboneOrPlaintextSocket = []*cluster.Cluster_TransportSocketMatch{
	hboneTransportSocket,
	defaultTransportSocketMatch(),
}
// applyUpstreamTLSSettings applies upstream tls context to the cluster.
// When the ISTIO_MUTUAL mode was auto-detected (not user-configured), the single
// transport socket is converted into a transport_socket_matches list so that
// non-Istio endpoints keep the default transport.
func (cb *ClusterBuilder) applyUpstreamTLSSettings(
	opts *buildClusterOpts,
	tls *networking.ClientTLSSettings,
	mtlsCtxType mtlsContextType,
) {
	c := opts.mutable
	tlsContext, err := cb.buildUpstreamClusterTLSContext(opts, tls)
	if err != nil {
		// Building the TLS context failed; leave the cluster's transport untouched.
		log.Errorf("failed to build Upstream TLSContext: %s", err.Error())
		return
	}
	if tlsContext != nil {
		c.cluster.TransportSocket = &core.TransportSocket{
			Name:       wellknown.TransportSocketTLS,
			ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(tlsContext)},
		}
	}
	// True only when ISTIO_MUTUAL was selected by auto-detection rather than by the user.
	istioAutodetectedMtls := tls != nil && tls.Mode == networking.ClientTLSSettings_ISTIO_MUTUAL &&
		mtlsCtxType == autoDetected
	if cb.hbone {
		cb.applyHBONETransportSocketMatches(c.cluster, tls, istioAutodetectedMtls)
	} else if c.cluster.GetType() != cluster.Cluster_ORIGINAL_DST {
		// For headless service, discovery type will be `Cluster_ORIGINAL_DST`
		// Apply auto mtls to clusters excluding these kind of headless services.
		if istioAutodetectedMtls {
			// convert to transport socket matcher if the mode was auto detected
			transportSocket := c.cluster.TransportSocket
			c.cluster.TransportSocket = nil
			c.cluster.TransportSocketMatches = []*cluster.Cluster_TransportSocketMatch{
				{
					Name:            "tlsMode-" + model.IstioMutualTLSModeLabel,
					Match:           istioMtlsTransportSocketMatch,
					TransportSocket: transportSocket,
				},
				defaultTransportSocketMatch(),
			}
		}
	}
}
// buildUpstreamClusterTLSContext constructs the UpstreamTlsContext for a cluster from
// the DestinationRule ClientTLSSettings. It returns (nil, nil) when no TLS context
// should be attached (nil settings, DISABLE mode, or the sidecar CredentialName
// hack below), and a non-nil error only for MUTUAL mode with missing cert/key files.
func (cb *ClusterBuilder) buildUpstreamClusterTLSContext(opts *buildClusterOpts, tls *networking.ClientTLSSettings) (*tlsv3.UpstreamTlsContext, error) {
	if tls == nil {
		return nil, nil
	}
	// Hack to avoid egress sds cluster config generation for sidecar when
	// CredentialName is set in DestinationRule without a workloadSelector.
	// We do not want to support CredentialName setting in non workloadSelector based DestinationRules, because
	// that would result in the CredentialName being supplied to all the sidecars which the DestinationRule is scoped to,
	// resulting in delayed startup of sidecars who do not have access to the credentials.
	if tls.CredentialName != "" && cb.sidecarProxy() && !opts.isDrWithSelector {
		if tls.Mode == networking.ClientTLSSettings_SIMPLE || tls.Mode == networking.ClientTLSSettings_MUTUAL {
			return nil, nil
		}
	}
	c := opts.mutable
	var tlsContext *tlsv3.UpstreamTlsContext
	switch tls.Mode {
	case networking.ClientTLSSettings_DISABLE:
		tlsContext = nil
	case networking.ClientTLSSettings_ISTIO_MUTUAL:
		// Istio-managed mTLS: both the workload certificate and the root of trust
		// are fetched from the agent over SDS.
		tlsContext = &tlsv3.UpstreamTlsContext{
			CommonTlsContext: defaultUpstreamCommonTLSContext(),
			Sni:              tls.Sni,
		}
		tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
			sec_model.ConstructSdsSecretConfig(sec_model.SDSDefaultResourceName))
		tlsContext.CommonTlsContext.ValidationContextType = &tlsv3.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &tlsv3.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext:         &tlsv3.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
				ValidationContextSdsSecretConfig: sec_model.ConstructSdsSecretConfig(sec_model.SDSRootResourceName),
			},
		}
		// Set default SNI of cluster name for istio_mutual if sni is not set.
		if len(tlsContext.Sni) == 0 {
			tlsContext.Sni = c.cluster.Name
		}
		// `istio-peer-exchange` alpn is only used when using mtls communication between peers.
		// We add `istio-peer-exchange` to the list of alpn strings.
		// The code has repeated snippets because We want to use predefined alpn strings for efficiency.
		if cb.isHttp2Cluster(c) {
			// This is HTTP/2 in-mesh cluster, advertise it with ALPN.
			if features.MetadataExchange && !features.DisableMxALPN {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2WithMxc
			} else {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2
			}
		} else {
			// This is in-mesh cluster, advertise it with ALPN.
			if features.MetadataExchange && !features.DisableMxALPN {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshWithMxc
			} else {
				tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMesh
			}
		}
	case networking.ClientTLSSettings_SIMPLE:
		// One-way TLS: only server validation is configured; no client certificate.
		tlsContext = &tlsv3.UpstreamTlsContext{
			CommonTlsContext: defaultUpstreamCommonTLSContext(),
			Sni:              tls.Sni,
		}
		cb.setAutoSniAndAutoSanValidation(c, tls)
		// Use subject alt names specified in service entry if TLS settings does not have subject alt names.
		if opts.serviceRegistry == provider.External && len(tls.SubjectAltNames) == 0 {
			// DeepCopy so the injected SANs do not leak into the shared config.
			tls = tls.DeepCopy()
			tls.SubjectAltNames = opts.serviceAccounts
		}
		if tls.CredentialName != "" {
			// If credential name is specified at Destination Rule config and originating node is egress gateway, create
			// SDS config for egress gateway to fetch key/cert at gateway agent.
			sec_model.ApplyCustomSDSToClientCommonTLSContext(tlsContext.CommonTlsContext, tls, cb.credentialSocketExist)
		} else {
			// If CredentialName is not set fallback to files specified in DR.
			res := security.SdsCertificateConfig{
				CaCertificatePath: tls.CaCertificates,
			}
			// If tls.CaCertificate or CaCertificate in Metadata isn't configured, or tls.InsecureSkipVerify is true,
			// don't set up SdsSecretConfig
			if !res.IsRootCertificate() || tls.GetInsecureSkipVerify().GetValue() {
				tlsContext.CommonTlsContext.ValidationContextType = &tlsv3.CommonTlsContext_ValidationContext{}
			} else {
				tlsContext.CommonTlsContext.ValidationContextType = &tlsv3.CommonTlsContext_CombinedValidationContext{
					CombinedValidationContext: &tlsv3.CommonTlsContext_CombinedCertificateValidationContext{
						DefaultValidationContext:         &tlsv3.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
						ValidationContextSdsSecretConfig: sec_model.ConstructSdsSecretConfig(res.GetRootResourceName()),
					},
				}
			}
		}
		applyTLSDefaults(tlsContext, opts.mesh.GetTlsDefaults())
		if cb.isHttp2Cluster(c) {
			// This is HTTP/2 cluster, advertise it with ALPN.
			tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
		}
	case networking.ClientTLSSettings_MUTUAL:
		// Mutual TLS with user-provided certificates (via CredentialName or files).
		tlsContext = &tlsv3.UpstreamTlsContext{
			CommonTlsContext: defaultUpstreamCommonTLSContext(),
			Sni:              tls.Sni,
		}
		cb.setAutoSniAndAutoSanValidation(c, tls)
		// Use subject alt names specified in service entry if TLS settings does not have subject alt names.
		if opts.serviceRegistry == provider.External && len(tls.SubjectAltNames) == 0 {
			// DeepCopy so the injected SANs do not leak into the shared config.
			tls = tls.DeepCopy()
			tls.SubjectAltNames = opts.serviceAccounts
		}
		if tls.CredentialName != "" {
			// If credential name is specified at Destination Rule config and originating node is egress gateway, create
			// SDS config for egress gateway to fetch key/cert at gateway agent.
			sec_model.ApplyCustomSDSToClientCommonTLSContext(tlsContext.CommonTlsContext, tls, cb.credentialSocketExist)
		} else {
			// If CredentialName is not set fallback to file based approach
			if tls.ClientCertificate == "" || tls.PrivateKey == "" {
				err := fmt.Errorf("failed to apply tls setting for %s: client certificate and private key must not be empty",
					c.cluster.Name)
				return nil, err
			}
			// These are certs being mounted from within the pod and specified in Destination Rules.
			// Rather than reading directly in Envoy, which does not support rotation, we will
			// serve them over SDS by reading the files.
			res := security.SdsCertificateConfig{
				CertificatePath:   tls.ClientCertificate,
				PrivateKeyPath:    tls.PrivateKey,
				CaCertificatePath: tls.CaCertificates,
			}
			tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
				sec_model.ConstructSdsSecretConfig(res.GetResourceName()))
			// If tls.CaCertificate or CaCertificate in Metadata isn't configured, or tls.InsecureSkipVerify is true,
			// don't set up SdsSecretConfig
			if !res.IsRootCertificate() || tls.GetInsecureSkipVerify().GetValue() {
				tlsContext.CommonTlsContext.ValidationContextType = &tlsv3.CommonTlsContext_ValidationContext{}
			} else {
				tlsContext.CommonTlsContext.ValidationContextType = &tlsv3.CommonTlsContext_CombinedValidationContext{
					CombinedValidationContext: &tlsv3.CommonTlsContext_CombinedCertificateValidationContext{
						DefaultValidationContext:         &tlsv3.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
						ValidationContextSdsSecretConfig: sec_model.ConstructSdsSecretConfig(res.GetRootResourceName()),
					},
				}
			}
		}
		applyTLSDefaults(tlsContext, opts.mesh.GetTlsDefaults())
		if cb.isHttp2Cluster(c) {
			// This is HTTP/2 cluster, advertise it with ALPN.
			tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
		}
	}
	return tlsContext, nil
}
// applyTLSDefaults applies tls default settings from mesh config to UpstreamTlsContext.
// Only non-empty defaults are copied; existing values in tlsContext are otherwise left alone.
func applyTLSDefaults(tlsContext *tlsv3.UpstreamTlsContext, tlsDefaults *v1alpha1.MeshConfig_TLSConfig) {
	if tlsContext == nil || tlsDefaults == nil {
		return
	}
	if len(tlsDefaults.EcdhCurves) == 0 && len(tlsDefaults.CipherSuites) == 0 {
		// Nothing to apply.
		return
	}
	// Callers build tlsContext via defaultUpstreamCommonTLSContext, which always sets
	// CommonTlsContext and TlsParams; guard anyway so a future caller passing a bare
	// context cannot trigger a nil-pointer panic here.
	if tlsContext.CommonTlsContext == nil {
		tlsContext.CommonTlsContext = &tlsv3.CommonTlsContext{}
	}
	if tlsContext.CommonTlsContext.TlsParams == nil {
		tlsContext.CommonTlsContext.TlsParams = &tlsv3.TlsParameters{}
	}
	if len(tlsDefaults.EcdhCurves) > 0 {
		tlsContext.CommonTlsContext.TlsParams.EcdhCurves = tlsDefaults.EcdhCurves
	}
	if len(tlsDefaults.CipherSuites) > 0 {
		tlsContext.CommonTlsContext.TlsParams.CipherSuites = tlsDefaults.CipherSuites
	}
}
// setAutoSniAndAutoSanValidation configures upstream HTTP protocol options:
// auto_sni is set when the EnableAutoSni flag is on and the DestinationRule does not
// pin an SNI explicitly; auto_san_validation additionally requires VerifyCertAtClient,
// no explicit SubjectAltNames, and insecureSkipVerify not enabled.
func (cb *ClusterBuilder) setAutoSniAndAutoSanValidation(mc *clusterWrapper, tls *networking.ClientTLSSettings) {
	if mc == nil || !features.EnableAutoSni {
		return
	}
	enableAutoSni := len(tls.Sni) == 0
	enableAutoSan := features.VerifyCertAtClient && enableAutoSni &&
		len(tls.SubjectAltNames) == 0 && !tls.GetInsecureSkipVerify().GetValue()
	if !enableAutoSni && !enableAutoSan {
		return
	}
	if mc.httpProtocolOptions == nil {
		mc.httpProtocolOptions = &http.HttpProtocolOptions{}
	}
	po := mc.httpProtocolOptions
	if po.UpstreamHttpProtocolOptions == nil {
		po.UpstreamHttpProtocolOptions = &core.UpstreamHttpProtocolOptions{}
	}
	if enableAutoSni {
		po.UpstreamHttpProtocolOptions.AutoSni = true
	}
	if enableAutoSan {
		po.UpstreamHttpProtocolOptions.AutoSanValidation = true
	}
}
// applyHBONETransportSocketMatches rewrites the cluster's transport socket (matches)
// for HBONE-capable proxies, layering the HBONE socket match in front of whatever TLS
// configuration was already applied.
func (cb *ClusterBuilder) applyHBONETransportSocketMatches(c *cluster.Cluster, tls *networking.ClientTLSSettings,
	istioAutoDetectedMtls bool,
) {
	if tls == nil {
		c.TransportSocketMatches = hboneOrPlaintextSocket
		return
	}
	// For headless service, discovery type will be `Cluster_ORIGINAL_DST`.
	// Auto mtls is applied only to clusters that are not such headless services.
	if c.GetType() == cluster.Cluster_ORIGINAL_DST {
		return
	}
	if istioAutoDetectedMtls {
		// Mode was auto detected: convert the single transport socket into a matcher
		// so non-Istio endpoints keep working.
		ts := c.TransportSocket
		c.TransportSocket = nil
		c.TransportSocketMatches = []*cluster.Cluster_TransportSocketMatch{
			hboneTransportSocket,
			{
				Name:            "tlsMode-" + model.IstioMutualTLSModeLabel,
				Match:           istioMtlsTransportSocketMatch,
				TransportSocket: ts,
			},
			defaultTransportSocketMatch(),
		}
		return
	}
	if c.TransportSocket == nil {
		c.TransportSocketMatches = hboneOrPlaintextSocket
		return
	}
	// Explicit TLS settings: keep the configured socket but allow HBONE to take priority.
	ts := c.TransportSocket
	c.TransportSocket = nil
	c.TransportSocketMatches = []*cluster.Cluster_TransportSocketMatch{
		hboneTransportSocket,
		{
			Name:            "tlsMode-" + model.IstioMutualTLSModeLabel,
			TransportSocket: ts,
		},
	}
}
// defaultUpstreamCommonTLSContext returns a CommonTlsContext preloaded with Istio's
// default protocol-version bounds for upstream (client-side) TLS.
func defaultUpstreamCommonTLSContext() *tlsv3.CommonTlsContext {
	params := &tlsv3.TlsParameters{
		// if not specified, envoy use TLSv1_2 as default for client.
		TlsMinimumProtocolVersion: tlsv3.TlsParameters_TLSv1_2,
		TlsMaximumProtocolVersion: tlsv3.TlsParameters_TLSv1_3,
	}
	return &tlsv3.CommonTlsContext{TlsParams: params}
}
// defaultTransportSocketMatch applies to endpoints that have no security.istio.io/tlsMode
// label, or whose label value does not match "istio"; such endpoints get a plain
// (raw buffer) transport socket. The empty Match struct matches everything, so this
// entry acts as the catch-all tail of a TransportSocketMatches list.
func defaultTransportSocketMatch() *cluster.Cluster_TransportSocketMatch {
	m := &cluster.Cluster_TransportSocketMatch{}
	m.Name = "tlsMode-disabled"
	m.Match = &structpb.Struct{}
	m.TransportSocket = xdsfilters.RawBufferTransportSocket
	return m
}
// buildUpstreamTLSSettings fills key cert fields for all TLSSettings when the mode is
// `ISTIO_MUTUAL`. If the (input) TLS setting is nil (i.e. not set), *and* the service
// mTLS mode is STRICT, it also creates and populates the config as if they were set as
// ISTIO_MUTUAL. The returned mtlsContextType reports whether the result came from the
// user's DestinationRule (userSupplied) or was derived automatically (autoDetected).
func (cb *ClusterBuilder) buildUpstreamTLSSettings(
	tls *networking.ClientTLSSettings,
	serviceAccounts []string,
	sni string,
	autoMTLSEnabled bool,
	meshExternal bool,
	serviceMTLSMode model.MutualTLSMode,
) (*networking.ClientTLSSettings, mtlsContextType) {
	if tls == nil {
		// Nothing configured by the user: only auto mTLS may apply, and only for
		// in-mesh services whose mTLS mode is known and not disabled.
		if meshExternal || !autoMTLSEnabled || serviceMTLSMode == model.MTLSUnknown || serviceMTLSMode == model.MTLSDisable {
			return nil, userSupplied
		}
		// For backward compatibility, use metadata certs if provided.
		if cb.hasMetadataCerts() {
			return cb.buildMutualTLS(serviceAccounts, sni), autoDetected
		}
		// Build settings for auto MTLS.
		return cb.buildIstioMutualTLS(serviceAccounts, sni), autoDetected
	}
	if tls.Mode == networking.ClientTLSSettings_DISABLE || tls.Mode == networking.ClientTLSSettings_SIMPLE {
		return tls, userSupplied
	}
	// For backward compatibility, use metadata certs if provided.
	// When building Mutual TLS settings, we should always use user supplied
	// SubjectAltNames and SNI in destination rule. The Service Accounts and auto
	// computed SNI should only be used for ISTIO_MUTUAL.
	if cb.hasMetadataCerts() {
		return cb.buildMutualTLS(tls.SubjectAltNames, tls.Sni), userSupplied
	}
	if tls.Mode != networking.ClientTLSSettings_ISTIO_MUTUAL {
		return tls, userSupplied
	}
	// Update TLS settings for ISTIO_MUTUAL. Use client provided SNI if set. Otherwise,
	// overwrite with the auto generated SNI. User specified SNIs in the istio mtls settings
	// are useful when routing via gateways. Use Service Accounts if Subject Alt names
	// are not specified in TLS settings.
	effectiveSni := tls.Sni
	if len(effectiveSni) == 0 {
		effectiveSni = sni
	}
	effectiveSans := tls.SubjectAltNames
	if effectiveSans == nil {
		effectiveSans = serviceAccounts
	}
	return cb.buildIstioMutualTLS(effectiveSans, effectiveSni), userSupplied
}
// hasMetadataCerts reports whether the proxy provided TLS certificates through node metadata.
func (cb *ClusterBuilder) hasMetadataCerts() bool {
	certs := cb.metadataCerts
	return certs != nil
}
// buildMutualTLS returns a `TLSSettings` for MUTUAL mode whose cert, key and root-cert
// paths come from the proxy metadata certificates.
func (cb *ClusterBuilder) buildMutualTLS(serviceAccounts []string, sni string) *networking.ClientTLSSettings {
	md := cb.metadataCerts
	return &networking.ClientTLSSettings{
		Mode:              networking.ClientTLSSettings_MUTUAL,
		ClientCertificate: md.tlsClientCertChain,
		PrivateKey:        md.tlsClientKey,
		CaCertificates:    md.tlsClientRootCert,
		SubjectAltNames:   serviceAccounts,
		Sni:               sni,
	}
}
// buildIstioMutualTLS returns a `TLSSettings` for ISTIO_MUTUAL mode with the given
// subject alt names and SNI.
func (cb *ClusterBuilder) buildIstioMutualTLS(san []string, sni string) *networking.ClientTLSSettings {
	settings := &networking.ClientTLSSettings{}
	settings.Mode = networking.ClientTLSSettings_ISTIO_MUTUAL
	settings.SubjectAltNames = san
	settings.Sni = sni
	return settings
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"math"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
proxyprotocol "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/proxy_protocol/v3"
http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
xdstype "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/golang/protobuf/ptypes/duration"
"github.com/golang/protobuf/ptypes/wrappers"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/loadbalancer"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
)
// applyTrafficPolicy applies the trafficPolicy defined within destinationRule,
// which can be called for both outbound and inbound cluster, but only connection pool will be applied to inbound cluster.
func (cb *ClusterBuilder) applyTrafficPolicy(opts buildClusterOpts) {
	connectionPool, outlierDetection, loadBalancer, tls, proxyProtocol := selectTrafficPolicyComponents(opts.policy)
	// Connection pool settings are applicable for both inbound and outbound clusters.
	if connectionPool == nil {
		connectionPool = &networking.ConnectionPoolSettings{}
	}
	cb.applyConnectionPool(opts.mesh, opts.mutable, connectionPool)
	if opts.direction != model.TrafficDirectionInbound {
		// Everything below is outbound-only.
		cb.applyH2Upgrade(opts.mutable, opts.port, opts.mesh, connectionPool)
		applyOutlierDetection(opts.mutable.cluster, outlierDetection)
		applyLoadBalancer(opts.mutable.cluster, loadBalancer, opts.port, cb.locality, cb.proxyLabels, opts.mesh)
		if opts.clusterMode != SniDnatClusterMode {
			autoMTLSEnabled := opts.mesh.GetEnableAutoMtls().Value
			// tls may be rewritten here (e.g. replaced by auto mTLS settings);
			// mtlsCtxType records whether it was user supplied or auto detected.
			tls, mtlsCtxType := cb.buildUpstreamTLSSettings(tls, opts.serviceAccounts, opts.istioMtlsSni,
				autoMTLSEnabled, opts.meshExternal, opts.serviceMTLSMode)
			cb.applyUpstreamTLSSettings(&opts, tls, mtlsCtxType)
			// donot apply proxy protocol for proxy that supports HBONE
			if !cb.hbone {
				cb.applyUpstreamProxyProtocol(&opts, proxyProtocol)
			}
		}
	}
	// ORIGINAL_DST clusters always use CLUSTER_PROVIDED lb policy (mirrors the same
	// rule in applyLoadBalancer, which may not run for inbound/SNI-DNAT clusters).
	if opts.mutable.cluster.GetType() == cluster.Cluster_ORIGINAL_DST {
		opts.mutable.cluster.LbPolicy = cluster.Cluster_CLUSTER_PROVIDED
	}
}
// selectTrafficPolicyComponents returns the components of TrafficPolicy that should be
// used for given port: connection pool, outlier detection, load balancer, TLS settings
// and proxy-protocol settings. All returns are nil when policy is nil.
func selectTrafficPolicyComponents(policy *networking.TrafficPolicy) (
	*networking.ConnectionPoolSettings,
	*networking.OutlierDetection,
	*networking.LoadBalancerSettings,
	*networking.ClientTLSSettings,
	*networking.TrafficPolicy_ProxyProtocol,
) {
	if policy == nil {
		return nil, nil, nil, nil, nil
	}
	connectionPool := policy.ConnectionPool
	outlierDetection := policy.OutlierDetection
	loadBalancer := policy.LoadBalancer
	tls := policy.Tls
	proxyProtocol := policy.ProxyProtocol
	// Check if CA Certificate should be System CA Certificate.
	// Copy before mutating: `policy` aliases the stored DestinationRule config, and
	// writing the "system" default through it would permanently alter the shared
	// config object rather than just this computation.
	if features.VerifyCertAtClient && tls != nil && tls.CaCertificates == "" {
		tls = tls.DeepCopy()
		tls.CaCertificates = "system"
	}
	return connectionPool, outlierDetection, loadBalancer, tls, proxyProtocol
}
// applyConnectionPool translates DestinationRule ConnectionPoolSettings into Envoy
// circuit-breaker thresholds, connect/idle timeouts, keepalive and HTTP protocol
// options on the cluster.
// FIXME: there isn't a way to distinguish between unset values and zero values
func (cb *ClusterBuilder) applyConnectionPool(mesh *meshconfig.MeshConfig,
	mc *clusterWrapper, settings *networking.ConnectionPoolSettings,
) {
	if settings == nil {
		return
	}
	// Start from the (unlimited) defaults and tighten only what the user set.
	threshold := getDefaultCircuitBreakerThresholds()
	var idleTimeout *durationpb.Duration
	var maxRequestsPerConnection uint32
	var maxConcurrentStreams uint32
	var maxConnectionDuration *duration.Duration
	if settings.Http != nil {
		if settings.Http.Http2MaxRequests > 0 {
			// Envoy only applies MaxRequests in HTTP/2 clusters
			threshold.MaxRequests = &wrapperspb.UInt32Value{Value: uint32(settings.Http.Http2MaxRequests)}
		}
		if settings.Http.Http1MaxPendingRequests > 0 {
			// Envoy only applies MaxPendingRequests in HTTP/1.1 clusters
			threshold.MaxPendingRequests = &wrapperspb.UInt32Value{Value: uint32(settings.Http.Http1MaxPendingRequests)}
		}
		// FIXME: zero is a valid value if explicitly set, otherwise we want to use the default
		if settings.Http.MaxRetries > 0 {
			threshold.MaxRetries = &wrapperspb.UInt32Value{Value: uint32(settings.Http.MaxRetries)}
		}
		idleTimeout = settings.Http.IdleTimeout
		maxRequestsPerConnection = uint32(settings.Http.MaxRequestsPerConnection)
		maxConcurrentStreams = uint32(settings.Http.MaxConcurrentStreams)
	}
	// Apply the mesh-wide default connect timeout first; TCP settings may override it.
	cb.applyDefaultConnectionPool(mc.cluster)
	if settings.Tcp != nil {
		if settings.Tcp.ConnectTimeout != nil {
			mc.cluster.ConnectTimeout = settings.Tcp.ConnectTimeout
		}
		if settings.Tcp.MaxConnections > 0 {
			threshold.MaxConnections = &wrapperspb.UInt32Value{Value: uint32(settings.Tcp.MaxConnections)}
		}
		if settings.Tcp.MaxConnectionDuration != nil {
			maxConnectionDuration = settings.Tcp.MaxConnectionDuration
		}
		// HTTP idle timeout takes precedence over the TCP one when both are set.
		if idleTimeout == nil {
			idleTimeout = settings.Tcp.IdleTimeout
		}
	}
	applyTCPKeepalive(mesh, mc.cluster, settings.Tcp)
	mc.cluster.CircuitBreakers = &cluster.CircuitBreakers{
		Thresholds: []*cluster.CircuitBreakers_Thresholds{threshold},
	}
	if maxConnectionDuration != nil || idleTimeout != nil || maxRequestsPerConnection > 0 || maxConcurrentStreams > 0 {
		if mc.httpProtocolOptions == nil {
			mc.httpProtocolOptions = &http.HttpProtocolOptions{}
		}
		options := mc.httpProtocolOptions
		if options.CommonHttpProtocolOptions == nil {
			options.CommonHttpProtocolOptions = &core.HttpProtocolOptions{}
		}
		if idleTimeout != nil {
			options.CommonHttpProtocolOptions.IdleTimeout = idleTimeout
		}
		if maxRequestsPerConnection > 0 {
			options.CommonHttpProtocolOptions.MaxRequestsPerConnection = &wrapperspb.UInt32Value{Value: maxRequestsPerConnection}
		}
		if maxConnectionDuration != nil {
			options.CommonHttpProtocolOptions.MaxConnectionDuration = maxConnectionDuration
		}
		// MaxConcurrentStreams only applies when the cluster is explicitly HTTP/2.
		http2ProtocolOptions := options.GetExplicitHttpConfig().GetHttp2ProtocolOptions()
		if http2ProtocolOptions != nil && maxConcurrentStreams > 0 {
			http2ProtocolOptions.MaxConcurrentStreams = &wrapperspb.UInt32Value{Value: maxConcurrentStreams}
		}
	}
	if settings.Http != nil && settings.Http.UseClientProtocol {
		// Use downstream protocol. If the incoming traffic use HTTP 1.1, the
		// upstream cluster will use HTTP 1.1, if incoming traffic use HTTP2,
		// the upstream cluster will use HTTP2.
		cb.setUseDownstreamProtocol(mc)
	}
}
// applyH2Upgrade upgrades the cluster to http2 when the mesh config / destination rule
// call for it; see shouldH2Upgrade for the decision logic.
// applyH2Upgrade can only be called for outbound cluster.
func (cb *ClusterBuilder) applyH2Upgrade(mc *clusterWrapper, port *model.Port,
	mesh *meshconfig.MeshConfig, connectionPool *networking.ConnectionPoolSettings,
) {
	if !shouldH2Upgrade(mc.cluster.Name, port, mesh, connectionPool) {
		return
	}
	setH2Options(mc)
}
// shouldH2Upgrade returns true if the cluster should be upgraded to http2.
// A per-destination-rule override (UPGRADE / DO_NOT_UPGRADE) always wins; otherwise
// only HTTP ports are considered, governed by the mesh-wide H2UpgradePolicy.
// shouldH2Upgrade can only be called for outbound cluster.
func shouldH2Upgrade(clusterName string, port *model.Port, mesh *meshconfig.MeshConfig,
	connectionPool *networking.ConnectionPoolSettings,
) bool {
	// TODO (mjog)
	// Upgrade if tls.GetMode() == networking.TLSSettings_ISTIO_MUTUAL
	if connectionPool != nil && connectionPool.Http != nil {
		// If user wants an upgrade at destination rule/port level that means he is sure that
		// it is a Http port - upgrade in such case. This is useful incase protocol sniffing is
		// enabled and user wants to upgrade/preserve http protocol from client.
		switch override := connectionPool.Http.H2UpgradePolicy; override {
		case networking.ConnectionPoolSettings_HTTPSettings_UPGRADE:
			log.Debugf("Upgrading cluster: %v (%v %v)", clusterName, mesh.H2UpgradePolicy, override)
			return true
		case networking.ConnectionPoolSettings_HTTPSettings_DO_NOT_UPGRADE:
			log.Debugf("Not upgrading cluster: %v (%v %v)", clusterName, mesh.H2UpgradePolicy, override)
			return false
		}
	}
	// Do not upgrade non-http ports. This also ensures that we are only upgrading
	// named ports so that protocol sniffing does not interfere. Protocol sniffing
	// uses downstream protocol. Therefore if the client upgrades connection to http2,
	// the server will send h2 stream to the application,even though the application only
	// supports http 1.1.
	if port != nil && !port.Protocol.IsHTTP() {
		return false
	}
	return mesh.H2UpgradePolicy == meshconfig.MeshConfig_UPGRADE
}
// applyDefaultConnectionPool sets the mesh-wide default connect timeout on the cluster.
// The duration is cloned so later per-cluster mutations cannot alter the shared mesh config.
func (cb *ClusterBuilder) applyDefaultConnectionPool(cluster *cluster.Cluster) {
	timeout := proto.Clone(cb.req.Push.Mesh.ConnectTimeout).(*durationpb.Duration)
	cluster.ConnectTimeout = timeout
}
// applyLoadBalancer translates DestinationRule LoadBalancerSettings (plus mesh-wide
// locality settings) into the cluster's LbPolicy and related config. Order matters:
// ORIGINAL_DST and Redis short-circuit before the simple-LB switch, and ring-hash
// settings are applied last, overriding the simple policy if present.
// NOTE(review): assumes c.CommonLbConfig was initialized when the cluster was built —
// a nil value would panic below; confirm against callers.
func applyLoadBalancer(c *cluster.Cluster, lb *networking.LoadBalancerSettings, port *model.Port,
	locality *core.Locality, proxyLabels map[string]string, meshConfig *meshconfig.MeshConfig,
) {
	// Disable panic threshold when SendUnhealthyEndpoints is enabled as enabling it "may" send traffic to unready
	// end points when load balancer is in panic mode.
	if features.SendUnhealthyEndpoints.Load() {
		c.CommonLbConfig.HealthyPanicThreshold = &xdstype.Percent{Value: 0}
	}
	localityLbSetting := loadbalancer.GetLocalityLbSetting(meshConfig.GetLocalityLbSetting(), lb.GetLocalityLbSetting())
	if localityLbSetting != nil {
		c.CommonLbConfig.LocalityConfigSpecifier = &cluster.Cluster_CommonLbConfig_LocalityWeightedLbConfig_{
			LocalityWeightedLbConfig: &cluster.Cluster_CommonLbConfig_LocalityWeightedLbConfig{},
		}
	}
	// Use locality lb settings from load balancer settings if present, else use mesh wide locality lb settings
	applyLocalityLBSetting(locality, proxyLabels, c, localityLbSetting)
	if c.GetType() == cluster.Cluster_ORIGINAL_DST {
		c.LbPolicy = cluster.Cluster_CLUSTER_PROVIDED
		return
	}
	// Redis protocol must be defaulted with MAGLEV to benefit from client side sharding.
	if features.EnableRedisFilter && port != nil && port.Protocol == protocol.Redis {
		c.LbPolicy = cluster.Cluster_MAGLEV
		return
	}
	// DO not do if else here. since lb.GetSimple returns a enum value (not pointer).
	switch lb.GetSimple() {
	// nolint: staticcheck
	case networking.LoadBalancerSettings_LEAST_CONN, networking.LoadBalancerSettings_LEAST_REQUEST:
		applyLeastRequestLoadBalancer(c, lb)
	case networking.LoadBalancerSettings_RANDOM:
		c.LbPolicy = cluster.Cluster_RANDOM
	case networking.LoadBalancerSettings_ROUND_ROBIN:
		applyRoundRobinLoadBalancer(c, lb)
	case networking.LoadBalancerSettings_PASSTHROUGH:
		c.LbPolicy = cluster.Cluster_CLUSTER_PROVIDED
		c.ClusterDiscoveryType = &cluster.Cluster_Type{Type: cluster.Cluster_ORIGINAL_DST}
		// Wipe out any LoadAssignment, if set. This can occur when we have a STATIC Service but PASSTHROUGH traffic policy
		c.LoadAssignment = nil
	default:
		applySimpleDefaultLoadBalancer(c, lb)
	}
	ApplyRingHashLoadBalancer(c, lb)
}
// applyLocalityLBSetting rewrites the cluster's load assignment according to the
// locality load-balancing settings, when a load assignment is present.
func applyLocalityLBSetting(locality *core.Locality, proxyLabels map[string]string, cluster *cluster.Cluster,
	localityLB *networking.LocalityLoadBalancerSetting,
) {
	if cluster.LoadAssignment == nil {
		// TODO: enable failoverPriority for `STRICT_DNS` cluster type
		return
	}
	// Failover should only be applied with outlier detection, or traffic will never failover.
	failoverEnabled := cluster.OutlierDetection != nil
	loadbalancer.ApplyLocalityLBSetting(cluster.LoadAssignment, nil, locality, proxyLabels, localityLB, failoverEnabled)
}
// applySimpleDefaultLoadBalancer will set the DefaultLBPolicy and create an LbConfig
// if used in LoadBalancerSettings. It runs when the user chose no simple LB policy.
func applySimpleDefaultLoadBalancer(c *cluster.Cluster, loadbalancer *networking.LoadBalancerSettings) {
	c.LbPolicy = defaultLBAlgorithm()
	switch c.LbPolicy {
	case cluster.Cluster_LEAST_REQUEST:
		applyLeastRequestLoadBalancer(c, loadbalancer)
	case cluster.Cluster_ROUND_ROBIN:
		applyRoundRobinLoadBalancer(c, loadbalancer)
	}
}
// defaultLBAlgorithm returns the load-balancing policy used when none is configured:
// LEAST_REQUEST.
func defaultLBAlgorithm() cluster.Cluster_LbPolicy {
	const algo = cluster.Cluster_LEAST_REQUEST
	return algo
}
// applyRoundRobinLoadBalancer will set the LbPolicy and create an LbConfig for
// ROUND_ROBIN if used in LoadBalancerSettings; an LbConfig is emitted only when a
// slow-start (warmup) window is configured.
func applyRoundRobinLoadBalancer(c *cluster.Cluster, loadbalancer *networking.LoadBalancerSettings) {
	c.LbPolicy = cluster.Cluster_ROUND_ROBIN
	warmup := loadbalancer.GetWarmupDurationSecs()
	if warmup == nil {
		return
	}
	c.LbConfig = &cluster.Cluster_RoundRobinLbConfig_{
		RoundRobinLbConfig: &cluster.Cluster_RoundRobinLbConfig{
			SlowStartConfig: setSlowStartConfig(warmup),
		},
	}
}
// applyLeastRequestLoadBalancer will set the LbPolicy and create an LbConfig for
// LEAST_REQUEST if used in LoadBalancerSettings; an LbConfig is emitted only when a
// slow-start (warmup) window is configured.
func applyLeastRequestLoadBalancer(c *cluster.Cluster, loadbalancer *networking.LoadBalancerSettings) {
	c.LbPolicy = cluster.Cluster_LEAST_REQUEST
	warmup := loadbalancer.GetWarmupDurationSecs()
	if warmup == nil {
		return
	}
	c.LbConfig = &cluster.Cluster_LeastRequestLbConfig_{
		LeastRequestLbConfig: &cluster.Cluster_LeastRequestLbConfig{
			SlowStartConfig: setSlowStartConfig(warmup),
		},
	}
}
// setSlowStartConfig builds a SlowStartConfig whose window is the warmupDurationSecs
// provided in the DestinationRule (used by LEAST_REQUEST and ROUND_ROBIN).
func setSlowStartConfig(dur *durationpb.Duration) *cluster.Cluster_SlowStartConfig {
	cfg := &cluster.Cluster_SlowStartConfig{}
	cfg.SlowStartWindow = dur
	return cfg
}
// getDefaultCircuitBreakerThresholds returns a fresh set of default circuit breaker
// thresholds. All limits are effectively unlimited (MaxUint32) so Istio's defaults
// never trip Envoy's breakers; user settings tighten them afterwards.
//
// In particular MaxRetries controls Envoy's max parallel retries to the upstream
// cluster. Envoy defaults this value to 3, which has shown to be insufficient during
// periods of pod churn (e.g. rolling updates), where multiple endpoints in a cluster
// are terminated. In those scenarios the circuit breaker can kick in before Pilot is
// able to deliver an updated endpoint list to Envoy, leading to client-facing 503s.
func getDefaultCircuitBreakerThresholds() *cluster.CircuitBreakers_Thresholds {
	// Allocate a distinct value per field so callers can mutate them independently.
	unlimited := func() *wrappers.UInt32Value {
		return &wrappers.UInt32Value{Value: math.MaxUint32}
	}
	return &cluster.CircuitBreakers_Thresholds{
		MaxRetries:         unlimited(),
		MaxRequests:        unlimited(),
		MaxConnections:     unlimited(),
		MaxPendingRequests: unlimited(),
		TrackRemaining:     true,
	}
}
// applyOutlierDetection translates DestinationRule OutlierDetection into Envoy outlier
// detection config on the cluster, and adjusts the panic threshold accordingly.
// FIXME: there isn't a way to distinguish between unset values and zero values
func applyOutlierDetection(c *cluster.Cluster, outlier *networking.OutlierDetection) {
	if outlier == nil {
		return
	}
	out := &cluster.OutlierDetection{}
	// SuccessRate based outlier detection should be disabled.
	out.EnforcingSuccessRate = &wrapperspb.UInt32Value{Value: 0}
	if e := outlier.Consecutive_5XxErrors; e != nil {
		v := e.GetValue()
		out.Consecutive_5Xx = &wrapperspb.UInt32Value{Value: v}
		// Enforce at 100% when a positive threshold is set; a threshold of 0 means
		// the user disabled this detector (enforcement percentage 0).
		if v > 0 {
			v = 100
		}
		out.EnforcingConsecutive_5Xx = &wrapperspb.UInt32Value{Value: v}
	}
	if e := outlier.ConsecutiveGatewayErrors; e != nil {
		v := e.GetValue()
		out.ConsecutiveGatewayFailure = &wrapperspb.UInt32Value{Value: v}
		// Same enforcement rule as for 5xx above.
		if v > 0 {
			v = 100
		}
		out.EnforcingConsecutiveGatewayFailure = &wrapperspb.UInt32Value{Value: v}
	}
	if outlier.Interval != nil {
		out.Interval = outlier.Interval
	}
	if outlier.BaseEjectionTime != nil {
		out.BaseEjectionTime = outlier.BaseEjectionTime
	}
	if outlier.MaxEjectionPercent > 0 {
		out.MaxEjectionPercent = &wrapperspb.UInt32Value{Value: uint32(outlier.MaxEjectionPercent)}
	}
	if outlier.SplitExternalLocalOriginErrors {
		out.SplitExternalLocalOriginErrors = true
		if outlier.ConsecutiveLocalOriginFailures.GetValue() > 0 {
			out.ConsecutiveLocalOriginFailure = &wrapperspb.UInt32Value{Value: outlier.ConsecutiveLocalOriginFailures.Value}
			out.EnforcingConsecutiveLocalOriginFailure = &wrapperspb.UInt32Value{Value: 100}
		}
		// SuccessRate based outlier detection should be disabled.
		out.EnforcingLocalOriginSuccessRate = &wrapperspb.UInt32Value{Value: 0}
	}
	c.OutlierDetection = out
	// Disable panic threshold by default as its not typically applicable in k8s environments
	// with few pods per service.
	// To do so, set the healthy_panic_threshold field even if its value is 0 (defaults to 50 in Envoy).
	// FIXME: we can't distinguish between it being unset or being explicitly set to 0
	minHealthPercent := outlier.MinHealthPercent
	if minHealthPercent >= 0 {
		// When we are sending unhealthy endpoints, we should disable Panic Threshold. Otherwise
		// Envoy will send traffic to "Unready" pods when the percentage of healthy hosts fall
		// below minimum health percentage.
		if features.SendUnhealthyEndpoints.Load() {
			minHealthPercent = 0
		}
		c.CommonLbConfig.HealthyPanicThreshold = &xdstype.Percent{Value: float64(minHealthPercent)}
	}
}
// ApplyRingHashLoadBalancer will set the LbPolicy and create an LbConfig for RING_HASH
// (or MAGLEV) if consistent hashing is used in LoadBalancerSettings. It is a no-op
// when no consistent-hash settings are present.
func ApplyRingHashLoadBalancer(c *cluster.Cluster, lb *networking.LoadBalancerSettings) {
	consistentHash := lb.GetConsistentHash()
	if consistentHash == nil {
		return
	}
	if maglev := consistentHash.GetMaglev(); maglev != nil {
		c.LbPolicy = cluster.Cluster_MAGLEV
		if maglev.TableSize != 0 {
			c.LbConfig = &cluster.Cluster_MaglevLbConfig_{
				MaglevLbConfig: &cluster.Cluster_MaglevLbConfig{
					TableSize: &wrapperspb.UInt64Value{Value: maglev.TableSize},
				},
			}
		}
		return
	}
	if ringHash := consistentHash.GetRingHash(); ringHash != nil {
		c.LbPolicy = cluster.Cluster_RING_HASH
		if ringHash.MinimumRingSize != 0 {
			c.LbConfig = &cluster.Cluster_RingHashLbConfig_{
				RingHashLbConfig: &cluster.Cluster_RingHashLbConfig{
					MinimumRingSize: &wrapperspb.UInt64Value{Value: ringHash.MinimumRingSize},
				},
			}
		}
		return
	}
	// Neither hash algorithm set: check the deprecated top-level MinimumRingSize.
	// TODO: MinimumRingSize is an int, and zero could potentially
	// be a valid value unable to distinguish between set and unset
	// case currently.
	// 1024 is the default value for envoy.
	ringSize := uint64(1024)
	if consistentHash.MinimumRingSize != 0 { // nolint: staticcheck
		ringSize = consistentHash.GetMinimumRingSize() // nolint: staticcheck
	}
	c.LbPolicy = cluster.Cluster_RING_HASH
	c.LbConfig = &cluster.Cluster_RingHashLbConfig_{
		RingHashLbConfig: &cluster.Cluster_RingHashLbConfig{
			MinimumRingSize: &wrapperspb.UInt64Value{Value: ringSize},
		},
	}
}
// applyUpstreamProxyProtocol wraps the cluster's transport socket — and every entry in
// its transport socket matches — in an upstream PROXY-protocol transport, using the
// protocol version from the traffic policy. No-op when proxyProtocol is nil.
func (cb *ClusterBuilder) applyUpstreamProxyProtocol(
	opts *buildClusterOpts,
	proxyProtocol *networking.TrafficPolicy_ProxyProtocol,
) {
	if proxyProtocol == nil {
		return
	}
	// wrap layers a PROXY-protocol transport around an existing transport socket.
	// Factored out so the single-socket and socket-matches paths stay identical.
	wrap := func(inner *core.TransportSocket) *core.TransportSocket {
		return &core.TransportSocket{
			Name: "envoy.transport_sockets.upstream_proxy_protocol",
			ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(&proxyprotocol.ProxyProtocolUpstreamTransport{
				Config:          &core.ProxyProtocolConfig{Version: core.ProxyProtocolConfig_Version(proxyProtocol.Version)},
				TransportSocket: inner,
			})},
		}
	}
	c := opts.mutable
	if c.cluster.TransportSocket != nil {
		// add an upstream proxy protocol wrapper for transportSocket
		c.cluster.TransportSocket = wrap(c.cluster.TransportSocket)
	}
	// add an upstream proxy protocol wrapper for each transportSocket
	for _, tsm := range c.cluster.TransportSocketMatches {
		tsm.TransportSocket = wrap(tsm.TransportSocket)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"time"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
http "github.com/envoyproxy/go-control-plane/envoy/extensions/upstreams/http/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
metadata "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/spiffe"
)
// buildInternalUpstreamCluster builds a single endpoint cluster to the internal listener.
func buildInternalUpstreamCluster(name string, internalListener string) *cluster.Cluster {
	c := &cluster.Cluster{
		Name:                 name,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_STATIC},
		TransportSocket:      util.DefaultInternalUpstreamTransportSocket,
		TypedExtensionProtocolOptions: map[string]*anypb.Any{
			v3.HttpProtocolOptionsType: passthroughHttpProtocolOptions,
		},
	}
	// Single static endpoint pointing at the named internal listener.
	c.LoadAssignment = &endpoint.ClusterLoadAssignment{
		ClusterName: name,
		Endpoints:   util.BuildInternalEndpoint(internalListener, nil),
	}
	return c
}
var (
	// MainInternalCluster routes to the main internal listener (same name used
	// for both the cluster and its target listener).
	MainInternalCluster = buildInternalUpstreamCluster(MainInternalName, MainInternalName)
	// EncapCluster routes to the connect-originate internal listener.
	EncapCluster = buildInternalUpstreamCluster(EncapClusterName, ConnectOriginate)
)
// buildInboundHBONEClusters returns the shared MainInternalCluster used for
// inbound HBONE traffic; the same instance is reused across generations.
func (configgen *ConfigGeneratorImpl) buildInboundHBONEClusters() *cluster.Cluster {
	return MainInternalCluster
}
// buildWaypointInboundClusters assembles the full inbound cluster set for a
// waypoint proxy: the static main-internal and encap clusters, the per-VIP
// load-balancing clusters, and the CONNECT origination upstream.
func (configgen *ConfigGeneratorImpl) buildWaypointInboundClusters(
	cb *ClusterBuilder,
	proxy *model.Proxy,
	push *model.PushContext,
	svcs map[host.Name]*model.Service,
) []*cluster.Cluster {
	// "main_internal" routes to the main internal listener; "encap" routes to
	// the encap listener.
	out := []*cluster.Cluster{MainInternalCluster, EncapCluster}
	// Per-VIP load balancing upstreams.
	out = append(out, cb.buildWaypointInboundVIP(proxy, svcs)...)
	// Upstream of the "encap" listener.
	out = append(out, cb.buildWaypointConnectOriginate(proxy, push))
	// Sanity check: a cluster must not carry both a plain transport socket and
	// transport socket matches.
	for _, c := range out {
		if c.TransportSocket != nil && c.TransportSocketMatches != nil {
			log.Errorf("invalid cluster, multiple matches: %v", c.Name)
		}
	}
	return out
}
// `inbound-vip||hostname|port`. EDS routing to the internal listener for each pod in the VIP.
func (cb *ClusterBuilder) buildWaypointInboundVIPCluster(svc *model.Service, port model.Port, subset string) *clusterWrapper {
	name := model.BuildSubsetKey(model.TrafficDirectionInboundVIP, subset, svc.Hostname, port.Port)
	wrapper := cb.buildCluster(name, cluster.Cluster_EDS, nil,
		model.TrafficDirectionInbound, &port, nil, nil)
	// Ensure VIP cluster has services metadata for stats filter usage
	meta := getOrCreateIstioMetadata(wrapper.cluster)
	meta.Fields["services"] = &structpb.Value{
		Kind: &structpb.Value_ListValue{
			ListValue: &structpb.ListValue{
				Values: []*structpb.Value{buildServiceMetadata(svc)},
			},
		},
	}
	// no TLS, we are just going to internal address
	wrapper.cluster.TransportSocketMatches = nil
	wrapper.cluster.TransportSocket = util.TunnelHostInternalUpstreamTransportSocket
	maybeApplyEdsConfig(wrapper.cluster)
	return wrapper
}
// `inbound-vip|protocol|hostname|port`. EDS routing to the internal listener for each pod in the VIP.
//
// For each non-UDP service port, a "tcp" and/or "http" cluster is built
// (both when the protocol cannot be determined), plus one variant per
// DestinationRule subset.
func (cb *ClusterBuilder) buildWaypointInboundVIP(proxy *model.Proxy, svcs map[host.Name]*model.Service) []*cluster.Cluster {
	clusters := []*cluster.Cluster{}
	for _, svc := range svcs {
		// The DestinationRule depends only on the service hostname, not the
		// port: resolve it once per service instead of once per port.
		var subsets []*networking.Subset
		if cfg := cb.sidecarScope.DestinationRule(model.TrafficDirectionInbound, proxy, svc.Hostname).GetRule(); cfg != nil {
			subsets = cfg.Spec.(*networking.DestinationRule).Subsets
		}
		for _, port := range svc.Ports {
			if port.Protocol == protocol.UDP {
				continue
			}
			// Unknown protocols get both a TCP and an HTTP cluster.
			buildTCP := port.Protocol.IsUnsupported() || port.Protocol.IsTCP()
			buildHTTP := port.Protocol.IsUnsupported() || port.Protocol.IsHTTP()
			if buildTCP {
				clusters = append(clusters, cb.buildWaypointInboundVIPCluster(svc, *port, "tcp").build())
			}
			if buildHTTP {
				clusters = append(clusters, cb.buildWaypointInboundVIPCluster(svc, *port, "http").build())
			}
			for _, ss := range subsets {
				if buildTCP {
					clusters = append(clusters, cb.buildWaypointInboundVIPCluster(svc, *port, "tcp/"+ss.Name).build())
				}
				if buildHTTP {
					clusters = append(clusters, cb.buildWaypointInboundVIPCluster(svc, *port, "http/"+ss.Name).build())
				}
			}
		}
	}
	return clusters
}
// CONNECT origination cluster
func (cb *ClusterBuilder) buildWaypointConnectOriginate(proxy *model.Proxy, push *model.PushContext) *cluster.Cluster {
	// Restrict upstream SAN to waypoint scope.
	scope := proxy.WaypointScope()
	var san *matcher.StringMatcher
	if scope.ServiceAccount == "" {
		// No specific service account: accept any identity in the scope's namespace.
		san = &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Prefix{
			Prefix: spiffe.URIPrefix + spiffe.GetTrustDomain() + "/ns/" + scope.Namespace + "/sa/",
		}}
	} else {
		san = &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{
			Exact: spiffe.MustGenSpiffeURI(scope.Namespace, scope.ServiceAccount),
		}}
	}
	return cb.buildConnectOriginate(proxy, push, san)
}
// buildConnectOriginate builds the ORIGINAL_DST cluster used to originate
// CONNECT tunnels, optionally restricting the accepted upstream SAN.
func (cb *ClusterBuilder) buildConnectOriginate(proxy *model.Proxy, push *model.PushContext, uriSanMatcher *matcher.StringMatcher) *cluster.Cluster {
	tlsCtx := buildCommonConnectTLSContext(proxy, push)
	validation := tlsCtx.GetCombinedValidationContext().DefaultValidationContext
	if uriSanMatcher != nil {
		validation.MatchTypedSubjectAltNames = append(validation.MatchTypedSubjectAltNames, &tls.SubjectAltNameMatcher{
			SanType: tls.SubjectAltNameMatcher_URI,
			Matcher: uriSanMatcher,
		})
	}
	// Send to the original destination, overriding the port with the HBONE
	// inbound listen port.
	lbConfig := &cluster.Cluster_OriginalDstLbConfig_{
		OriginalDstLbConfig: &cluster.Cluster_OriginalDstLbConfig{
			UpstreamPortOverride: &wrappers.UInt32Value{
				Value: model.HBoneInboundListenPort,
			},
			// Used to override destination pods with waypoints.
			MetadataKey: &metadata.MetadataKey{
				Key: util.OriginalDstMetadataKey,
				Path: []*metadata.MetadataKey_PathSegment{{
					Segment: &metadata.MetadataKey_PathSegment_Key{
						Key: "waypoint",
					},
				}},
			},
		},
	}
	transport := &core.TransportSocket{
		Name: "tls",
		ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(&tls.UpstreamTlsContext{
			CommonTlsContext: tlsCtx,
		})},
	}
	return &cluster.Cluster{
		Name:                          ConnectOriginate,
		ClusterDiscoveryType:          &cluster.Cluster_Type{Type: cluster.Cluster_ORIGINAL_DST},
		LbPolicy:                      cluster.Cluster_CLUSTER_PROVIDED,
		ConnectTimeout:                durationpb.New(2 * time.Second),
		CleanupInterval:               durationpb.New(60 * time.Second),
		TypedExtensionProtocolOptions: h2connectUpgrade(),
		LbConfig:                      lbConfig,
		TransportSocket:               transport,
	}
}
// h2connectUpgrade returns HTTP protocol options that force HTTP/2 upstream
// with the CONNECT method allowed.
func h2connectUpgrade() map[string]*anypb.Any {
	opts := &http.HttpProtocolOptions{
		UpstreamProtocolOptions: &http.HttpProtocolOptions_ExplicitHttpConfig_{ExplicitHttpConfig: &http.HttpProtocolOptions_ExplicitHttpConfig{
			ProtocolConfig: &http.HttpProtocolOptions_ExplicitHttpConfig_Http2ProtocolOptions{
				Http2ProtocolOptions: &core.Http2ProtocolOptions{
					AllowConnect: true,
				},
			},
		}},
	}
	return map[string]*anypb.Any{
		v3.HttpProtocolOptionsType: protoconv.MessageToAny(opts),
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
)
// ConfigGeneratorImpl generates xDS configuration.
type ConfigGeneratorImpl struct {
	// Cache is the XDS cache shared by the generator.
	Cache model.XdsCache
}
// NewConfigGenerator constructs a ConfigGeneratorImpl backed by the given XDS cache.
func NewConfigGenerator(cache model.XdsCache) *ConfigGeneratorImpl {
	return &ConfigGeneratorImpl{Cache: cache}
}
// MeshConfigChanged is called when mesh config is changed.
func (configgen *ConfigGeneratorImpl) MeshConfigChanged(_ *meshconfig.MeshConfig) {
	// Reset cached access-log state so it is rebuilt after the config change.
	accessLogBuilder.reset()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
	"fmt"
	"slices"

	cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"google.golang.org/protobuf/proto"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/networking/util"
	"istio.io/istio/pilot/pkg/util/runtime"
	"istio.io/istio/pkg/config/host"
	"istio.io/istio/pkg/log"
	"istio.io/istio/pkg/proto/merge"
)
// ApplyClusterMerge processes the MERGE operation and merges the supplied configuration to the matched clusters.
func ApplyClusterMerge(pctx networking.EnvoyFilter_PatchContext, efw *model.EnvoyFilterWrapper,
	c *cluster.Cluster, hosts []host.Name,
) (out *cluster.Cluster) {
	defer runtime.HandleCrash(runtime.LogPanic, func(any) {
		log.Errorf("clusters patch %s/%s caused panic, so the patches did not take effect", efw.Namespace, efw.Name)
		IncrementEnvoyFilterErrorMetric(Cluster)
	})
	// In case the patches cause panic, use the clusters generated before to reduce the influence.
	out = c
	if efw == nil {
		return
	}
	for _, patch := range efw.Patches[networking.EnvoyFilter_CLUSTER] {
		if patch.Operation != networking.EnvoyFilter_Patch_MERGE {
			IncrementEnvoyFilterMetric(patch.Key(), Cluster, false)
			continue
		}
		applied := false
		if commonConditionMatch(pctx, patch) && clusterMatch(c, patch, hosts) {
			// Transport-socket patches need dedicated handling; fall back to a
			// generic proto merge only when that path did not consume the patch.
			tsMerged, err := mergeTransportSocketCluster(c, patch)
			if err != nil {
				log.Debugf("Merge of transport socket failed for cluster: %v", err)
				continue
			}
			applied = true
			if !tsMerged {
				merge.Merge(c, patch.Value)
			}
		}
		IncrementEnvoyFilterMetric(patch.Key(), Cluster, applied)
	}
	return c
}
// Test if the patch contains a config for TransportSocket
// Returns a boolean indicating if the merge was handled by this function; if false, it should still be called
// outside of this function.
func mergeTransportSocketCluster(c *cluster.Cluster, cp *model.EnvoyFilterConfigPatchWrapper) (merged bool, err error) {
	cpValueCast, okCpCast := (cp.Value).(*cluster.Cluster)
	if !okCpCast {
		return false, fmt.Errorf("cast of cp.Value failed: %v", okCpCast)
	}
	// Check if cluster patch has a transport socket.
	if cpValueCast.GetTransportSocket() == nil {
		// Nothing transport-socket-specific in the patch; let the caller run the generic merge.
		return false, nil
	}
	var ts *core.TransportSocket
	// First check if the transport socket matches with any cluster transport socket matches.
	if len(c.GetTransportSocketMatches()) > 0 {
		for _, tsm := range c.GetTransportSocketMatches() {
			if tsm.GetTransportSocket() != nil && cpValueCast.GetTransportSocket().Name == tsm.GetTransportSocket().Name {
				ts = tsm.GetTransportSocket()
				break
			}
		}
		if ts == nil {
			// If we merged we would get both a transport_socket and transport_socket_matches which is not valid
			// Drop the filter, but indicate that we handled the merge so that the outer function does not try
			// to merge it again
			return true, nil
		}
	} else if c.GetTransportSocket() != nil {
		// No matches present: compare against the cluster's single transport socket by name.
		if cpValueCast.GetTransportSocket().Name == c.GetTransportSocket().Name {
			ts = c.GetTransportSocket()
		}
	}
	// This means either there is a name mismatch or cluster does not have transport socket matches/transport socket.
	// We cannot do a deep merge. Instead just replace the transport socket
	if ts == nil {
		c.TransportSocket = cpValueCast.TransportSocket
	} else {
		// Merge the patch and the cluster at a lower level
		dst := ts.GetTypedConfig()
		srcPatch := cpValueCast.GetTransportSocket().GetTypedConfig()
		if dst != nil && srcPatch != nil {
			retVal, errMerge := util.MergeAnyWithAny(dst, srcPatch)
			if errMerge != nil {
				return false, fmt.Errorf("function MergeAnyWithAny failed for ApplyClusterMerge: %v", errMerge)
			}
			// Merge the above result with the whole cluster
			merge.Merge(dst, retVal)
		}
	}
	return true, nil
}
// ShouldKeepCluster checks if there is a REMOVE patch on the cluster, returns false if there is one so that it is removed.
func ShouldKeepCluster(pctx networking.EnvoyFilter_PatchContext, efw *model.EnvoyFilterWrapper, c *cluster.Cluster, hosts []host.Name) bool {
	if efw == nil {
		return true
	}
	for _, patch := range efw.Patches[networking.EnvoyFilter_CLUSTER] {
		if patch.Operation != networking.EnvoyFilter_Patch_REMOVE {
			continue
		}
		// A matching REMOVE patch drops the cluster.
		if commonConditionMatch(pctx, patch) && clusterMatch(c, patch, hosts) {
			return false
		}
	}
	return true
}
// InsertedClusters collects all clusters that are added via ADD operation and match the patch context.
func InsertedClusters(pctx networking.EnvoyFilter_PatchContext, efw *model.EnvoyFilterWrapper) []*cluster.Cluster {
	if efw == nil {
		return nil
	}
	var result []*cluster.Cluster
	// Add cluster if the operation is add, and patch context matches
	for _, patch := range efw.Patches[networking.EnvoyFilter_CLUSTER] {
		if patch.Operation != networking.EnvoyFilter_Patch_ADD {
			continue
		}
		// If cluster ADD patch does not specify a patch context, only add for sidecar outbound and gateway.
		if patch.Match.Context == networking.EnvoyFilter_ANY &&
			pctx != networking.EnvoyFilter_SIDECAR_OUTBOUND &&
			pctx != networking.EnvoyFilter_GATEWAY {
			continue
		}
		if commonConditionMatch(pctx, patch) {
			result = append(result, proto.Clone(patch.Value).(*cluster.Cluster))
		}
	}
	return result
}
// clusterMatch reports whether the patch's cluster match conditions select the
// given cluster. An empty match selects everything; an explicit Name overrides
// the subset/service/port conditions.
func clusterMatch(c *cluster.Cluster, cp *model.EnvoyFilterConfigPatchWrapper, hosts []host.Name) bool {
	match := cp.Match.GetCluster()
	if match == nil {
		return true
	}
	if match.Name != "" {
		return match.Name == c.Name
	}
	direction, subset, hostname, port := model.ParseSubsetKey(c.Name)
	candidates := []host.Name{hostname}
	// For inbound clusters, host parsed from subset key will be empty. Use the passed in service name.
	if direction == model.TrafficDirectionInbound && len(hosts) > 0 {
		candidates = hosts
	}
	if match.Subset != "" && match.Subset != subset {
		return false
	}
	if match.Service != "" && !hostContains(candidates, host.Name(match.Service)) {
		return false
	}
	// FIXME: Ports on a cluster can be 0. the API only takes uint32 for ports
	// We should either make that field in API as a wrapper type or switch to int
	if match.PortNumber != 0 && int(match.PortNumber) != port {
		return false
	}
	return true
}
// hostContains reports whether service is present in hosts.
func hostContains(hosts []host.Name, service host.Name) bool {
	// Use the stdlib generic helper instead of a hand-rolled loop.
	return slices.Contains(hosts, service)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"google.golang.org/protobuf/proto"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// InsertedExtensionConfigurations returns extension configurations added via EnvoyFilter.
func InsertedExtensionConfigurations(efw *model.EnvoyFilterWrapper, names []string) []*core.TypedExtensionConfig {
	result := make([]*core.TypedExtensionConfig, 0)
	if efw == nil {
		return result
	}
	wanted := sets.New(names...)
	for _, patch := range efw.Patches[networking.EnvoyFilter_EXTENSION_CONFIG] {
		if patch.Operation != networking.EnvoyFilter_Patch_ADD {
			continue
		}
		typedConfig, ok := patch.Value.(*core.TypedExtensionConfig)
		if !ok {
			log.Errorf("extension config patch %+v does not match TypeExtensionConfig type", patch.Value)
			continue
		}
		// Only return configs whose name was explicitly requested.
		if wanted.Contains(typedConfig.GetName()) {
			result = append(result, proto.Clone(patch.Value).(*core.TypedExtensionConfig))
		}
	}
	return result
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
"fmt"
"strings"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"google.golang.org/protobuf/proto"
anypb "google.golang.org/protobuf/types/known/anypb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/util/runtime"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto/merge"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/wellknown"
)
// ApplyListenerPatches applies patches to LDS output
func ApplyListenerPatches(
	patchContext networking.EnvoyFilter_PatchContext,
	efw *model.EnvoyFilterWrapper,
	lis []*listener.Listener,
	skipAdds bool,
) (out []*listener.Listener) {
	defer runtime.HandleCrash(runtime.LogPanic, func(any) {
		IncrementEnvoyFilterErrorMetric(Listener)
		log.Errorf("listeners patch %s/%s caused panic, so the patches did not take effect", efw.Namespace, efw.Name)
	})
	// In case the patches cause panic, use the listeners generated before to reduce the influence.
	out = lis
	if efw != nil {
		out = patchListeners(patchContext, efw, lis, skipAdds)
	}
	return out
}
// patchListeners applies listener-level and nested patches to each listener,
// then appends listeners created by ADD patches (unless skipAdds), and finally
// drops listeners that REMOVE patches marked by clearing their name.
func patchListeners(
	patchContext networking.EnvoyFilter_PatchContext,
	efw *model.EnvoyFilterWrapper,
	listeners []*listener.Listener,
	skipAdds bool,
) []*listener.Listener {
	removedAny := false
	// do all the changes for a single envoy filter crd object. [including adds]
	// then move on to the next one
	// only removes/merges plus next level object operations [add/remove/merge]
	for _, l := range listeners {
		if l.Name == "" {
			// removed by another op
			continue
		}
		patchListener(patchContext, efw.Patches, l, &removedAny)
	}
	// adds at listener level if enabled
	if !skipAdds {
		for _, lp := range efw.Patches[networking.EnvoyFilter_LISTENER] {
			if lp.Operation != networking.EnvoyFilter_Patch_ADD {
				continue
			}
			// If listener ADD patch does not specify a patch context, only add for sidecar outbound and gateway.
			if lp.Match.Context == networking.EnvoyFilter_ANY && patchContext != networking.EnvoyFilter_SIDECAR_OUTBOUND &&
				patchContext != networking.EnvoyFilter_GATEWAY {
				continue
			}
			if !commonConditionMatch(patchContext, lp) {
				IncrementEnvoyFilterMetric(lp.Key(), Listener, false)
				continue
			}
			// clone before append. Otherwise, subsequent operations on this listener will corrupt
			// the master value stored in CP.
			listeners = append(listeners, proto.Clone(lp.Value).(*listener.Listener))
			IncrementEnvoyFilterMetric(lp.Key(), Listener, true)
		}
	}
	if !removedAny {
		return listeners
	}
	return slices.FilterInPlace(listeners, func(l *listener.Listener) bool {
		return l.Name != ""
	})
}
// patchListener applies listener-level REMOVE/MERGE patches to a single
// listener, then recurses into its listener filters and filter chains. REMOVE
// is signaled by clearing the listener name; the caller filters those out.
func patchListener(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener, listenersRemoved *bool,
) {
	for _, lp := range patches[networking.EnvoyFilter_LISTENER] {
		matched := commonConditionMatch(patchContext, lp) && listenerMatch(lis, lp)
		IncrementEnvoyFilterMetric(lp.Key(), Listener, matched)
		if !matched {
			continue
		}
		switch lp.Operation {
		case networking.EnvoyFilter_Patch_REMOVE:
			lis.Name = ""
			*listenersRemoved = true
			// terminate the function here as we have nothing more do to for this listener
			return
		case networking.EnvoyFilter_Patch_MERGE:
			merge.Merge(lis, lp.Value)
		}
	}
	patchListenerFilters(patchContext, patches[networking.EnvoyFilter_LISTENER_FILTER], lis)
	patchFilterChains(patchContext, patches, lis)
}
// patchListenerFilters patches passed in listener filters with listener filter patches.
//
// Supported operations: ADD (append), INSERT_FIRST (prepend), INSERT_AFTER and
// INSERT_BEFORE (positional, relative to the first matching filter), REPLACE,
// and REMOVE. Every patch records exactly one metric entry stating whether it
// was applied. (Previously several shortcut paths `continue`d past the metric,
// so successfully-applied no-match INSERT_AFTER/INSERT_BEFORE patches and all
// REMOVE patches were reported incorrectly or not at all.)
func patchListenerFilters(patchContext networking.EnvoyFilter_PatchContext,
	patches []*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener,
) {
	for _, lp := range patches {
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(lis, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), ListenerFilter, false)
			continue
		}
		applied := false
		switch lp.Operation {
		case networking.EnvoyFilter_Patch_ADD:
			lis.ListenerFilters = append(lis.ListenerFilters, proto.Clone(lp.Value).(*listener.ListenerFilter))
			applied = true
		case networking.EnvoyFilter_Patch_INSERT_FIRST:
			lis.ListenerFilters = append([]*listener.ListenerFilter{proto.Clone(lp.Value).(*listener.ListenerFilter)}, lis.ListenerFilters...)
			applied = true
		case networking.EnvoyFilter_Patch_INSERT_AFTER:
			// Insert after without a filter match is same as ADD in the end
			if !hasListenerFilterMatch(lp) {
				lis.ListenerFilters = append(lis.ListenerFilters, proto.Clone(lp.Value).(*listener.ListenerFilter))
				applied = true
				break
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(lis.ListenerFilters); i++ {
				if listenerFilterMatch(lis.ListenerFilters[i], lp) {
					insertPosition = i + 1
					break
				}
			}
			if insertPosition == -1 {
				break
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*listener.ListenerFilter)
			lis.ListenerFilters = append(lis.ListenerFilters, clonedVal)
			if insertPosition < len(lis.ListenerFilters)-1 {
				// Shift the tail right by one and drop the clone into its slot.
				copy(lis.ListenerFilters[insertPosition+1:], lis.ListenerFilters[insertPosition:])
				lis.ListenerFilters[insertPosition] = clonedVal
			}
		case networking.EnvoyFilter_Patch_INSERT_BEFORE:
			// insert before without a filter match is same as insert in the beginning
			if !hasListenerFilterMatch(lp) {
				lis.ListenerFilters = append([]*listener.ListenerFilter{proto.Clone(lp.Value).(*listener.ListenerFilter)}, lis.ListenerFilters...)
				applied = true
				break
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(lis.ListenerFilters); i++ {
				if listenerFilterMatch(lis.ListenerFilters[i], lp) {
					insertPosition = i
					break
				}
			}
			// If matching filter is not found, then don't insert and continue.
			if insertPosition == -1 {
				break
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*listener.ListenerFilter)
			lis.ListenerFilters = append(lis.ListenerFilters, clonedVal)
			copy(lis.ListenerFilters[insertPosition+1:], lis.ListenerFilters[insertPosition:])
			lis.ListenerFilters[insertPosition] = clonedVal
		case networking.EnvoyFilter_Patch_REPLACE:
			if !hasListenerFilterMatch(lp) {
				break
			}
			// find the matching filter first
			replacePosition := -1
			for i := 0; i < len(lis.ListenerFilters); i++ {
				if listenerFilterMatch(lis.ListenerFilters[i], lp) {
					replacePosition = i
					break
				}
			}
			if replacePosition == -1 {
				break
			}
			applied = true
			lis.ListenerFilters[replacePosition] = proto.Clone(lp.Value).(*listener.ListenerFilter)
		case networking.EnvoyFilter_Patch_REMOVE:
			if !hasListenerFilterMatch(lp) {
				break
			}
			// Report success only when at least one filter was actually removed.
			prevLen := len(lis.ListenerFilters)
			lis.ListenerFilters = slices.FilterInPlace(lis.ListenerFilters, func(filter *listener.ListenerFilter) bool {
				return !listenerFilterMatch(filter, lp)
			})
			applied = len(lis.ListenerFilters) != prevLen
		}
		IncrementEnvoyFilterMetric(lp.Key(), ListenerFilter, applied)
	}
}
// patchFilterChains applies FILTER_CHAIN-scoped patches to every filter chain
// of the listener (including the default chain), appends chains created by ADD
// patches, and prunes chains that REMOVE patches emptied out.
func patchFilterChains(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener,
) {
	anyRemoved := false
	for i := range lis.FilterChains {
		if lis.FilterChains[i].Filters == nil {
			continue
		}
		patchFilterChain(patchContext, patches, lis, lis.FilterChains[i], &anyRemoved)
	}
	if def := lis.GetDefaultFilterChain(); def.GetFilters() != nil {
		defaultRemoved := false
		patchFilterChain(patchContext, patches, lis, def, &defaultRemoved)
		if defaultRemoved {
			lis.DefaultFilterChain = nil
		}
	}
	for _, lp := range patches[networking.EnvoyFilter_FILTER_CHAIN] {
		if lp.Operation != networking.EnvoyFilter_Patch_ADD {
			continue
		}
		matched := commonConditionMatch(patchContext, lp) && listenerMatch(lis, lp)
		IncrementEnvoyFilterMetric(lp.Key(), FilterChain, matched)
		if matched {
			lis.FilterChains = append(lis.FilterChains, proto.Clone(lp.Value).(*listener.FilterChain))
		}
	}
	if anyRemoved {
		// Removed chains were marked by nil-ing out their filters.
		lis.FilterChains = slices.FilterInPlace(lis.FilterChains, func(fc *listener.FilterChain) bool {
			return fc.Filters != nil
		})
	}
}
// patchFilterChain applies FILTER_CHAIN-scoped REMOVE and MERGE patches to a
// single filter chain, then delegates to patchNetworkFilters for the filters
// inside it. Removal is signaled by clearing fc.Filters and setting
// *filterChainRemoved; the caller performs the actual deletion.
func patchFilterChain(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener,
	fc *listener.FilterChain, filterChainRemoved *bool,
) {
	for _, lp := range patches[networking.EnvoyFilter_FILTER_CHAIN] {
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(lis, lp) ||
			!filterChainMatch(lis, fc, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), FilterChain, false)
			continue
		}
		IncrementEnvoyFilterMetric(lp.Key(), FilterChain, true)
		if lp.Operation == networking.EnvoyFilter_Patch_REMOVE {
			fc.Filters = nil
			*filterChainRemoved = true
			// nothing more to do in other patches as we removed this filter chain
			return
		} else if lp.Operation == networking.EnvoyFilter_Patch_MERGE {
			// Transport-socket merges need special handling; only fall back to
			// the generic proto merge when that path did not consume the patch.
			merged, err := mergeTransportSocketListener(fc, lp)
			if err != nil {
				log.Debugf("merge of transport socket failed for listener: %v", err)
				continue
			}
			if !merged {
				merge.Merge(fc, lp.Value)
			}
		}
	}
	patchNetworkFilters(patchContext, patches, lis, fc)
}
// Test if the patch contains a config for TransportSocket
// Returns a boolean indicating if the merge was handled by this function; if false, it should still be called
// outside of this function.
func mergeTransportSocketListener(fc *listener.FilterChain, lp *model.EnvoyFilterConfigPatchWrapper) (merged bool, err error) {
	lpValueCast, ok := (lp.Value).(*listener.FilterChain)
	if !ok {
		return false, fmt.Errorf("cast of cp.Value failed: %v", ok)
	}
	// The patch must carry a transport socket and the filter chain must already
	// have one; otherwise report not-merged so the caller runs the generic merge
	// (which installs a brand new transport socket when the chain lacks one).
	if lpValueCast.GetTransportSocket() == nil || fc.GetTransportSocket() == nil {
		return false, nil
	}
	// A name mismatch means the patch replaces the socket wholesale; that is
	// also handled by the generic merge outside this function.
	if lpValueCast.GetTransportSocket().Name != fc.GetTransportSocket().Name {
		return false, nil
	}
	// Merge the patch and the listener at a lower level
	dstListener := fc.GetTransportSocket().GetTypedConfig()
	srcPatch := lpValueCast.GetTransportSocket().GetTypedConfig()
	if dstListener != nil && srcPatch != nil {
		retVal, errMerge := util.MergeAnyWithAny(dstListener, srcPatch)
		if errMerge != nil {
			return false, fmt.Errorf("function mergeAnyWithAny failed for doFilterChainOperation: %v", errMerge)
		}
		// Merge the above result with the whole listener
		merge.Merge(dstListener, retVal)
	}
	// We handled the transport socket merge; the outer merge.Merge() must be
	// skipped, or the patch would be applied twice.
	return true, nil
}
// patchNetworkFilters applies NETWORK_FILTER patches to the filters of a
// single filter chain, then applies per-filter MERGE patches via
// patchNetworkFilter.
//
// Supported operations mirror patchListenerFilters: ADD, INSERT_FIRST,
// INSERT_AFTER, INSERT_BEFORE, REPLACE, REMOVE. Every patch records exactly
// one metric entry stating whether it was applied. (Previously the
// INSERT_AFTER/INSERT_BEFORE no-match shortcuts `continue`d without setting
// the applied flag or recording the metric, and REMOVE never reported
// success.)
func patchNetworkFilters(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener, fc *listener.FilterChain,
) {
	for _, lp := range patches[networking.EnvoyFilter_NETWORK_FILTER] {
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(lis, lp) ||
			!filterChainMatch(lis, fc, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), NetworkFilter, false)
			continue
		}
		applied := false
		switch lp.Operation {
		case networking.EnvoyFilter_Patch_ADD:
			fc.Filters = append(fc.Filters, proto.Clone(lp.Value).(*listener.Filter))
			applied = true
		case networking.EnvoyFilter_Patch_INSERT_FIRST:
			fc.Filters = append([]*listener.Filter{proto.Clone(lp.Value).(*listener.Filter)}, fc.Filters...)
			applied = true
		case networking.EnvoyFilter_Patch_INSERT_AFTER:
			// Insert after without a filter match is same as ADD in the end
			if !hasNetworkFilterMatch(lp) {
				fc.Filters = append(fc.Filters, proto.Clone(lp.Value).(*listener.Filter))
				applied = true
				break
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(fc.Filters); i++ {
				if networkFilterMatch(fc.Filters[i], lp) {
					insertPosition = i + 1
					break
				}
			}
			if insertPosition == -1 {
				break
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*listener.Filter)
			fc.Filters = append(fc.Filters, clonedVal)
			if insertPosition < len(fc.Filters)-1 {
				// Shift the tail right by one and drop the clone into its slot.
				copy(fc.Filters[insertPosition+1:], fc.Filters[insertPosition:])
				fc.Filters[insertPosition] = clonedVal
			}
		case networking.EnvoyFilter_Patch_INSERT_BEFORE:
			// insert before without a filter match is same as insert in the beginning
			if !hasNetworkFilterMatch(lp) {
				fc.Filters = append([]*listener.Filter{proto.Clone(lp.Value).(*listener.Filter)}, fc.Filters...)
				applied = true
				break
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(fc.Filters); i++ {
				if networkFilterMatch(fc.Filters[i], lp) {
					insertPosition = i
					break
				}
			}
			// If matching filter is not found, then don't insert and continue.
			if insertPosition == -1 {
				break
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*listener.Filter)
			fc.Filters = append(fc.Filters, clonedVal)
			copy(fc.Filters[insertPosition+1:], fc.Filters[insertPosition:])
			fc.Filters[insertPosition] = clonedVal
		case networking.EnvoyFilter_Patch_REPLACE:
			if !hasNetworkFilterMatch(lp) {
				break
			}
			// find the matching filter first
			replacePosition := -1
			for i := 0; i < len(fc.Filters); i++ {
				if networkFilterMatch(fc.Filters[i], lp) {
					replacePosition = i
					break
				}
			}
			if replacePosition == -1 {
				break
			}
			applied = true
			fc.Filters[replacePosition] = proto.Clone(lp.Value).(*listener.Filter)
		case networking.EnvoyFilter_Patch_REMOVE:
			if !hasNetworkFilterMatch(lp) {
				break
			}
			// Report success only when at least one filter was actually removed.
			prevLen := len(fc.Filters)
			fc.Filters = slices.FilterInPlace(fc.Filters, func(filter *listener.Filter) bool {
				return !networkFilterMatch(filter, lp)
			})
			applied = len(fc.Filters) != prevLen
		}
		IncrementEnvoyFilterMetric(lp.Key(), NetworkFilter, applied)
	}
	// Apply per-filter MERGE patches to the surviving filters.
	for i := range fc.Filters {
		patchNetworkFilter(patchContext, patches, lis, fc, fc.Filters[i])
	}
}
// patchNetworkFilter applies MERGE operations from NETWORK_FILTER patches to
// the passed in network filter. Non-MERGE operations (ADD/INSERT_*/REPLACE/
// REMOVE) are handled by the caller before this function runs. If the
// (possibly renamed) filter is the HTTP connection manager, its inner HTTP
// filter chain is patched afterwards via patchHTTPFilters.
func patchNetworkFilter(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener, fc *listener.FilterChain,
	filter *listener.Filter,
) {
	for _, lp := range patches[networking.EnvoyFilter_NETWORK_FILTER] {
		// Skip patches whose context/listener/filter-chain/filter criteria do
		// not select this filter; record the miss in the metric.
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(lis, lp) ||
			!filterChainMatch(lis, fc, lp) ||
			!networkFilterMatch(filter, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), NetworkFilter, false)
			continue
		}
		if lp.Operation == networking.EnvoyFilter_Patch_MERGE {
			// proto merge doesn't work well when merging two filters with ANY typed configs
			// especially when the incoming cp.Value is a struct that could contain the json config
			// of an ANY typed filter. So convert our filter's typed config to Struct (retaining the any
			// typed output of json)
			if filter.GetTypedConfig() == nil {
				// TODO(rshriram): fixme
				// skip this op as we would possibly have to do a merge of Any with struct
				// which doesn't seem to work well.
				continue
			}
			userFilter := lp.Value.(*listener.Filter)
			var err error
			// we need to be able to overwrite filter names or simply empty out a filter's configs
			// as they could be supplied through per route filter configs
			filterName := filter.Name
			if userFilter.Name != "" {
				filterName = userFilter.Name
			}
			var retVal *anypb.Any
			if userFilter.GetTypedConfig() != nil {
				IncrementEnvoyFilterMetric(lp.Key(), NetworkFilter, true)
				// user has any typed struct
				// The type may not match up exactly. For example, if we use v2 internally but they use v3.
				// Assuming they are not using deprecated/new fields, we can safely swap out the TypeUrl
				// If we did not do this, merge.Merge below will panic (which is recovered), so even though this
				// is not 100% reliable its better than doing nothing
				// NOTE(review): this rewrites the TypeUrl on the shared patch
				// value (lp.Value), not a clone — presumably safe because all
				// HCM filters share one TypeUrl, but confirm.
				if userFilter.GetTypedConfig().TypeUrl != filter.GetTypedConfig().TypeUrl {
					userFilter.ConfigType.(*listener.Filter_TypedConfig).TypedConfig.TypeUrl = filter.GetTypedConfig().TypeUrl
				}
				// On merge failure, fall back to the unmodified original config.
				if retVal, err = util.MergeAnyWithAny(filter.GetTypedConfig(), userFilter.GetTypedConfig()); err != nil {
					retVal = filter.GetTypedConfig()
				}
			}
			filter.Name = filterName
			if retVal != nil {
				filter.ConfigType = &listener.Filter_TypedConfig{TypedConfig: retVal}
			}
		}
	}
	// The HTTP connection manager carries the HTTP filter chain inside its
	// typed config; recurse to apply HTTP_FILTER patches to it.
	if filter.Name == wellknown.HTTPConnectionManager {
		patchHTTPFilters(patchContext, patches, lis, fc, filter)
	}
}
// patchHTTPFilters applies HTTP_FILTER patches (ADD, INSERT_FIRST,
// INSERT_BEFORE, INSERT_AFTER, REPLACE, REMOVE) to the HTTP filter chain of
// the given HTTP connection manager network filter, then delegates MERGE
// operations to patchHTTPFilter for each surviving HTTP filter. The patched
// HttpConnectionManager is marshalled back into the network filter's typed
// config before returning.
func patchHTTPFilters(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	lis *listener.Listener, fc *listener.FilterChain, filter *listener.Filter,
) {
	httpconn := &hcm.HttpConnectionManager{}
	if filter.GetTypedConfig() != nil {
		if err := filter.GetTypedConfig().UnmarshalTo(httpconn); err != nil {
			return
			// todo: figure out a non noisy logging option here
			// as this loop will be called very frequently
		}
	}
	for _, lp := range patches[networking.EnvoyFilter_HTTP_FILTER] {
		applied := false
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(lis, lp) ||
			!filterChainMatch(lis, fc, lp) ||
			!networkFilterMatch(filter, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), HttpFilter, false)
			continue
		}
		if lp.Operation == networking.EnvoyFilter_Patch_ADD {
			applied = true
			httpconn.HttpFilters = append(httpconn.HttpFilters, proto.Clone(lp.Value).(*hcm.HttpFilter))
		} else if lp.Operation == networking.EnvoyFilter_Patch_INSERT_FIRST {
			// Fix: mark the patch as applied so the metric does not
			// under-report INSERT_FIRST patches (previously this branch left
			// applied == false even though the filter was inserted).
			applied = true
			httpconn.HttpFilters = append([]*hcm.HttpFilter{proto.Clone(lp.Value).(*hcm.HttpFilter)}, httpconn.HttpFilters...)
		} else if lp.Operation == networking.EnvoyFilter_Patch_INSERT_AFTER {
			// Insert after without a filter match is same as ADD in the end
			if !hasHTTPFilterMatch(lp) {
				httpconn.HttpFilters = append(httpconn.HttpFilters, proto.Clone(lp.Value).(*hcm.HttpFilter))
				continue
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(httpconn.HttpFilters); i++ {
				if httpFilterMatch(httpconn.HttpFilters[i], lp) {
					insertPosition = i + 1
					break
				}
			}
			if insertPosition == -1 {
				continue
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*hcm.HttpFilter)
			httpconn.HttpFilters = append(httpconn.HttpFilters, clonedVal)
			if insertPosition < len(httpconn.HttpFilters)-1 {
				copy(httpconn.HttpFilters[insertPosition+1:], httpconn.HttpFilters[insertPosition:])
				httpconn.HttpFilters[insertPosition] = clonedVal
			}
		} else if lp.Operation == networking.EnvoyFilter_Patch_INSERT_BEFORE {
			// insert before without a filter match is same as insert in the beginning
			if !hasHTTPFilterMatch(lp) {
				httpconn.HttpFilters = append([]*hcm.HttpFilter{proto.Clone(lp.Value).(*hcm.HttpFilter)}, httpconn.HttpFilters...)
				continue
			}
			// find the matching filter first
			insertPosition := -1
			for i := 0; i < len(httpconn.HttpFilters); i++ {
				if httpFilterMatch(httpconn.HttpFilters[i], lp) {
					insertPosition = i
					break
				}
			}
			if insertPosition == -1 {
				continue
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*hcm.HttpFilter)
			httpconn.HttpFilters = append(httpconn.HttpFilters, clonedVal)
			copy(httpconn.HttpFilters[insertPosition+1:], httpconn.HttpFilters[insertPosition:])
			httpconn.HttpFilters[insertPosition] = clonedVal
		} else if lp.Operation == networking.EnvoyFilter_Patch_REPLACE {
			if !hasHTTPFilterMatch(lp) {
				continue
			}
			// find the matching filter first
			replacePosition := -1
			for i := 0; i < len(httpconn.HttpFilters); i++ {
				if httpFilterMatch(httpconn.HttpFilters[i], lp) {
					replacePosition = i
					break
				}
			}
			if replacePosition == -1 {
				log.Debugf("EnvoyFilter patch %v is not applied because no matching HTTP filter found.", lp)
				continue
			}
			applied = true
			clonedVal := proto.Clone(lp.Value).(*hcm.HttpFilter)
			httpconn.HttpFilters[replacePosition] = clonedVal
		} else if lp.Operation == networking.EnvoyFilter_Patch_REMOVE {
			if !hasHTTPFilterMatch(lp) {
				continue
			}
			httpconn.HttpFilters = slices.FilterInPlace(httpconn.HttpFilters, func(h *hcm.HttpFilter) bool {
				return !httpFilterMatch(h, lp)
			})
		}
		IncrementEnvoyFilterMetric(lp.Key(), HttpFilter, applied)
	}
	// Apply MERGE patches to each HTTP filter that remains after the
	// structural operations above.
	for _, httpFilter := range httpconn.HttpFilters {
		patchHTTPFilter(patchContext, patches, lis, fc, filter, httpFilter)
	}
	if filter.GetTypedConfig() != nil {
		// convert to any type
		filter.ConfigType = &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(httpconn)}
	}
}
// patchHTTPFilter applies MERGE operations from HTTP_FILTER patches to the
// passed in HTTP filter. Structural operations (ADD/INSERT_*/REPLACE/REMOVE)
// are handled by the caller (patchHTTPFilters) before this runs; such patches
// are recorded as not applied here.
func patchHTTPFilter(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	listener *listener.Listener, fc *listener.FilterChain, filter *listener.Filter,
	httpFilter *hcm.HttpFilter,
) {
	for _, lp := range patches[networking.EnvoyFilter_HTTP_FILTER] {
		applied := false
		// Skip patches whose match criteria do not select this HTTP filter;
		// applied is still false at this point.
		if !commonConditionMatch(patchContext, lp) ||
			!listenerMatch(listener, lp) ||
			!filterChainMatch(listener, fc, lp) ||
			!networkFilterMatch(filter, lp) ||
			!httpFilterMatch(httpFilter, lp) {
			IncrementEnvoyFilterMetric(lp.Key(), HttpFilter, applied)
			continue
		}
		if lp.Operation == networking.EnvoyFilter_Patch_MERGE {
			// proto merge doesn't work well when merging two filters with ANY typed configs
			// especially when the incoming cp.Value is a struct that could contain the json config
			// of an ANY typed filter. So convert our filter's typed config to Struct (retaining the any
			// typed output of json)
			if httpFilter.GetTypedConfig() == nil {
				// TODO(rshriram): fixme
				// skip this op as we would possibly have to do a merge of Any with struct
				// which doesn't seem to work well.
				continue
			}
			userHTTPFilter := lp.Value.(*hcm.HttpFilter)
			var err error
			// we need to be able to overwrite filter names or simply empty out a filter's configs
			// as they could be supplied through per route filter configs
			httpFilterName := httpFilter.Name
			if userHTTPFilter.Name != "" {
				httpFilterName = userHTTPFilter.Name
			}
			var retVal *anypb.Any
			if userHTTPFilter.GetTypedConfig() != nil {
				// user has any typed struct
				// The type may not match up exactly. For example, if we use v2 internally but they use v3.
				// Assuming they are not using deprecated/new fields, we can safely swap out the TypeUrl
				// If we did not do this, merge.Merge below will panic (which is recovered), so even though this
				// is not 100% reliable its better than doing nothing
				// NOTE(review): this rewrites the TypeUrl on the shared patch
				// value (lp.Value), not a clone — confirm intended.
				if userHTTPFilter.GetTypedConfig().TypeUrl != httpFilter.GetTypedConfig().TypeUrl {
					userHTTPFilter.ConfigType.(*hcm.HttpFilter_TypedConfig).TypedConfig.TypeUrl = httpFilter.GetTypedConfig().TypeUrl
				}
				// On merge failure, fall back to the unmodified original config.
				if retVal, err = util.MergeAnyWithAny(httpFilter.GetTypedConfig(), userHTTPFilter.GetTypedConfig()); err != nil {
					retVal = httpFilter.GetTypedConfig()
				}
			}
			applied = true
			httpFilter.Name = httpFilterName
			if retVal != nil {
				httpFilter.ConfigType = &hcm.HttpFilter_TypedConfig{TypedConfig: retVal}
			}
		}
		IncrementEnvoyFilterMetric(lp.Key(), HttpFilter, applied)
	}
}
// listenerMatch reports whether the given listener is selected by the
// listener match criteria of the patch wrapper. An absent listener match
// selects every listener.
func listenerMatch(l *listener.Listener, lp *model.EnvoyFilterConfigPatchWrapper) bool {
	match := lp.Match.GetListener()
	if match == nil {
		return true
	}
	if match.Name != "" && match.Name != l.Name {
		return false
	}
	// skip listener port check for special virtual inbound and outbound listeners
	// to support portNumber listener filter field within those special listeners as well
	isVirtual := l.Name == model.VirtualInboundListenerName || l.Name == model.VirtualOutboundListenerName
	if isVirtual && lp.ApplyTo != networking.EnvoyFilter_LISTENER {
		return true
	}
	// FIXME: Ports on a listener can be 0. the API only takes uint32 for ports
	// We should either make that field in API as a wrapper type or switch to int
	if match.PortNumber == 0 {
		return true
	}
	sockAddr := l.Address.GetSocketAddress()
	return sockAddr != nil && sockAddr.GetPortValue() == match.PortNumber
}
// filterChainMatch reports whether the filter chain fc satisfies the filter
// chain match criteria of the patch wrapper. We assume that the parent
// listener has already been matched.
func filterChainMatch(l *listener.Listener, fc *listener.FilterChain, lp *model.EnvoyFilterConfigPatchWrapper) bool {
	lMatch := lp.Match.GetListener()
	if lMatch == nil {
		return true
	}
	// Virtual inbound/outbound listeners move the listener port into the
	// filter chain match, so the port is checked here instead of in
	// listenerMatch for those listeners.
	if l.Name == model.VirtualInboundListenerName || l.Name == model.VirtualOutboundListenerName {
		if lMatch.GetPortNumber() > 0 && fc.GetFilterChainMatch().GetDestinationPort().GetValue() != lMatch.GetPortNumber() {
			return false
		}
	}
	match := lMatch.FilterChain
	if match == nil {
		return true
	}
	if match.Name != "" && match.Name != fc.Name {
		return false
	}
	fcm := fc.FilterChainMatch
	if match.Sni != "" {
		// An empty/missing server name list cannot match any SNI.
		if fcm == nil || !slices.Contains(fcm.ServerNames, match.Sni) {
			return false
		}
	}
	if match.TransportProtocol != "" {
		if fcm == nil || fcm.TransportProtocol != match.TransportProtocol {
			return false
		}
	}
	if match.ApplicationProtocols != "" {
		if fcm == nil {
			return false
		}
		// Every comma-separated protocol in the match must be present.
		for _, ap := range strings.Split(match.ApplicationProtocols, ",") {
			if !slices.Contains(fcm.ApplicationProtocols, ap) {
				return false
			}
		}
	}
	// check match for destination port within the FilterChainMatch
	if match.DestinationPort > 0 {
		if fcm == nil || fcm.DestinationPort == nil || fcm.DestinationPort.Value != match.DestinationPort {
			return false
		}
	}
	return true
}
// hasListenerFilterMatch reports whether the patch names a specific listener filter.
func hasListenerFilterMatch(lp *model.EnvoyFilterConfigPatchWrapper) bool {
	if lMatch := lp.Match.GetListener(); lMatch != nil {
		return lMatch.ListenerFilter != ""
	}
	return false
}
// listenerFilterMatch reports whether the given listener filter is selected by
// the patch. A patch without a listener-filter match selects every filter.
// We assume that the parent listener has already been matched.
func listenerFilterMatch(filter *listener.ListenerFilter, cp *model.EnvoyFilterConfigPatchWrapper) bool {
	if hasListenerFilterMatch(cp) {
		return cp.Match.GetListener().ListenerFilter == filter.Name
	}
	return true
}
// hasNetworkFilterMatch reports whether the patch names a specific network
// filter within a filter chain match.
func hasNetworkFilterMatch(lp *model.EnvoyFilterConfigPatchWrapper) bool {
	lMatch := lp.Match.GetListener()
	if lMatch == nil || lMatch.FilterChain == nil {
		return false
	}
	return lMatch.FilterChain.Filter != nil
}
// networkFilterMatch reports whether the given network filter is selected by
// the patch. A patch without a network-filter match selects every filter.
// We assume that the parent listener and filter chain have already been matched.
func networkFilterMatch(filter *listener.Filter, cp *model.EnvoyFilterConfigPatchWrapper) bool {
	if hasNetworkFilterMatch(cp) {
		return cp.Match.GetListener().FilterChain.Filter.Name == filter.Name
	}
	return true
}
// hasHTTPFilterMatch reports whether the patch names a specific HTTP filter
// (a sub-filter of a matched network filter).
func hasHTTPFilterMatch(lp *model.EnvoyFilterConfigPatchWrapper) bool {
	return hasNetworkFilterMatch(lp) && lp.Match.GetListener().FilterChain.Filter.SubFilter != nil
}
// httpFilterMatch reports whether the given HTTP filter is selected by the
// patch. A patch without an HTTP-filter (sub-filter) match selects every
// filter. We assume that the parent listener, filter chain, and network
// filter have already been matched.
func httpFilterMatch(filter *hcm.HttpFilter, lp *model.EnvoyFilterConfigPatchWrapper) bool {
	if !hasHTTPFilterMatch(lp) {
		return true
	}
	return filter.Name == lp.Match.GetListener().FilterChain.Filter.SubFilter.Name
}
// patchContextMatch reports whether the patch targets the given patch context
// (sidecar inbound/outbound, gateway, ...) or applies to any context.
func patchContextMatch(patchContext networking.EnvoyFilter_PatchContext,
	lp *model.EnvoyFilterConfigPatchWrapper,
) bool {
	ctx := lp.Match.Context
	return ctx == patchContext || ctx == networking.EnvoyFilter_ANY
}
// commonConditionMatch evaluates the match conditions shared by all patch
// kinds; currently that is just the patch context check.
func commonConditionMatch(patchContext networking.EnvoyFilter_PatchContext,
	lp *model.EnvoyFilterConfigPatchWrapper,
) bool {
	return patchContextMatch(patchContext, lp)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
"fmt"
"testing"
"time"
udpa "github.com/cncf/xds/go/udpa/type/v1"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
fault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
redis "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/redis_proxy/v3"
tcp_proxy "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"google.golang.org/protobuf/testing/protocmp"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
memregistry "istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/log"
istio_proto "istio.io/istio/pkg/proto"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/wellknown"
)
// testMesh is a minimal mesh config shared by the tests in this file; only
// the connect timeout is populated.
var testMesh = &meshconfig.MeshConfig{
	ConnectTimeout: &durationpb.Duration{
		Seconds: 10,
		Nanos:   1,
	},
}
// buildEnvoyFilterConfigStore creates an in-memory config store containing one
// EnvoyFilter resource (named test-envoyfilter-<i>, in namespace
// "not-default") per supplied config patch, all with the default priority.
func buildEnvoyFilterConfigStore(configPatches []*networking.EnvoyFilter_EnvoyConfigObjectPatch) model.ConfigStore {
	// Delegate to the priority-aware builder with every priority at the
	// proto3 zero value (0, identical to leaving Priority unset), instead of
	// duplicating the store-construction logic.
	return buildEnvoyFilterConfigStoreWithPriorities(configPatches, make([]int32, len(configPatches)))
}
// buildEnvoyFilterConfigStoreWithPriorities creates an in-memory config store
// containing one EnvoyFilter resource per supplied config patch, where patch
// i is assigned priorities[i]. priorities must have at least
// len(configPatches) entries.
func buildEnvoyFilterConfigStoreWithPriorities(configPatches []*networking.EnvoyFilter_EnvoyConfigObjectPatch, priorities []int32) model.ConfigStore {
	store := memory.Make(collections.Pilot)
	for idx, patch := range configPatches {
		cfg := config.Config{
			Meta: config.Meta{
				Name:             fmt.Sprintf("test-envoyfilter-%d", idx),
				Namespace:        "not-default",
				GroupVersionKind: gvk.EnvoyFilter,
			},
			Spec: &networking.EnvoyFilter{
				ConfigPatches: []*networking.EnvoyFilter_EnvoyConfigObjectPatch{patch},
				Priority:      priorities[idx],
			},
		}
		if _, err := store.Create(cfg); err != nil {
			log.Errorf("create envoyfilter failed %v", err)
		}
	}
	return store
}
// buildPatchStruct parses the given JSON string into a Struct proto. Parse
// errors are deliberately ignored: test inputs are assumed well-formed.
func buildPatchStruct(config string) *structpb.Struct {
	out := &structpb.Struct{}
	_ = protomarshal.UnmarshalString(config, out)
	return out
}
// nolint: unparam
// buildGolangPatchStruct parses the given JSON string into a Struct proto
// using the golang/protobuf-style unmarshaller. Parse errors are ignored:
// test inputs are assumed well-formed.
func buildGolangPatchStruct(config string) *structpb.Struct {
	out := &structpb.Struct{}
	_ = protomarshal.Unmarshal([]byte(config), out)
	return out
}
// newTestEnvironment assembles a model.Environment around the given service
// discovery, mesh config, and config store, and initializes its push context.
func newTestEnvironment(serviceDiscovery model.ServiceDiscovery, meshConfig *meshconfig.MeshConfig,
	configStore model.ConfigStore,
) *model.Environment {
	env := &model.Environment{
		ServiceDiscovery: serviceDiscovery,
		ConfigStore:      configStore,
		Watcher:          mesh.NewFixedWatcher(meshConfig),
	}
	env.Init()
	pc := model.NewPushContext()
	// Init errors are ignored; test fixtures are assumed valid.
	_ = pc.InitContext(env, nil, nil)
	env.SetPushContext(pc)
	return env
}
func TestApplyListenerPatches(t *testing.T) {
configPatchesPriorities := []*networking.EnvoyFilter_EnvoyConfigObjectPatch{
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_ADD,
Value: buildPatchStruct(`{"name":"http-filter-to-be-removed-then-add"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
}
priorities := []int32{
2, 1,
}
configPatches := []*networking.EnvoyFilter_EnvoyConfigObjectPatch{
{
ApplyTo: networking.EnvoyFilter_LISTENER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
Proxy: &networking.EnvoyFilter_ProxyMatch{
Metadata: map[string]string{"foo": "sidecar"},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_ADD,
Value: buildPatchStruct(`{"name":"new-outbound-listener1"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 12345,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{Name: "filter1"},
},
},
},
Proxy: &networking.EnvoyFilter_ProxyMatch{
ProxyVersion: `^1\.[2-9](.*?)$`,
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_BEFORE,
Value: buildPatchStruct(`{"name":"filter0"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 12345,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{Name: "filter1"},
},
},
},
Proxy: &networking.EnvoyFilter_ProxyMatch{
ProxyVersion: `^1\.[5-9](.*?)$`,
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_FIRST,
Value: buildPatchStruct(`{"name":"filter0"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 12345,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{Name: "filter2"},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
{
ApplyTo: networking.EnvoyFilter_LISTENER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 12345,
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
{
ApplyTo: networking.EnvoyFilter_LISTENER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_MERGE,
Value: buildPatchStruct(`{listener_filters: nil}`),
},
},
{
ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{TransportProtocol: "tls"},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
{
ApplyTo: networking.EnvoyFilter_LISTENER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_MERGE,
Value: buildPatchStruct(`{"listener_filters": [{"name":"foo"}]}`),
},
},
{
ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Sni: "*.foo.com",
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_MERGE,
Value: buildPatchStruct(`{"filter_chain_match": { "server_names": ["foo.com"] }}`),
},
},
{
ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{Name: wellknown.RedisProxy},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_MERGE,
Value: buildPatchStruct(`
{"name": "envoy.filters.network.redis_proxy",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy",
"settings": {"op_timeout": "0.2s"}
}
}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Sni: "*.foo.com",
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter2"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_AFTER,
Value: buildPatchStruct(`{"name": "http-filter3"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_GATEWAY,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_ADD,
Value: buildPatchStruct(`{"name":"http-filter-to-be-removed-then-add"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter2"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_BEFORE,
Value: buildPatchStruct(`{"name": "http-filter3"}`),
},
},
// Remove inline http filter patch
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter3"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_BEFORE,
Value: buildPatchStruct(`{"name": "http-filter-to-be-removed2"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed2"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter2"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_INSERT_AFTER,
Value: buildPatchStruct(`{"name": "http-filter4"}`),
},
},
{
ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
Context: networking.EnvoyFilter_SIDECAR_INBOUND,
ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
Listener: &networking.EnvoyFilter_ListenerMatch{
PortNumber: 80,
FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
Name: wellknown.HTTPConnectionManager,
SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed"},
},
},
},
},
},
Patch: &networking.EnvoyFilter_Patch{
Operation: networking.EnvoyFilter_Patch_REMOVE,
},
},
// remove then add
// --- remove-then-add pair, part 1: REMOVE the HTTP filter matched by sub-filter name.
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name:      wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// --- remove-then-add pair, part 2: ADD the same-named HTTP filter back.
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name:      wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter-to-be-removed-then-add"},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_ADD,
		Value:     buildPatchStruct(`{"name":"http-filter-to-be-removed-then-add"}`),
	},
},
// Merge v3 any with v2 any
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name:      wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "envoy.filters.http.fault"},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"name": "envoy.filters.http.fault",
			"typed_config": {
				"@type": "type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault",
				"downstreamNodes": ["foo"]
			}
			}`),
	},
},
// Insert a brand-new HTTP filter at the head of the chain (the sub-filter match only
// selects the listener/chain; INSERT_FIRST ignores the position of the matched filter).
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name:      wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter2"},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_INSERT_FIRST,
		Value:     buildPatchStruct(`{"name": "http-filter0"}`),
	},
},
// Merge into the fault filter selected via the wellknown sub-filter name.
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{
							Name: wellknown.Fault,
						},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`{"typed_config": {
				"@type": "type.googleapis.com/envoy.extensions.filters.http.fault.v3.HTTPFault",
				"upstream_cluster": "scooby"}}`),
	},
},
// NOTE(review): the "@type" URL below repeats the "type.googleapis.com/" prefix.
// Presumably this is a deliberate malformed-typeURL input for the merge path —
// confirm against the expected outputs before "fixing" it.
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: wellknown.HTTPConnectionManager,
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"name": "envoy.filters.network.http_connection_manager",
			"typed_config": {
				"@type": "type.googleapis.com/type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
				"xffNumTrustedHops": "4"
			}
			}`),
	},
},
// Ensure we can mix v3 patches with v2 internal
// Note that alwaysSetRequestIdInResponse is only present in v3 protos. It will be silently ignored
// as we are working in v2 protos internally
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: wellknown.HTTPConnectionManager,
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"name": "envoy.filters.network.http_connection_manager",
			"typed_config": {
				"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
				"mergeSlashes": true,
				"alwaysSetRequestIdInResponse": true
			}
			}`),
	},
},
// Insert a new HTTP filter immediately after http-filter2.
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name:      wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{Name: "http-filter2"},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_INSERT_AFTER,
		Value:     buildPatchStruct(`{"name": "http-filter-5"}`),
	},
},
// Replace "filter1" on port 6379 with a fully-configured redis proxy (no Context:
// matches any proxy context).
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 6379,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "filter1",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value: buildPatchStruct(`
			{"name": "envoy.redis_proxy",
			"typed_config": {
				"@type": "type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy",
				"stat_prefix": "redis_stats",
				"prefix_routes": {
					"catch_all_route": {
						"cluster": "custom-redis-cluster"
					}
				}
			}
			}`),
	},
},
// Replace a filter that lives in the listener's default filter chain (port 6381).
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 6381,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "default-network-filter",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value:     buildPatchStruct(`{"name": "default-network-filter-replaced"}`),
	},
},
// Remove a filter from the default filter chain (port 6381).
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 6381,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "default-network-filter-removed",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// This patch should not be applied because network filter name doesn't match
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 6380,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "network-filter-should-not-be-replaced-not-match",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value:     buildPatchStruct(`{"name": "network-filter-replaced-should-not-be-applied"}`),
	},
},
// Replace an HTTP filter, matching the listener by name instead of port.
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: "listener-http-filter-to-be-replaced",
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{
							Name: "http-filter-to-be-replaced",
						},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value:     buildPatchStruct(`{"name": "http-filter-replaced"}`),
	},
},
// This patch should not be applied because the subfilter name doesn't match
{
	ApplyTo: networking.EnvoyFilter_HTTP_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: "listener-http-filter-to-be-replaced-not-found",
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: wellknown.HTTPConnectionManager,
						SubFilter: &networking.EnvoyFilter_ListenerMatch_SubFilterMatch{
							Name: "http-filter-should-not-be-replaced-not-match",
						},
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value:     buildPatchStruct(`{"name": "http-filter-replaced-should-not-be-applied"}`),
	},
},
// Replace a network filter inside the virtual inbound listener, selecting the
// filter chain by destination port.
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: model.VirtualInboundListenerName,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					DestinationPort: 6380,
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "network-filter-to-be-replaced",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value: buildPatchStruct(`
			{"name": "envoy.redis_proxy",
			"typed_config": {
				"@type": "type.googleapis.com/envoy.extensions.filters.network.redis_proxy.v3.RedisProxy",
				"stat_prefix": "redis_stats",
				"prefix_routes": {
					"catch_all_route": {
						"cluster": "custom-redis-cluster"
					}
				}
			}
			}`),
	},
},
// Merge TLS params into a filter chain matched by transport protocol.
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:  12345,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{TransportProtocol: "tls"},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"transport_socket":{
				"name":"envoy.transport_sockets.tls",
				"typed_config":{
					"@type":"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
					"common_tls_context":{
						"tls_params":{
							"tls_maximum_protocol_version":"TLSv1_3",
							"tls_minimum_protocol_version":"TLSv1_2"}}}}}`),
	},
},
// Remove an entire filter chain matched by application protocol.
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:  12345,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{ApplicationProtocols: "http/1.1"},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// Listener-filter remove-then-add pair on port 12345.
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 12345,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 12345,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_ADD,
		Value:     buildPatchStruct(`{"name":"http-filter-to-be-removed-then-add"}`),
	},
},
// Patch custom TLS type
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 7777,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"transport_socket":{
				"name":"transport_sockets.alts",
				"typed_config":{
					"@type":"type.googleapis.com/udpa.type.v1.TypedStruct",
					"type_url": "type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts",
					"value":{"handshaker_service":"1.2.3.4"}}}}`),
	},
},
// Patch custom TLS type to a FC without TLS already set
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 7778,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"transport_socket":{
				"name":"transport_sockets.alts",
				"typed_config":{
					"@type":"type.googleapis.com/udpa.type.v1.TypedStruct",
					"type_url": "type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts",
					"value":{"handshaker_service":"1.2.3.4"}}}}`),
	},
},
// Remove a network filter from a filter chain matched by chain name.
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: model.VirtualInboundListenerName,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Name: "filter-chain-name-match",
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "custom-network-filter-2",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// Remove inline network filter patch
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: model.VirtualInboundListenerName,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Name: "filter-chain-name-match",
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "custom-network-filter-1",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_INSERT_BEFORE,
		Value:     buildPatchStruct(`{"name":"network-filter-to-be-removed"}`),
	},
},
// ...and remove the filter that the previous patch just inserted.
{
	ApplyTo: networking.EnvoyFilter_NETWORK_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				Name: model.VirtualInboundListenerName,
				FilterChain: &networking.EnvoyFilter_ListenerMatch_FilterChainMatch{
					Name: "filter-chain-name-match",
					Filter: &networking.EnvoyFilter_ListenerMatch_FilterMatch{
						Name: "network-filter-to-be-removed",
					},
				},
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// Add transport socket to virtual inbound.
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 80,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"transport_socket":{
				"name":"envoy.transport_sockets.tls",
				"typed_config":{
					"@type":"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
					"common_tls_context":{
						"alpn_protocols": [ "h2-80", "http/1.1-80" ]}}}}`),
	},
},
{
	ApplyTo: networking.EnvoyFilter_FILTER_CHAIN,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_INBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 6380,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_MERGE,
		Value: buildPatchStruct(`
			{"transport_socket":{
				"name":"envoy.transport_sockets.tls",
				"typed_config":{
					"@type":"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext",
					"common_tls_context":{
						"alpn_protocols": [ "h2-6380", "http/1.1-6380" ]}}}}`),
	},
},
// Append a listener filter, then insert before/after it by ListenerFilter name.
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber: 12345,
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_ADD,
		Value:     buildPatchStruct(`{"name":"envoy.filters.listener.proxy_protocol"}`),
	},
},
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:     12345,
				ListenerFilter: "envoy.filters.listener.proxy_protocol",
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_INSERT_BEFORE,
		Value:     buildPatchStruct(`{"name":"before proxy_protocol"}`),
	},
},
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:     12345,
				ListenerFilter: "envoy.filters.listener.proxy_protocol",
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_INSERT_AFTER,
		Value:     buildPatchStruct(`{"name":"after proxy_protocol"}`),
	},
},
// Remove a listener filter by name on port 6381.
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:     6381,
				ListenerFilter: "filter-to-be-removed",
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REMOVE,
	},
},
// Replace a listener filter by name on port 6381.
{
	ApplyTo: networking.EnvoyFilter_LISTENER_FILTER,
	Match: &networking.EnvoyFilter_EnvoyConfigObjectMatch{
		Context: networking.EnvoyFilter_SIDECAR_OUTBOUND,
		ObjectTypes: &networking.EnvoyFilter_EnvoyConfigObjectMatch_Listener{
			Listener: &networking.EnvoyFilter_ListenerMatch{
				PortNumber:     6381,
				ListenerFilter: "filter-before-replace",
			},
		},
	},
	Patch: &networking.EnvoyFilter_Patch{
		Operation: networking.EnvoyFilter_Patch_REPLACE,
		Value:     buildPatchStruct(`{"name":"filter-after-replace"}`),
	},
},
}
// sidecarOutboundIn is the pre-patch set of sidecar outbound listeners that the
// configPatches above are applied to.
sidecarOutboundIn := []*listener.Listener{
	{
		Name: "12345",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 12345,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			// TLS chain: existing DownstreamTlsContext that the MERGE patch updates.
			{
				FilterChainMatch: &listener.FilterChainMatch{TransportProtocol: "tls"},
				TransportSocket: &core.TransportSocket{
					Name: "envoy.transport_sockets.tls",
					ConfigType: &core.TransportSocket_TypedConfig{
						TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
							CommonTlsContext: &tls.CommonTlsContext{
								TlsParams: &tls.TlsParameters{
									EcdhCurves:                []string{"X25519"},
									TlsMaximumProtocolVersion: tls.TlsParameters_TLSv1_1,
								},
							},
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "envoy.transport_sockets.tls"}},
			},
			// http/1.1 chain: target of the FILTER_CHAIN REMOVE patch.
			{
				FilterChainMatch: &listener.FilterChainMatch{ApplicationProtocols: []string{"http/1.1", "h2c"}},
				Filters: []*listener.Filter{
					{Name: "filter1"},
				},
			},
			{
				Filters: []*listener.Filter{
					{Name: "filter1"},
					{Name: "filter2"},
				},
			},
		},
	},
	{
		Name: "network-filter-to-be-replaced",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6379,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					{Name: "filter1"},
					{Name: "filter2"},
				},
			},
		},
	},
	{
		Name: "redis-proxy",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 9999,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.RedisProxy,
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&redis.RedisProxy{
								Settings: &redis.RedisProxy_ConnPoolSettings{
									OpTimeout: durationpb.New(time.Second * 5),
								},
							}),
						},
					},
				},
			},
		},
	},
	{
		Name: "network-filter-to-be-replaced-not-found",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6380,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					{Name: "network-filter-should-not-be-replaced"},
				},
			},
		},
	},
	{
		Name: "default-filter-chain",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6381,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					{Name: "network-filter"},
				},
			},
		},
		// Default chain: exercises replace/remove patches against DefaultFilterChain.
		DefaultFilterChain: &listener.FilterChain{
			Filters: []*listener.Filter{
				{Name: "default-network-filter"},
				{Name: "default-network-filter-removed"},
			},
		},
		// Listener filters: exercise the LISTENER_FILTER remove/replace patches on 6381.
		ListenerFilters: []*listener.ListenerFilter{
			{
				Name: "filter-to-be-removed",
			},
			{
				Name: "filter-before-replace",
			},
		},
	},
	{
		Name: "another-listener",
	},
	{
		Name: "listener-http-filter-to-be-replaced",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 80,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.HTTPConnectionManager,
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
								HttpFilters: []*hcm.HttpFilter{
									{Name: "http-filter-to-be-replaced"},
									{Name: "another-http-filter"},
								},
							}),
						},
					},
				},
			},
		},
	},
	{
		Name: "listener-http-filter-to-be-replaced-not-found",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 80,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.HTTPConnectionManager,
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
								HttpFilters: []*hcm.HttpFilter{
									{Name: "http-filter-should-not-be-replaced"},
									{Name: "another-http-filter"},
								},
							}),
						},
					},
				},
			},
		},
	},
	{
		Name: "custom-tls-replacement",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 7777,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				TransportSocket: &core.TransportSocket{
					Name: "envoy.transport_sockets.tls",
					ConfigType: &core.TransportSocket_TypedConfig{
						TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
							CommonTlsContext: &tls.CommonTlsContext{
								TlsParams: &tls.TlsParameters{},
							},
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "filter"}},
			},
		},
	},
	{
		Name: "custom-tls-addition",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 7778,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{{Name: "filter"}},
			},
		},
	},
}
// sidecarOutboundOut is the expected result of applying configPatches to
// sidecarOutboundIn (when listener ADDs are enabled — note the extra
// "new-outbound-listener1" at the end, which comes from a patch outside this chunk).
sidecarOutboundOut := []*listener.Listener{
	{
		Name: "12345",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 12345,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{TransportProtocol: "tls"},
				TransportSocket: &core.TransportSocket{
					Name: "envoy.transport_sockets.tls",
					ConfigType: &core.TransportSocket_TypedConfig{
						// Merged result: EcdhCurves kept from the input, min/max TLS
						// versions overwritten by the MERGE patch.
						TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
							CommonTlsContext: &tls.CommonTlsContext{
								TlsParams: &tls.TlsParameters{
									EcdhCurves:                []string{"X25519"},
									TlsMaximumProtocolVersion: tls.TlsParameters_TLSv1_3,
									TlsMinimumProtocolVersion: tls.TlsParameters_TLSv1_2,
								},
							},
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "envoy.transport_sockets.tls"}},
			},
			// The http/1.1 chain has been removed; remaining chain reflects network
			// filter patches (filter0 inserted, filter2 removed — from patches outside
			// this chunk).
			{
				Filters: []*listener.Filter{
					{Name: "filter0"},
					{Name: "filter1"},
				},
			},
		},
		// Listener filters after remove, add, and the before/after inserts around
		// proxy_protocol.
		ListenerFilters: []*listener.ListenerFilter{
			{
				Name: "http-filter-to-be-removed-then-add",
			},
			{
				Name: "before proxy_protocol",
			},
			{
				Name: "envoy.filters.listener.proxy_protocol",
			},
			{
				Name: "after proxy_protocol",
			},
		},
	},
	{
		Name: "network-filter-to-be-replaced",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6379,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					// "filter1" replaced by the fully-configured redis proxy.
					{
						Name: "envoy.redis_proxy",
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&redis.RedisProxy{
								StatPrefix: "redis_stats",
								PrefixRoutes: &redis.RedisProxy_PrefixRoutes{
									CatchAllRoute: &redis.RedisProxy_PrefixRoutes_Route{
										Cluster: "custom-redis-cluster",
									},
								},
							}),
						},
					},
					{Name: "filter2"},
				},
			},
		},
	},
	{
		Name: "redis-proxy",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 9999,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.RedisProxy,
						ConfigType: &listener.Filter_TypedConfig{
							// OpTimeout changed from 5s to 200ms by a patch outside this chunk.
							TypedConfig: protoconv.MessageToAny(&redis.RedisProxy{
								Settings: &redis.RedisProxy_ConnPoolSettings{
									OpTimeout: durationpb.New(time.Millisecond * 200),
								},
							}),
						},
					},
				},
			},
		},
	},
	// Unchanged: the REPLACE patch for port 6380 does not match the filter name.
	{
		Name: "network-filter-to-be-replaced-not-found",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6380,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					{Name: "network-filter-should-not-be-replaced"},
				},
			},
		},
	},
	{
		Name: "default-filter-chain",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 6381,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				Filters: []*listener.Filter{
					{Name: "network-filter"},
				},
			},
		},
		// Default chain after the REPLACE (renamed) and REMOVE patches.
		DefaultFilterChain: &listener.FilterChain{
			Filters: []*listener.Filter{
				{Name: "default-network-filter-replaced"},
			},
		},
		// "filter-to-be-removed" removed; "filter-before-replace" replaced.
		ListenerFilters: []*listener.ListenerFilter{
			{
				Name: "filter-after-replace",
			},
		},
	},
	{
		Name: "another-listener",
	},
	{
		Name: "listener-http-filter-to-be-replaced",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 80,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.HTTPConnectionManager,
						ConfigType: &listener.Filter_TypedConfig{
							// HTTP filter replaced inside the HCM config.
							TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
								HttpFilters: []*hcm.HttpFilter{
									{Name: "http-filter-replaced"},
									{Name: "another-http-filter"},
								},
							}),
						},
					},
				},
			},
		},
	},
	// Unchanged: sub-filter name does not match, so no replacement happens.
	{
		Name: "listener-http-filter-to-be-replaced-not-found",
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					DestinationPort: &wrapperspb.UInt32Value{
						Value: 80,
					},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.HTTPConnectionManager,
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
								HttpFilters: []*hcm.HttpFilter{
									{Name: "http-filter-should-not-be-replaced"},
									{Name: "another-http-filter"},
								},
							}),
						},
					},
				},
			},
		},
	},
	{
		Name: "custom-tls-replacement",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 7777,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				// TLS transport socket replaced wholesale by the custom ALTS TypedStruct.
				TransportSocket: &core.TransportSocket{
					Name: "transport_sockets.alts",
					ConfigType: &core.TransportSocket_TypedConfig{
						TypedConfig: protoconv.MessageToAny(&udpa.TypedStruct{
							TypeUrl: "type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts",
							Value:   buildGolangPatchStruct(`{"handshaker_service":"1.2.3.4"}`),
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "filter"}},
			},
		},
	},
	{
		Name: "custom-tls-addition",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 7778,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				// ALTS transport socket added to a chain that previously had none.
				TransportSocket: &core.TransportSocket{
					Name: "transport_sockets.alts",
					ConfigType: &core.TransportSocket_TypedConfig{
						TypedConfig: protoconv.MessageToAny(&udpa.TypedStruct{
							TypeUrl: "type.googleapis.com/envoy.extensions.transport_sockets.alts.v3.Alts",
							Value:   buildGolangPatchStruct(`{"handshaker_service":"1.2.3.4"}`),
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "filter"}},
			},
		},
	},
	{
		Name: "new-outbound-listener1",
	},
}
// sidecarOutboundInNoAdd is a smaller input fixture used to verify patching when
// listener ADD operations are skipped.
sidecarOutboundInNoAdd := []*listener.Listener{
	{
		Name: "12345",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 12345,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{TransportProtocol: "tls"},
				TransportSocket: &core.TransportSocket{
					Name: "envoy.transport_sockets.tls",
					ConfigType: &core.TransportSocket_TypedConfig{
						// Empty TLS context: the MERGE patch supplies all TLS params.
						TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{}),
					},
				},
				Filters: []*listener.Filter{{Name: "envoy.transport_sockets.tls"}},
			},
			{
				FilterChainMatch: &listener.FilterChainMatch{ApplicationProtocols: []string{"http/1.1", "h2c"}},
				Filters: []*listener.Filter{
					{Name: "filter1"},
				},
			},
			{
				Filters: []*listener.Filter{
					{Name: "filter1"},
					{Name: "filter2"},
				},
			},
		},
	},
	{
		Name: "another-listener",
	},
}
// sidecarOutboundOutNoAdd is the expected result for sidecarOutboundInNoAdd: the
// same patches are applied, but no new listener is appended.
sidecarOutboundOutNoAdd := []*listener.Listener{
	{
		Name: "12345",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 12345,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{TransportProtocol: "tls"},
				TransportSocket: &core.TransportSocket{
					Name: "envoy.transport_sockets.tls",
					ConfigType: &core.TransportSocket_TypedConfig{
						// TLS params come entirely from the MERGE patch (input was empty).
						TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
							CommonTlsContext: &tls.CommonTlsContext{
								TlsParams: &tls.TlsParameters{
									TlsMaximumProtocolVersion: tls.TlsParameters_TLSv1_3,
									TlsMinimumProtocolVersion: tls.TlsParameters_TLSv1_2,
								},
							},
						}),
					},
				},
				Filters: []*listener.Filter{{Name: "envoy.transport_sockets.tls"}},
			},
			{
				Filters: []*listener.Filter{
					{Name: "filter0"},
					{Name: "filter1"},
				},
			},
		},
		ListenerFilters: []*listener.ListenerFilter{
			{
				Name: "http-filter-to-be-removed-then-add",
			},
			{
				Name: "before proxy_protocol",
			},
			{
				Name: "envoy.filters.listener.proxy_protocol",
			},
			{
				Name: "after proxy_protocol",
			},
		},
	},
	{
		Name: "another-listener",
	},
}
// faultFilterIn/Out are the HTTP fault filter configs before and after the fault
// MERGE patch: UpstreamCluster is overwritten and DownstreamNodes is appended.
faultFilterIn := &fault.HTTPFault{
	UpstreamCluster: "foobar",
}
faultFilterInAny := protoconv.MessageToAny(faultFilterIn)
faultFilterOut := &fault.HTTPFault{
	UpstreamCluster: "scooby",
	DownstreamNodes: []string{"foo"},
}
faultFilterOutAny := protoconv.MessageToAny(faultFilterOut)
// gatewayIn is the pre-patch set of gateway listeners; the patches applied to it
// (gateway-context patches) are defined outside this chunk.
gatewayIn := []*listener.Listener{
	{
		Name: "80",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 80,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					ServerNames: []string{"match.com", "*.foo.com"},
				},
				Filters: []*listener.Filter{
					{
						Name: wellknown.HTTPConnectionManager,
						ConfigType: &listener.Filter_TypedConfig{
							TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
								HttpFilters: []*hcm.HttpFilter{
									{Name: "http-filter1"},
									{Name: "http-filter2"},
								},
							}),
						},
					},
				},
			},
		},
	},
	{
		Name: "another-listener",
		Address: &core.Address{
			Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: 443,
					},
				},
			},
		},
		FilterChains: []*listener.FilterChain{
			{
				FilterChainMatch: &listener.FilterChainMatch{
					ServerNames: []string{"nomatch.com", "*.foo.com"},
				},
				Filters: []*listener.Filter{{Name: "network-filter"}},
			},
		},
	},
}
gatewayOut := []*listener.Listener{
{
Name: "80",
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 80,
},
},
},
},
ListenerFilters: []*listener.ListenerFilter{{Name: "foo"}},
FilterChains: []*listener.FilterChain{
{
FilterChainMatch: &listener.FilterChainMatch{
ServerNames: []string{"match.com", "*.foo.com", "foo.com"},
},
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{
{Name: "http-filter1"},
{Name: "http-filter2"},
{Name: "http-filter3"},
{Name: "http-filter-to-be-removed-then-add"},
},
}),
},
},
},
},
},
},
{
Name: "another-listener",
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 443,
},
},
},
},
FilterChains: []*listener.FilterChain{
{
FilterChainMatch: &listener.FilterChainMatch{
ServerNames: []string{"nomatch.com", "*.foo.com"},
},
Filters: []*listener.Filter{{Name: "network-filter"}},
},
},
},
}
gatewayPriorityIn := []*listener.Listener{
{
Name: "80",
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 80,
},
},
},
},
FilterChains: []*listener.FilterChain{
{
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{},
}),
},
},
},
},
},
},
}
gatewayPriorityOut := []*listener.Listener{
{
Name: "80",
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 80,
},
},
},
},
FilterChains: []*listener.FilterChain{
{
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{
{Name: "http-filter-to-be-removed-then-add"},
},
}),
},
},
},
},
},
},
}
sidecarVirtualInboundIn := []*listener.Listener{
{
Name: model.VirtualInboundListenerName,
UseOriginalDst: istio_proto.BoolTrue,
TrafficDirection: core.TrafficDirection_INBOUND,
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 15006,
},
},
},
},
FilterChains: []*listener.FilterChain{
{
Name: "virtualInbound-blackhole",
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrappers.UInt32Value{
Value: 15006,
},
},
Filters: []*listener.Filter{
{
Name: wellknown.TCPProxy,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&tcp_proxy.TcpProxy{
StatPrefix: util.BlackHoleCluster,
ClusterSpecifier: &tcp_proxy.TcpProxy_Cluster{Cluster: util.BlackHoleCluster},
}),
},
},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrapperspb.UInt32Value{
Value: 80,
},
},
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{
{
Name: wellknown.Fault,
ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: faultFilterInAny},
},
{Name: "http-filter2"},
{Name: "http-filter-to-be-removed"},
},
}),
},
},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
AddressSuffix: "0.0.0.0",
},
Filters: []*listener.Filter{
{Name: "network-filter-should-not-be-replaced"},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrapperspb.UInt32Value{
Value: 6380,
},
},
Filters: []*listener.Filter{
{Name: "network-filter-to-be-replaced"},
},
},
{
Name: "filter-chain-name-not-match",
Filters: []*listener.Filter{
{Name: "custom-network-filter-1"},
{Name: "custom-network-filter-2"},
},
},
{
Name: "filter-chain-name-match",
Filters: []*listener.Filter{
{Name: "custom-network-filter-1"},
{Name: "custom-network-filter-2"},
},
},
{
Name: "catch-all",
FilterChainMatch: &listener.FilterChainMatch{},
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{
{Name: "base"},
},
}),
},
},
},
},
},
},
}
sidecarVirtualInboundOut := []*listener.Listener{
{
Name: model.VirtualInboundListenerName,
UseOriginalDst: istio_proto.BoolTrue,
TrafficDirection: core.TrafficDirection_INBOUND,
Address: &core.Address{
Address: &core.Address_SocketAddress{
SocketAddress: &core.SocketAddress{
PortSpecifier: &core.SocketAddress_PortValue{
PortValue: 15006,
},
},
},
},
FilterChains: []*listener.FilterChain{
{
Name: "virtualInbound-blackhole",
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrappers.UInt32Value{
Value: 15006,
},
},
Filters: []*listener.Filter{
{
Name: wellknown.TCPProxy,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&tcp_proxy.TcpProxy{
StatPrefix: util.BlackHoleCluster,
ClusterSpecifier: &tcp_proxy.TcpProxy_Cluster{Cluster: util.BlackHoleCluster},
}),
},
},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrapperspb.UInt32Value{
Value: 80,
},
},
TransportSocket: &core.TransportSocket{
Name: "envoy.transport_sockets.tls",
ConfigType: &core.TransportSocket_TypedConfig{
TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
CommonTlsContext: &tls.CommonTlsContext{
AlpnProtocols: []string{"h2-80", "http/1.1-80"},
},
}),
},
},
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
XffNumTrustedHops: 4,
MergeSlashes: true,
AlwaysSetRequestIdInResponse: true,
HttpFilters: []*hcm.HttpFilter{
{Name: "http-filter0"},
{
Name: wellknown.Fault,
ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: faultFilterOutAny},
},
{Name: "http-filter3"},
{Name: "http-filter2"},
{Name: "http-filter-5"},
{Name: "http-filter4"},
{Name: "http-filter-to-be-removed-then-add"},
},
}),
},
},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
AddressSuffix: "0.0.0.0",
},
Filters: []*listener.Filter{
{Name: "network-filter-should-not-be-replaced"},
},
},
{
FilterChainMatch: &listener.FilterChainMatch{
DestinationPort: &wrapperspb.UInt32Value{
Value: 6380,
},
},
TransportSocket: &core.TransportSocket{
Name: "envoy.transport_sockets.tls",
ConfigType: &core.TransportSocket_TypedConfig{
TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
CommonTlsContext: &tls.CommonTlsContext{
AlpnProtocols: []string{"h2-6380", "http/1.1-6380"},
},
}),
},
},
Filters: []*listener.Filter{
{
Name: "envoy.redis_proxy",
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&redis.RedisProxy{
StatPrefix: "redis_stats",
PrefixRoutes: &redis.RedisProxy_PrefixRoutes{
CatchAllRoute: &redis.RedisProxy_PrefixRoutes_Route{
Cluster: "custom-redis-cluster",
},
},
}),
},
},
},
},
{
Name: "filter-chain-name-not-match",
Filters: []*listener.Filter{
{Name: "custom-network-filter-1"},
{Name: "custom-network-filter-2"},
},
},
{
Name: "filter-chain-name-match",
Filters: []*listener.Filter{
{Name: "custom-network-filter-1"},
},
},
{
Name: "catch-all",
FilterChainMatch: &listener.FilterChainMatch{},
Filters: []*listener.Filter{
{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{
TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
HttpFilters: []*hcm.HttpFilter{
{Name: "base"},
},
}),
},
},
},
},
},
},
}
sidecarProxy := &model.Proxy{
Type: model.SidecarProxy,
ConfigNamespace: "not-default",
Metadata: &model.NodeMetadata{
IstioVersion: "1.2.2",
Raw: map[string]any{
"foo": "sidecar",
"bar": "proxy",
},
},
}
gatewayProxy := &model.Proxy{
Type: model.Router,
ConfigNamespace: "not-default",
Metadata: &model.NodeMetadata{
IstioVersion: "1.2.2",
Raw: map[string]any{
"foo": "sidecar",
"bar": "proxy",
},
},
}
serviceDiscovery := memregistry.NewServiceDiscovery()
e := newTestEnvironment(serviceDiscovery, testMesh, buildEnvoyFilterConfigStore(configPatches))
push := model.NewPushContext()
_ = push.InitContext(e, nil, nil)
// Test different priorities
ep := newTestEnvironment(serviceDiscovery, testMesh, buildEnvoyFilterConfigStoreWithPriorities(configPatchesPriorities, priorities))
pushPriorities := model.NewPushContext()
_ = pushPriorities.InitContext(ep, nil, nil)
type args struct {
patchContext networking.EnvoyFilter_PatchContext
proxy *model.Proxy
push *model.PushContext
listeners []*listener.Listener
skipAdds bool
}
tests := []struct {
name string
args args
want []*listener.Listener
}{
{
name: "gateway lds",
args: args{
patchContext: networking.EnvoyFilter_GATEWAY,
proxy: gatewayProxy,
push: push,
listeners: gatewayIn,
skipAdds: false,
},
want: gatewayOut,
},
{
name: "gateway lds with priority",
args: args{
patchContext: networking.EnvoyFilter_GATEWAY,
proxy: gatewayProxy,
push: pushPriorities,
listeners: gatewayPriorityIn,
skipAdds: false,
},
want: gatewayPriorityOut,
},
{
name: "sidecar outbound lds",
args: args{
patchContext: networking.EnvoyFilter_SIDECAR_OUTBOUND,
proxy: sidecarProxy,
push: push,
listeners: sidecarOutboundIn,
skipAdds: false,
},
want: sidecarOutboundOut,
},
{
name: "sidecar outbound lds - skip adds",
args: args{
patchContext: networking.EnvoyFilter_SIDECAR_OUTBOUND,
proxy: sidecarProxy,
push: push,
listeners: sidecarOutboundInNoAdd,
skipAdds: true,
},
want: sidecarOutboundOutNoAdd,
},
{
name: "sidecar inbound virtual",
args: args{
patchContext: networking.EnvoyFilter_SIDECAR_INBOUND,
proxy: sidecarProxy,
push: push,
listeners: sidecarVirtualInboundIn,
skipAdds: false,
},
want: sidecarVirtualInboundOut,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := ApplyListenerPatches(tt.args.patchContext, tt.args.push.EnvoyFilters(tt.args.proxy),
tt.args.listeners, tt.args.skipAdds)
if diff := cmp.Diff(tt.want, got, protocmp.Transform()); diff != "" {
t.Errorf("ApplyListenerPatches(): %s mismatch (-want +got):\n%s", tt.name, diff)
}
})
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
"sync"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/monitoring"
)
// Result records the outcome of applying an EnvoyFilter patch: either it
// was applied or it errored. Used as the "result" metric label value.
type Result string

const (
	Error   Result = "error"
	Applied Result = "applied"
)

// PatchType identifies the kind of Envoy configuration object an
// EnvoyFilter patch targets. Used as the "patch" metric label value.
type PatchType string

const (
	Cluster        PatchType = "cluster"
	Listener       PatchType = "listener"
	ListenerFilter PatchType = "listenerfilter"
	FilterChain    PatchType = "filterchain"
	NetworkFilter  PatchType = "networkfilter"
	// nolint
	HttpFilter  PatchType = "httpfilter"
	Route       PatchType = "route"
	VirtualHost PatchType = "vhost"
)
var (
	// Metric labels: target patch type, applied/error result, and the
	// EnvoyFilter resource name.
	patchType  = monitoring.CreateLabel("patch")
	resultType = monitoring.CreateLabel("result")
	nameType   = monitoring.CreateLabel("name")

	// envoyFilterStatus reports per-EnvoyFilter/per-patch-type status.
	// The gauge is only enabled when EnableEnvoyFilterMetrics is set.
	envoyFilterStatus = monitoring.NewGauge(
		"pilot_envoy_filter_status",
		"Status of Envoy filters whether it was applied or errored.",
		monitoring.WithEnabled(func() bool {
			return features.EnableEnvoyFilterMetrics
		}),
	)
)

var (
	// envoyFilterStatusMap accumulates statuses between RecordMetrics calls.
	// Guarded by envoyFilterMutex.
	envoyFilterStatusMap = map[string]map[string]bool{} // Map of Envoy filter name, patch and status.
	envoyFilterMutex     sync.RWMutex
)
// IncrementEnvoyFilterMetric records the application status of a single
// EnvoyFilter patch into the in-memory accumulator; RecordMetrics later
// flushes the accumulator to the pilot_envoy_filter_status gauge.
//
// The recorded status is sticky-true: once a patch type has applied for a
// given filter name, a later non-applied attempt does not reset it.
func IncrementEnvoyFilterMetric(name string, pt PatchType, applied bool) {
	if !features.EnableEnvoyFilterMetrics {
		return
	}
	envoyFilterMutex.Lock()
	defer envoyFilterMutex.Unlock()
	if _, exists := envoyFilterStatusMap[name]; !exists {
		envoyFilterStatusMap[name] = make(map[string]bool)
	}
	// Record non-applied attempts as well. Previously only applied=true was
	// ever stored, which made the zero-value branch in RecordMetrics dead
	// code; OR-ing keeps an earlier success from being clobbered by a later
	// non-applied attempt for the same name/patch type.
	envoyFilterStatusMap[name][string(pt)] = envoyFilterStatusMap[name][string(pt)] || applied
}
// IncrementEnvoyFilterErrorMetric reports an error result for the given
// patch type directly on the pilot_envoy_filter_status gauge.
func IncrementEnvoyFilterErrorMetric(pt PatchType) {
	if !features.EnableEnvoyFilterMetrics {
		return
	}
	labeled := envoyFilterStatus.
		With(patchType.Value(string(pt))).
		With(resultType.Value(string(Error)))
	labeled.Record(1)
}
// RecordMetrics flushes the accumulated per-filter patch statuses to the
// pilot_envoy_filter_status gauge (1 = applied, 0 = not applied) and then
// resets the accumulator.
func RecordMetrics() {
	if !features.EnableEnvoyFilterMetrics {
		return
	}
	// This function both reads and *reassigns* envoyFilterStatusMap, so it
	// must take the write lock. The previous RLock allowed the final map
	// reassignment to race with a concurrent IncrementEnvoyFilterMetric
	// holding the write lock.
	envoyFilterMutex.Lock()
	defer envoyFilterMutex.Unlock()
	for name, pmap := range envoyFilterStatusMap {
		for pt, applied := range pmap {
			val := float64(0)
			if applied {
				val = 1
			}
			envoyFilterStatus.With(nameType.Value(name)).With(patchType.Value(pt)).
				With(resultType.Value(string(Applied))).Record(val)
		}
	}
	// Reset so each push reports a fresh snapshot.
	envoyFilterStatusMap = make(map[string]map[string]bool)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
"strconv"
"strings"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
"google.golang.org/protobuf/proto"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/runtime"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto/merge"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// ApplyRouteConfigurationPatches applies EnvoyFilter ROUTE_CONFIGURATION
// patches (MERGE only) and then virtual-host/route level patches to the
// given route configuration, returning the patched configuration.
func ApplyRouteConfigurationPatches(
	patchContext networking.EnvoyFilter_PatchContext,
	proxy *model.Proxy,
	efw *model.EnvoyFilterWrapper,
	routeConfiguration *route.RouteConfiguration,
) (out *route.RouteConfiguration) {
	defer runtime.HandleCrash(runtime.LogPanic, func(any) {
		IncrementEnvoyFilterErrorMetric(Route)
		log.Errorf("route patch %s/%s caused panic, so the patches did not take effect", efw.Namespace, efw.Name)
	})
	// Assign the named return up front: if a patch panics, the recover above
	// fires and the caller still receives a usable route configuration.
	out = routeConfiguration
	if efw == nil {
		return out
	}

	var portMap model.GatewayPortMap
	if proxy.MergedGateway != nil {
		portMap = proxy.MergedGateway.PortMap
	}

	// At the ROUTE_CONFIGURATION level only MERGE is supported.
	for _, patch := range efw.Patches[networking.EnvoyFilter_ROUTE_CONFIGURATION] {
		if patch.Operation != networking.EnvoyFilter_Patch_MERGE {
			continue
		}
		matched := commonConditionMatch(patchContext, patch) &&
			routeConfigurationMatch(patchContext, routeConfiguration, patch, portMap)
		if matched {
			merge.Merge(routeConfiguration, patch.Value)
		}
		IncrementEnvoyFilterMetric(patch.Key(), Route, matched)
	}
	patchVirtualHosts(patchContext, efw.Patches, routeConfiguration, portMap)
	return routeConfiguration
}
// patchVirtualHosts applies VIRTUAL_HOST patches: first remove/merge/replace
// on the existing virtual hosts, then ADD operations, and finally drops the
// hosts marked for removal.
func patchVirtualHosts(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	routeConfiguration *route.RouteConfiguration, portMap model.GatewayPortMap,
) {
	removed := sets.New[string]()
	// Pass 1: remove/merge/replace in place, remembering removal candidates
	// by name. The name is read after patching so REPLACE results are honored.
	for idx := range routeConfiguration.VirtualHosts {
		if patchVirtualHost(patchContext, patches, routeConfiguration, routeConfiguration.VirtualHosts, idx, portMap) {
			removed.Insert(routeConfiguration.VirtualHosts[idx].Name)
		}
	}
	// Pass 2: append ADDed virtual hosts.
	for _, patch := range patches[networking.EnvoyFilter_VIRTUAL_HOST] {
		if patch.Operation != networking.EnvoyFilter_Patch_ADD {
			continue
		}
		matched := commonConditionMatch(patchContext, patch) &&
			routeConfigurationMatch(patchContext, routeConfiguration, patch, portMap)
		if matched {
			routeConfiguration.VirtualHosts = append(routeConfiguration.VirtualHosts, proto.Clone(patch.Value).(*route.VirtualHost))
		}
		IncrementEnvoyFilterMetric(patch.Key(), VirtualHost, matched)
	}
	// Pass 3: filter out the removed hosts.
	if removed.Len() > 0 {
		routeConfiguration.VirtualHosts = slices.FilterInPlace(routeConfiguration.VirtualHosts, func(vh *route.VirtualHost) bool {
			return !removed.Contains(vh.Name)
		})
	}
}
// patchVirtualHost applies merge/replace patches to the virtual host at idx
// and then patches its HTTP routes. It returns true when a REMOVE patch
// matched, signaling the caller to drop this virtual host.
func patchVirtualHost(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	routeConfiguration *route.RouteConfiguration, virtualHosts []*route.VirtualHost,
	idx int, portMap model.GatewayPortMap,
) bool {
	for _, patch := range patches[networking.EnvoyFilter_VIRTUAL_HOST] {
		matched := commonConditionMatch(patchContext, patch) &&
			routeConfigurationMatch(patchContext, routeConfiguration, patch, portMap) &&
			virtualHostMatch(virtualHosts[idx], patch)
		if matched {
			switch patch.Operation {
			case networking.EnvoyFilter_Patch_REMOVE:
				// Early return: removal short-circuits remaining patches.
				return true
			case networking.EnvoyFilter_Patch_MERGE:
				merge.Merge(virtualHosts[idx], patch.Value)
			case networking.EnvoyFilter_Patch_REPLACE:
				virtualHosts[idx] = proto.Clone(patch.Value).(*route.VirtualHost)
			}
		}
		IncrementEnvoyFilterMetric(patch.Key(), VirtualHost, matched)
	}
	patchHTTPRoutes(patchContext, patches, routeConfiguration, virtualHosts[idx], portMap)
	return false
}
// hasRouteMatch reports whether the patch carries a route-level match
// condition (route configuration -> vhost -> route). Generated proto
// getters are nil-receiver safe, so the chain below never panics.
func hasRouteMatch(rp *model.EnvoyFilterConfigPatchWrapper) bool {
	vhMatch := rp.Match.GetRouteConfiguration().GetVhost()
	return vhMatch != nil && vhMatch.Route != nil
}
// patchHTTPRoutes applies HTTP_ROUTE patches to the routes of the given
// virtual host: first remove/merge on existing routes (via patchHTTPRoute),
// then ADD/INSERT_AFTER/INSERT_BEFORE/INSERT_FIRST operations, and finally
// compaction of routes that were nil-ed out for removal.
func patchHTTPRoutes(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	routeConfiguration *route.RouteConfiguration, virtualHost *route.VirtualHost, portMap model.GatewayPortMap,
) {
	// clonedVhostRoutes ensures virtualHost.Routes is cloned at most once
	// before the first in-place mutation (different virtual hosts may share
	// the same routes slice).
	clonedVhostRoutes := false
	// routesRemoved is set when patchHTTPRoute nil-ed out any route.
	routesRemoved := false
	// Apply the route level removes/merges if any.
	for index := range virtualHost.Routes {
		patchHTTPRoute(patchContext, patches, routeConfiguration, virtualHost, index, &routesRemoved, portMap, &clonedVhostRoutes)
	}
	// now for the adds
	for _, rp := range patches[networking.EnvoyFilter_HTTP_ROUTE] {
		applied := false
		if !commonConditionMatch(patchContext, rp) ||
			!routeConfigurationMatch(patchContext, routeConfiguration, rp, portMap) ||
			!virtualHostMatch(virtualHost, rp) {
			IncrementEnvoyFilterMetric(rp.Key(), Route, applied)
			continue
		}
		if rp.Operation == networking.EnvoyFilter_Patch_ADD {
			virtualHost.Routes = append(virtualHost.Routes, proto.Clone(rp.Value).(*route.Route))
			applied = true
		} else if rp.Operation == networking.EnvoyFilter_Patch_INSERT_AFTER {
			// Insert after without a route match is same as ADD in the end.
			// NOTE(review): this path continues before the metric increment
			// below, so it is not counted as applied — presumably intentional.
			if !hasRouteMatch(rp) {
				virtualHost.Routes = append(virtualHost.Routes, proto.Clone(rp.Value).(*route.Route))
				continue
			}
			// find the matching route first
			insertPosition := -1
			for i := 0; i < len(virtualHost.Routes); i++ {
				if routeMatch(virtualHost.Routes[i], rp) {
					insertPosition = i + 1
					break
				}
			}
			if insertPosition == -1 {
				continue
			}
			applied = true
			// Append then shift right: append grows the slice by one, then the
			// tail is slid over so clonedVal lands at insertPosition. If the
			// insert point is the end, the plain append already placed it.
			clonedVal := proto.Clone(rp.Value).(*route.Route)
			virtualHost.Routes = append(virtualHost.Routes, clonedVal)
			if insertPosition < len(virtualHost.Routes)-1 {
				copy(virtualHost.Routes[insertPosition+1:], virtualHost.Routes[insertPosition:])
				virtualHost.Routes[insertPosition] = clonedVal
			}
		} else if rp.Operation == networking.EnvoyFilter_Patch_INSERT_BEFORE || rp.Operation == networking.EnvoyFilter_Patch_INSERT_FIRST {
			// insert before/first without a route match is same as insert in the beginning
			if !hasRouteMatch(rp) {
				virtualHost.Routes = append([]*route.Route{proto.Clone(rp.Value).(*route.Route)}, virtualHost.Routes...)
				continue
			}
			// find the matching route first
			insertPosition := -1
			for i := 0; i < len(virtualHost.Routes); i++ {
				if routeMatch(virtualHost.Routes[i], rp) {
					insertPosition = i
					break
				}
			}
			// If matching route is not found, then don't insert and continue.
			if insertPosition == -1 {
				continue
			}
			applied = true
			// In case of INSERT_FIRST, if a match is found, still insert it at the top of the routes.
			if rp.Operation == networking.EnvoyFilter_Patch_INSERT_FIRST {
				insertPosition = 0
			}
			// Same append-then-shift-right insertion as INSERT_AFTER above.
			clonedVal := proto.Clone(rp.Value).(*route.Route)
			virtualHost.Routes = append(virtualHost.Routes, clonedVal)
			copy(virtualHost.Routes[insertPosition+1:], virtualHost.Routes[insertPosition:])
			virtualHost.Routes[insertPosition] = clonedVal
		}
		IncrementEnvoyFilterMetric(rp.Key(), Route, applied)
	}
	// Compact out the routes nil-ed by REMOVE patches.
	if routesRemoved {
		virtualHost.Routes = slices.FilterInPlace(virtualHost.Routes, func(r *route.Route) bool {
			return r != nil
		})
	}
}
// patchHTTPRoute applies remove/merge HTTP_ROUTE patches to the route at
// routeIndex. A removed route is marked by setting its slice slot to nil
// (and *routesRemoved), to be compacted by the caller. *clonedVhostRoutes
// guards a one-time defensive clone of virtualHost.Routes before the first
// mutation.
func patchHTTPRoute(patchContext networking.EnvoyFilter_PatchContext,
	patches map[networking.EnvoyFilter_ApplyTo][]*model.EnvoyFilterConfigPatchWrapper,
	routeConfiguration *route.RouteConfiguration, virtualHost *route.VirtualHost, routeIndex int, routesRemoved *bool, portMap model.GatewayPortMap,
	clonedVhostRoutes *bool,
) {
	for _, rp := range patches[networking.EnvoyFilter_HTTP_ROUTE] {
		applied := false
		if commonConditionMatch(patchContext, rp) &&
			routeConfigurationMatch(patchContext, routeConfiguration, rp, portMap) &&
			virtualHostMatch(virtualHost, rp) &&
			routeMatch(virtualHost.Routes[routeIndex], rp) {
			if !*clonedVhostRoutes {
				// different virtualHosts may share same routes pointer
				virtualHost.Routes = slices.Clone(virtualHost.Routes)
				*clonedVhostRoutes = true
			}
			if rp.Operation == networking.EnvoyFilter_Patch_REMOVE {
				// Mark for removal; the caller filters out nil entries. Note
				// the early return skips the metric increment for this patch.
				virtualHost.Routes[routeIndex] = nil
				*routesRemoved = true
				return
			} else if rp.Operation == networking.EnvoyFilter_Patch_MERGE {
				// Clone the individual route before merging so a proto that
				// may be shared with other virtual hosts is not mutated.
				cloneVhostRouteByRouteIndex(virtualHost, routeIndex)
				merge.Merge(virtualHost.Routes[routeIndex], rp.Value)
			}
			applied = true
		}
		IncrementEnvoyFilterMetric(rp.Key(), Route, applied)
	}
}
// routeConfigurationMatch reports whether the patch's route-configuration
// match condition applies to rc. Sidecars match on port number and route
// name; gateways additionally match on server port name and gateway name.
func routeConfigurationMatch(patchContext networking.EnvoyFilter_PatchContext, rc *route.RouteConfiguration,
	rp *model.EnvoyFilterConfigPatchWrapper, portMap model.GatewayPortMap,
) bool {
	rMatch := rp.Match.GetRouteConfiguration()
	if rMatch == nil {
		// no route-configuration condition: everything matches
		return true
	}

	if patchContext == networking.EnvoyFilter_GATEWAY {
		// Gateway RDS route names encode port number, port name and gateway.
		routePortNumber, portName, gateway := model.ParseGatewayRDSRouteName(rc.Name)
		switch {
		case rMatch.PortNumber != 0 && !anyPortMatches(portMap, routePortNumber, int(rMatch.PortNumber)):
			return false
		case rMatch.PortName != "" && rMatch.PortName != portName:
			return false
		case rMatch.Gateway != "" && rMatch.Gateway != gateway:
			return false
		case rMatch.Name != "" && rMatch.Name != rc.Name:
			return false
		}
		return true
	}

	// Sidecar: derive the listener port from the route name. Inbound names
	// are subset keys; outbound names are the bare port number.
	var listenerPort int
	if strings.HasPrefix(rc.Name, string(model.TrafficDirectionInbound)) {
		_, _, _, listenerPort = model.ParseSubsetKey(rc.Name)
	} else {
		listenerPort, _ = strconv.Atoi(rc.Name)
	}
	// FIXME: Ports on a route can be 0. the API only takes uint32 for ports
	// We should either make that field in API as a wrapper type or switch to int
	if rMatch.PortNumber != 0 && int(rMatch.PortNumber) != listenerPort {
		return false
	}
	return rMatch.Name == "" || rMatch.Name == rc.Name
}
// anyPortMatches reports whether matchNumber corresponds to the given
// listener port: via the service-port mapping when one exists, otherwise
// by direct comparison.
func anyPortMatches(m model.GatewayPortMap, number int, matchNumber int) bool {
	servicePorts, found := m[number]
	if !found {
		// No mapping: compare the port numbers directly.
		return number == matchNumber
	}
	// A mapping exists: match only against the mapped service ports.
	for port := range servicePorts {
		if port == matchNumber {
			return true
		}
	}
	return false
}
// virtualHostMatch reports whether the patch's vhost match condition applies
// to vh. Absent conditions match everything; a present condition never
// matches a nil virtual host.
func virtualHostMatch(vh *route.VirtualHost, rp *model.EnvoyFilterConfigPatchWrapper) bool {
	// Generated getters are nil-receiver safe, so the chain collapses the
	// "no route-configuration match" and "no vhost match" cases into one.
	match := rp.Match.GetRouteConfiguration().GetVhost()
	if match == nil {
		// match any virtual host in the named route configuration
		return true
	}
	if vh == nil {
		// a vhost condition exists but there is no virtual host to test
		return false
	}
	return match.Name == "" || match.Name == vh.Name
}
// routeMatch reports whether the patch's route-level match condition applies
// to httpRoute, comparing the route name and (unless ANY) the action kind.
func routeMatch(httpRoute *route.Route, rp *model.EnvoyFilterConfigPatchWrapper) bool {
	// Nil-safe getter chain: no route-configuration or vhost condition means
	// everything matches.
	vMatch := rp.Match.GetRouteConfiguration().GetVhost()
	if vMatch == nil {
		return true
	}
	match := vMatch.Route
	if match == nil {
		// match any httpRoute in the virtual host
		return true
	}
	if httpRoute == nil {
		// a route condition exists but there is no route to test
		return false
	}
	if match.Name != "" && match.Name != httpRoute.Name {
		return false
	}
	if match.Action == networking.EnvoyFilter_RouteConfigurationMatch_RouteMatch_ANY {
		return true
	}
	// Compare the requested action kind against the route's actual action.
	switch httpRoute.Action.(type) {
	case *route.Route_Route:
		return match.Action == networking.EnvoyFilter_RouteConfigurationMatch_RouteMatch_ROUTE
	case *route.Route_Redirect:
		return match.Action == networking.EnvoyFilter_RouteConfigurationMatch_RouteMatch_REDIRECT
	case *route.Route_DirectResponse:
		return match.Action == networking.EnvoyFilter_RouteConfigurationMatch_RouteMatch_DIRECT_RESPONSE
	}
	// Unknown/unset action kinds fall through and match.
	return true
}
// cloneVhostRouteByRouteIndex replaces the route at routeIndex with a deep
// copy of itself so an in-place merge does not mutate a proto that may be
// shared with other virtual hosts.
func cloneVhostRouteByRouteIndex(virtualHost *route.VirtualHost, routeIndex int) {
	virtualHost.Routes[routeIndex] = proto.Clone(virtualHost.Routes[routeIndex]).(*route.Route)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package extension
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
wasmextensions "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3"
"google.golang.org/protobuf/types/known/durationpb"
extensions "istio.io/api/extensions/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/xds"
"istio.io/istio/pkg/util/sets"
_ "istio.io/istio/pkg/wasm" // include for registering wasm logging scope
)
// defaultConfigSource is the ECDS config source shared by every generated
// WasmPlugin filter: configs are fetched over ADS using the v3 resource API.
var defaultConfigSource = &core.ConfigSource{
	ConfigSourceSpecifier: &core.ConfigSource_Ads{
		Ads: &core.AggregatedConfigSource{},
	},
	ResourceApiVersion: core.ApiVersion_V3,
	// we block proxy init until WasmPlugins are loaded because they might be
	// critical for security (e.g. authn/authz)
	InitialFetchTimeout: &durationpb.Duration{Seconds: 0},
}
// PopAppendHTTP takes a list of HTTP filters and a set of WASM plugins keyed
// by phase. It removes all plugins of the given phase from the plugin set
// and appends their HTTP filter forms to the list, which is returned.
func PopAppendHTTP(list []*hcm.HttpFilter,
	filterMap map[extensions.PluginPhase][]*model.WasmPluginWrapper,
	phase extensions.PluginPhase,
) []*hcm.HttpFilter {
	// Drain the phase from the map, then convert each plugin.
	plugins := filterMap[phase]
	delete(filterMap, phase)
	for _, plugin := range plugins {
		list = append(list, toEnvoyHTTPFilter(plugin))
	}
	return list
}
// PopAppendNetwork takes a list of network filters and a set of WASM plugins
// keyed by phase. It removes all plugins of the given phase from the plugin
// set and appends their network filter forms to the list, which is returned.
func PopAppendNetwork(list []*listener.Filter,
	filterMap map[extensions.PluginPhase][]*model.WasmPluginWrapper,
	phase extensions.PluginPhase,
) []*listener.Filter {
	// Drain the phase from the map, then convert each plugin.
	plugins := filterMap[phase]
	delete(filterMap, phase)
	for _, plugin := range plugins {
		list = append(list, toEnvoyNetworkFilter(plugin))
	}
	return list
}
// toEnvoyHTTPFilter converts a WasmPlugin into an HTTP filter whose actual
// configuration is delivered out-of-band via ECDS.
func toEnvoyHTTPFilter(wasmPlugin *model.WasmPluginWrapper) *hcm.HttpFilter {
	discovery := &core.ExtensionConfigSource{
		ConfigSource: defaultConfigSource,
		TypeUrls: []string{
			xds.WasmHTTPFilterType,
			xds.RBACHTTPFilterType,
		},
	}
	return &hcm.HttpFilter{
		Name:       wasmPlugin.ResourceName,
		ConfigType: &hcm.HttpFilter_ConfigDiscovery{ConfigDiscovery: discovery},
	}
}
// toEnvoyNetworkFilter converts a WasmPlugin into a network filter whose
// actual configuration is delivered out-of-band via ECDS.
func toEnvoyNetworkFilter(wasmPlugin *model.WasmPluginWrapper) *listener.Filter {
	discovery := &core.ExtensionConfigSource{
		ConfigSource: defaultConfigSource,
		TypeUrls: []string{
			xds.WasmNetworkFilterType,
			xds.RBACNetworkFilterType,
		},
	}
	return &listener.Filter{
		Name:       wasmPlugin.ResourceName,
		ConfigType: &listener.Filter_ConfigDiscovery{ConfigDiscovery: discovery},
	}
}
// InsertedExtensionConfigurations builds the ECDS TypedExtensionConfig
// resources for the WasmPlugins whose resource names appear in names.
// pullSecrets maps secret resource names to payloads and is injected into
// each plugin's VM configuration (see updatePluginConfig).
func InsertedExtensionConfigurations(
	wasmPlugins map[extensions.PluginPhase][]*model.WasmPluginWrapper,
	names []string, pullSecrets map[string][]byte,
) []*core.TypedExtensionConfig {
	result := make([]*core.TypedExtensionConfig, 0)
	if len(wasmPlugins) == 0 {
		return result
	}
	hasName := sets.New(names...)
	for _, list := range wasmPlugins {
		for _, p := range list {
			if !hasName.Contains(p.ResourceName) {
				continue
			}
			// Idiomatic tagged switch (was `switch { case p.Type == … }`).
			// The nil check must stay on the concrete pointer type so a
			// typed-nil never leaks through an interface conversion.
			switch p.Type {
			case extensions.PluginType_NETWORK:
				wasmExtensionConfig := p.BuildNetworkWasmFilter()
				if wasmExtensionConfig == nil {
					continue
				}
				updatePluginConfig(wasmExtensionConfig.GetConfig(), pullSecrets)
				result = append(result, &core.TypedExtensionConfig{
					Name:        p.ResourceName,
					TypedConfig: protoconv.MessageToAny(wasmExtensionConfig),
				})
			default:
				// HTTP (and any future unspecified type) uses the HTTP filter.
				wasmExtensionConfig := p.BuildHTTPWasmFilter()
				if wasmExtensionConfig == nil {
					continue
				}
				updatePluginConfig(wasmExtensionConfig.GetConfig(), pullSecrets)
				result = append(result, &core.TypedExtensionConfig{
					Name:        p.ResourceName,
					TypedConfig: protoconv.MessageToAny(wasmExtensionConfig),
				})
			}
		}
	}
	return result
}
// updatePluginConfig rewrites the image-pull-secret env variable in the wasm
// VM config. The Wasm extension config carries a `ISTIO_META_WASM_IMAGE_PULL_SECRET`
// env variable whose value is the secret resource name; here it is replaced
// with the actual secret payload (or emptied when the secret is unknown).
// When the ECDS update reaches the proxy, the agent extracts the secret for
// image pulling and strips the variable before forwarding the config to envoy.
func updatePluginConfig(pluginConfig *wasmextensions.PluginConfig, pullSecrets map[string][]byte) {
	// Nil-safe getter chain: a missing VM config or env map yields a nil map,
	// which reads as "" below and we return without touching anything.
	envs := pluginConfig.GetVmConfig().GetEnvironmentVariables().GetKeyValues()
	secretName := envs[model.WasmSecretEnv]
	if secretName == "" {
		return
	}
	sec, found := pullSecrets[secretName]
	if !found {
		envs[model.WasmSecretEnv] = ""
		return
	}
	envs[model.WasmSecretEnv] = string(sec)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
)
// BuildExtensionConfiguration returns the list of extension configurations for
// the given proxy, restricted to the requested names. This is the ECDS output:
// configs injected via EnvoyFilter patches plus those derived from WasmPlugins.
func (configgen *ConfigGeneratorImpl) BuildExtensionConfiguration(
	proxy *model.Proxy, push *model.PushContext, extensionConfigNames []string, pullSecrets map[string][]byte,
) []*core.TypedExtensionConfig {
	patches := push.EnvoyFilters(proxy)
	result := envoyfilter.InsertedExtensionConfigurations(patches, extensionConfigNames)
	plugins := push.WasmPlugins(proxy)
	result = append(result, extension.InsertedExtensionConfigurations(plugins, extensionConfigNames, pullSecrets)...)
	return result
}
//go:build !agent
// +build !agent
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"bytes"
"text/template"
"time"
"github.com/Masterminds/sprig/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
meshconfig "istio.io/api/mesh/v1alpha1"
configaggregate "istio.io/istio/pilot/pkg/config/aggregate"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
memregistry "istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
cluster2 "istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/retry"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// TestOptions configures construction of a ConfigGenTest fixture
// (see NewConfigGenTest).
type TestOptions struct {
// If provided, these configs will be used directly
Configs []config.Config
// Non-nil entries are dereferenced and appended to Configs.
ConfigPointers []*config.Config
// If provided, the yaml string will be parsed and used as configs
ConfigString string
// If provided, the ConfigString will be treated as a go template, with this as input params
ConfigTemplateInput any
// Services to pre-populate as part of the service discovery
Services []*model.Service
// Service instances to pre-populate in the memory registry.
Instances []*model.ServiceInstance
// Network gateways to pre-populate in the memory registry.
Gateways []model.NetworkGateway
// If provided, this mesh config will be used
MeshConfig *meshconfig.MeshConfig
// Networks watcher to use; a fixed empty watcher is used if nil.
NetworksWatcher mesh.NetworksWatcher
// Additional service registries to use. A ServiceEntry and memory registry will always be created.
ServiceRegistries []serviceregistry.Instance
// Base ConfigController to use. If not set, a in-memory store will be used
ConfigController model.ConfigStoreController
// Additional ConfigStoreController to use
ConfigStoreCaches []model.ConfigStoreController
// CreateConfigStore defines a function that, given a ConfigStoreController, returns another ConfigStoreController to use
CreateConfigStore func(c model.ConfigStoreController) model.ConfigStoreController
// If set, we will not run immediately, allowing adding event handlers, etc prior to start.
SkipRun bool
// Used to set the serviceentry registry's cluster id
ClusterID cluster2.ID
// XDSUpdater to use. Otherwise, our own will be used
XDSUpdater model.XDSUpdater
}
// FuzzValidate reports whether the options are usable as fuzzer input:
// nil entries in the store or registry slices would panic downstream,
// so they are rejected here.
func (to TestOptions) FuzzValidate() bool {
	for _, store := range to.ConfigStoreCaches {
		if store == nil {
			return false
		}
	}
	for _, registry := range to.ServiceRegistries {
		if registry == nil {
			return false
		}
	}
	return true
}
// ConfigGenTest is a test fixture bundling a config generator with the
// environment, stores, and registries it generates from.
type ConfigGenTest struct {
// t reports fixture failures.
t test.Failer
// store is the aggregate writable config store backing env.
store model.ConfigStoreController
// env is the model environment passed to generation.
env *model.Environment
// ConfigGen is the generator under test.
ConfigGen *ConfigGeneratorImpl
// MemRegistry is the in-memory service discovery populated from TestOptions.
MemRegistry *memregistry.ServiceDiscovery
// ServiceEntryRegistry is the ServiceEntry controller wired to the config store.
ServiceEntryRegistry *serviceentry.Controller
// Registry is the aggregate controller containing all registries.
Registry model.Controller
// initialConfigs are created in the store when Run is called.
initialConfigs []config.Config
// stop is closed on test cleanup to shut down controllers.
stop chan struct{}
// MemServiceRegistry wraps MemRegistry as a serviceregistry.Instance.
MemServiceRegistry serviceregistry.Simple
}
// NewConfigGenTest constructs a fully wired config generation fixture: an
// aggregate config store, a ServiceEntry controller, an in-memory service
// registry (plus any extra registries/stores from opts), and a model
// environment. Unless opts.SkipRun is set, the fixture is started and the
// push context initialized before returning.
func NewConfigGenTest(t test.Failer, opts TestOptions) *ConfigGenTest {
t.Helper()
configs := getConfigs(t, opts)
cc := opts.ConfigController
if cc == nil {
// Default to an in-memory store; validation is skipped so tests can use partial configs.
cc = memory.NewSyncController(memory.MakeSkipValidation(collections.PilotGatewayAPI()))
}
controllers := []model.ConfigStoreController{cc}
if opts.CreateConfigStore != nil {
controllers = append(controllers, opts.CreateConfigStore(cc))
}
controllers = append(controllers, opts.ConfigStoreCaches...)
// Writes go to cc; reads aggregate over all controllers.
configController, _ := configaggregate.MakeWriteableCache(controllers, cc)
m := opts.MeshConfig
if m == nil {
m = mesh.DefaultMeshConfig()
}
env := model.NewEnvironment()
xdsUpdater := opts.XDSUpdater
if xdsUpdater == nil {
xdsUpdater = model.NewEndpointIndexUpdater(env.EndpointIndex)
}
serviceDiscovery := aggregate.NewController(aggregate.Options{})
se := serviceentry.NewController(
configController,
xdsUpdater,
serviceentry.WithClusterID(opts.ClusterID))
// TODO allow passing in registry, for k8s, mem registry
serviceDiscovery.AddRegistry(se)
// Memory registry pre-populated from the options.
msd := memregistry.NewServiceDiscovery(opts.Services...)
msd.XdsUpdater = xdsUpdater
for _, instance := range opts.Instances {
msd.AddInstance(instance)
}
msd.AddGateways(opts.Gateways...)
msd.ClusterID = cluster2.ID(provider.Mock)
memserviceRegistry := serviceregistry.Simple{
ClusterID: cluster2.ID(provider.Mock),
ProviderID: provider.Mock,
DiscoveryController: msd,
}
serviceDiscovery.AddRegistry(memserviceRegistry)
for _, reg := range opts.ServiceRegistries {
serviceDiscovery.AddRegistry(reg)
}
env.Watcher = mesh.NewFixedWatcher(m)
if opts.NetworksWatcher == nil {
opts.NetworksWatcher = mesh.NewFixedNetworksWatcher(nil)
}
env.ServiceDiscovery = serviceDiscovery
env.ConfigStore = configController
env.NetworksWatcher = opts.NetworksWatcher
env.Init()
fake := &ConfigGenTest{
t: t,
store: configController,
env: env,
initialConfigs: configs,
stop: test.NewStop(t),
ConfigGen: NewConfigGenerator(&model.DisabledCache{}),
MemRegistry: msd,
MemServiceRegistry: memserviceRegistry,
Registry: serviceDiscovery,
ServiceEntryRegistry: se,
}
if !opts.SkipRun {
// Start controllers, then initialize networks and the push context so the
// fixture is immediately usable for generation.
fake.Run()
if err := env.InitNetworksManager(xdsUpdater); err != nil {
t.Fatal(err)
}
if err := env.PushContext().InitContext(env, nil, nil); err != nil {
t.Fatalf("Failed to initialize push context: %v", err)
}
}
return fake
}
// Run starts the registry and store controllers, creates the initial configs,
// and blocks until everything has synced. The ordering matters: controllers
// must be running before configs are created so they observe the events.
func (f *ConfigGenTest) Run() {
go f.Registry.Run(f.stop)
go f.store.Run(f.stop)
// Setup configuration. This should be done after registries are added so they can process events.
for _, cfg := range f.initialConfigs {
if _, err := f.store.Create(cfg); err != nil {
f.t.Fatalf("failed to create config %v: %v", cfg.Name, err)
}
}
// TODO allow passing event handlers for controller
retry.UntilOrFail(f.t, f.store.HasSynced, retry.Delay(time.Millisecond))
retry.UntilOrFail(f.t, f.Registry.HasSynced, retry.Delay(time.Millisecond))
// Force an EDS resync so endpoints from the just-created configs are visible.
f.ServiceEntryRegistry.ResyncEDS()
}
// SetupProxy fills in defaults on the given proxy and initializes its
// push-derived state (sidecar scope, service targets, gateways, IP mode).
// This should generally be used when creating any proxy, for example:
// p := SetupProxy(&model.Proxy{...}).
func (f *ConfigGenTest) SetupProxy(p *model.Proxy) *model.Proxy {
	if p == nil {
		p = &model.Proxy{}
	}
	if p.Metadata == nil {
		p.Metadata = &model.NodeMetadata{}
	}
	// Default any unset string field; arguments are evaluated at call time,
	// so later defaults may depend on earlier ones.
	defaultStr := func(field *string, value string) {
		if *field == "" {
			*field = value
		}
	}
	defaultStr(&p.Metadata.IstioVersion, "1.22.0")
	if p.IstioVersion == nil {
		p.IstioVersion = model.ParseIstioVersion(p.Metadata.IstioVersion)
	}
	if p.Type == "" {
		p.Type = model.SidecarProxy
	}
	defaultStr(&p.ConfigNamespace, "default")
	defaultStr(&p.Metadata.Namespace, p.ConfigNamespace)
	defaultStr(&p.ID, "app.test")
	defaultStr(&p.DNSDomain, p.ConfigNamespace+".svc.cluster.local")
	if len(p.IPAddresses) == 0 {
		p.IPAddresses = []string{"1.1.1.1"}
	}
	// Initialize push-context-derived data structures.
	pc := f.PushContext()
	p.SetSidecarScope(pc)
	p.SetServiceTargets(f.env.ServiceDiscovery)
	p.SetGatewaysForProxy(pc)
	p.DiscoverIPMode()
	return p
}
// Listeners runs LDS generation for the proxy against the current push context.
func (f *ConfigGenTest) Listeners(p *model.Proxy) []*listener.Listener {
	push := f.PushContext()
	return f.ConfigGen.BuildListeners(p, push)
}
// Clusters runs CDS generation for the proxy and unmarshals the resulting
// resources into concrete Cluster messages.
func (f *ConfigGenTest) Clusters(p *model.Proxy) []*cluster.Cluster {
	resources, _ := f.ConfigGen.BuildClusters(p, &model.PushRequest{Push: f.PushContext()})
	clusters := make([]*cluster.Cluster, 0, len(resources))
	for _, resource := range resources {
		cl := &cluster.Cluster{}
		if err := resource.Resource.UnmarshalTo(cl); err != nil {
			f.t.Fatal(err)
		}
		clusters = append(clusters, cl)
	}
	return clusters
}
// DeltaClusters runs delta CDS generation for the proxy, returning the updated
// clusters, the names of removed clusters, and whether a delta was usable.
func (f *ConfigGenTest) DeltaClusters(
	p *model.Proxy,
	configUpdated sets.Set[model.ConfigKey],
	watched *model.WatchedResource,
) ([]*cluster.Cluster, []string, bool) {
	req := &model.PushRequest{
		Push: f.PushContext(), ConfigsUpdated: configUpdated,
	}
	resources, removed, _, usedDelta := f.ConfigGen.BuildDeltaClusters(p, req, watched)
	clusters := make([]*cluster.Cluster, 0, len(resources))
	for _, resource := range resources {
		cl := &cluster.Cluster{}
		if err := resource.Resource.UnmarshalTo(cl); err != nil {
			f.t.Fatal(err)
		}
		clusters = append(clusters, cl)
	}
	return clusters, removed, usedDelta
}
// RoutesFromListeners runs RDS generation for the route names referenced by
// the given listeners and unmarshals the results.
func (f *ConfigGenTest) RoutesFromListeners(p *model.Proxy, l []*listener.Listener) []*route.RouteConfiguration {
	req := &model.PushRequest{Push: f.PushContext()}
	resources, _ := f.ConfigGen.BuildHTTPRoutes(p, req, ExtractRoutesFromListeners(l))
	routeConfigs := make([]*route.RouteConfiguration, 0, len(resources))
	for _, res := range resources {
		rc := &route.RouteConfiguration{}
		// Unmarshal errors are intentionally ignored in this test helper.
		_ = res.Resource.UnmarshalTo(rc)
		routeConfigs = append(routeConfigs, rc)
	}
	return routeConfigs
}
// Routes generates the proxy's listeners and returns the route configurations
// they reference.
func (f *ConfigGenTest) Routes(p *model.Proxy) []*route.RouteConfiguration {
	listeners := f.Listeners(p)
	return f.RoutesFromListeners(p, listeners)
}
// PushContext returns the environment's current push context.
func (f *ConfigGenTest) PushContext() *model.PushContext {
return f.env.PushContext()
}
// Env returns the model environment backing the fixture.
func (f *ConfigGenTest) Env() *model.Environment {
return f.env
}
// Store returns the writable config store controller backing the fixture.
func (f *ConfigGenTest) Store() model.ConfigStoreController {
return f.store
}
// getConfigs collects the configuration inputs from opts: explicit Configs,
// dereferenced non-nil ConfigPointers, and configs parsed from the
// (optionally templated) ConfigString YAML.
func getConfigs(t test.Failer, opts TestOptions) []config.Config {
	for _, ptr := range opts.ConfigPointers {
		if ptr == nil {
			continue
		}
		opts.Configs = append(opts.Configs, *ptr)
	}
	yamlText := opts.ConfigString
	if opts.ConfigTemplateInput != nil {
		// ConfigString is a go template; render it with the provided input.
		tmpl := template.Must(template.New("").Funcs(sprig.TxtFuncMap()).Parse(opts.ConfigString))
		var rendered bytes.Buffer
		if err := tmpl.Execute(&rendered, opts.ConfigTemplateInput); err != nil {
			t.Fatalf("failed to execute template: %v", err)
		}
		yamlText = rendered.String()
	}
	out := opts.Configs
	if yamlText == "" {
		return out
	}
	now := time.Now()
	parsed, _, err := crd.ParseInputs(yamlText)
	if err != nil {
		t.Fatalf("failed to read config: %v: %v", err, yamlText)
	}
	for _, cfg := range parsed {
		// Default the namespace when the YAML omits it.
		if cfg.Namespace == "" {
			cfg.Namespace = "default"
		}
		// Give all parsed configs the same creation time for consistency;
		// explicit timestamps in the YAML are preserved.
		if cfg.CreationTimestamp.IsZero() {
			cfg.CreationTimestamp = now
		}
		out = append(out, cfg)
	}
	return out
}
// ExtractRoutesFromListeners returns the RDS route config names referenced by
// the HTTP connection managers found in the given listeners.
// copied from xdstest to avoid import issues
func ExtractRoutesFromListeners(ll []*listener.Listener) []string {
	routes := []string{}
	for _, lis := range ll {
		for _, chain := range lis.FilterChains {
			for _, f := range chain.Filters {
				if f.Name != wellknown.HTTPConnectionManager {
					continue
				}
				connMgr := &hcm.HttpConnectionManager{}
				// Unmarshal errors are ignored; non-HCM payloads simply yield no route.
				_ = f.GetTypedConfig().UnmarshalTo(connMgr)
				if rds, ok := connMgr.GetRouteSpecifier().(*hcm.HttpConnectionManager_Rds); ok {
					routes = append(routes, rds.Rds.RouteConfigName)
				}
			}
		}
	}
	return routes
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/security/authn"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
)
// FilterChainMatchOptions describes options used for filter chain matches.
type FilterChainMatchOptions struct {
// Application protocols of the filter chain match
ApplicationProtocols []string
// Transport protocol of the filter chain match. "tls" or empty
// (see xdsfilters.TLSTransportProtocol / RawBufferTransportProtocol).
TransportProtocol string
// Filter chain protocol. HTTP for HTTP proxy and TCP for TCP proxy
Protocol networking.ListenerProtocol
// Whether this chain should terminate TLS or not
TLS bool
}
// Set of filter chain match options used for various combinations.
// These tables enumerate the inbound filter chain matches generated per
// mTLS mode (strict/permissive/plaintext) and listener protocol
// (HTTP/TCP/auto-detect). getFilterChainMatchOptions selects among them.
var (
// Same as inboundPermissiveFilterChainMatchOptions except for following case:
// FCM 3: ALPN [istio-peer-exchange, istio] Transport protocol: tls --> TCP traffic from sidecar over TLS
inboundPermissiveFilterChainMatchWithMxcOptions = []FilterChainMatchOptions{
{
// client side traffic was detected as HTTP by the outbound listener, sent over mTLS
ApplicationProtocols: mtlsHTTPALPNs,
// If client sends mTLS traffic, transport protocol will be set by the TLS inspector
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolHTTP,
TLS: true,
},
{
// client side traffic was detected as HTTP by the outbound listener, sent out as plain text
ApplicationProtocols: plaintextHTTPALPNs,
// No transport protocol match as this filter chain (+match) will be used for plain text connections
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
{
// client side traffic could not be identified by the outbound listener, but sent over mTLS
ApplicationProtocols: mtlsTCPWithMxcALPNs,
// If client sends mTLS traffic, transport protocol will be set by the TLS inspector
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolTCP,
TLS: true,
},
{
// client side traffic could not be identified by the outbound listener, sent over plaintext
// or it could be that the client has no sidecar. In this case, this filter chain is simply
// receiving plaintext TCP traffic.
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
{
// client side traffic could not be identified by the outbound listener, sent over one-way
// TLS (HTTPS for example) by the downstream application.
// or it could be that the client has no sidecar, and it is directly making a HTTPS connection to
// this sidecar. In this case, this filter chain is receiving plaintext one-way TLS traffic. The TLS
// inspector would detect this as TLS traffic [not necessarily mTLS]. But since there is no ALPN to match,
// this filter chain match will treat the traffic as just another TCP proxy.
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolTCP,
},
}
// Permissive mode, port explicitly declared HTTP: only mTLS-HTTP and plaintext-HTTP chains.
inboundPermissiveHTTPFilterChainMatchWithMxcOptions = []FilterChainMatchOptions{
{
// HTTP over MTLS
ApplicationProtocols: allIstioMtlsALPNs,
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolHTTP,
TLS: true,
},
{
// Plaintext HTTP
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
// We do not need to handle other simple TLS or others, as this is explicitly declared as HTTP type.
}
// Permissive mode, port explicitly declared TCP: mTLS, plain TLS, and plaintext chains.
inboundPermissiveTCPFilterChainMatchWithMxcOptions = []FilterChainMatchOptions{
{
// MTLS
ApplicationProtocols: allIstioMtlsALPNs,
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolTCP,
TLS: true,
},
{
// Plain TLS
TransportProtocol: xdsfilters.TLSTransportProtocol,
Protocol: networking.ListenerProtocolTCP,
},
{
// Plaintext
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
}
// Strict mode, auto-detected protocol: everything must arrive over TLS.
inboundStrictFilterChainMatchOptions = []FilterChainMatchOptions{
{
// client side traffic was detected as HTTP by the outbound listener.
// If we are in strict mode, we will get mTLS HTTP ALPNS only.
ApplicationProtocols: mtlsHTTPALPNs,
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.TLSTransportProtocol,
TLS: true,
},
{
// Could not detect traffic on the client side. Server side has no mTLS.
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.TLSTransportProtocol,
TLS: true,
},
}
// Strict mode, declared TCP port: a single TLS-terminating TCP chain.
inboundStrictTCPFilterChainMatchOptions = []FilterChainMatchOptions{
{
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.TLSTransportProtocol,
TLS: true,
},
}
// Strict mode, declared HTTP port: a single TLS-terminating HTTP chain.
inboundStrictHTTPFilterChainMatchOptions = []FilterChainMatchOptions{
{
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.TLSTransportProtocol,
TLS: true,
},
}
// Plaintext (disabled mTLS) mode, auto-detected protocol.
inboundPlainTextFilterChainMatchOptions = []FilterChainMatchOptions{
{
ApplicationProtocols: plaintextHTTPALPNs,
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
{
// Could not detect traffic on the client side. Server side has no mTLS.
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
}
// Plaintext mode, declared TCP port.
inboundPlainTextTCPFilterChainMatchOptions = []FilterChainMatchOptions{
{
Protocol: networking.ListenerProtocolTCP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
}
// Plaintext mode, declared HTTP port.
inboundPlainTextHTTPFilterChainMatchOptions = []FilterChainMatchOptions{
{
Protocol: networking.ListenerProtocolHTTP,
TransportProtocol: xdsfilters.RawBufferTransportProtocol,
},
}
)
// getTLSFilterChainMatchOptions returns the single TLS-terminating match
// used for chains of the given protocol.
func getTLSFilterChainMatchOptions(protocol networking.ListenerProtocol) []FilterChainMatchOptions {
	opt := FilterChainMatchOptions{
		Protocol:          protocol,
		TransportProtocol: xdsfilters.TLSTransportProtocol,
		TLS:               true,
	}
	return []FilterChainMatchOptions{opt}
}
// getFilterChainMatchOptions returns the FilterChainMatchOptions that should be
// used based on mTLS mode and protocol. Protocols other than HTTP and Auto are
// treated as TCP.
func getFilterChainMatchOptions(settings authn.MTLSSettings, protocol networking.ListenerProtocol) []FilterChainMatchOptions {
	switch settings.Mode {
	case model.MTLSStrict:
		switch protocol {
		case networking.ListenerProtocolHTTP:
			return inboundStrictHTTPFilterChainMatchOptions
		case networking.ListenerProtocolAuto:
			return inboundStrictFilterChainMatchOptions
		default:
			return inboundStrictTCPFilterChainMatchOptions
		}
	case model.MTLSPermissive:
		switch protocol {
		case networking.ListenerProtocolHTTP:
			return inboundPermissiveHTTPFilterChainMatchWithMxcOptions
		case networking.ListenerProtocolAuto:
			return inboundPermissiveFilterChainMatchWithMxcOptions
		default:
			return inboundPermissiveTCPFilterChainMatchWithMxcOptions
		}
	default:
		switch protocol {
		case networking.ListenerProtocolHTTP:
			return inboundPlainTextHTTPFilterChainMatchOptions
		case networking.ListenerProtocolAuto:
			return inboundPlainTextFilterChainMatchOptions
		default:
			return inboundPlainTextTCPFilterChainMatchOptions
		}
	}
}
// ToTransportSocket returns the downstream TLS context for this match option:
// nil when the chain does not terminate TLS, otherwise the HTTP or TCP
// context from the provided mTLS settings.
func (opt FilterChainMatchOptions) ToTransportSocket(mtls authn.MTLSSettings) *tls.DownstreamTlsContext {
	switch {
	case !opt.TLS:
		return nil
	case opt.Protocol == networking.ListenerProtocolHTTP:
		return mtls.HTTP
	default:
		return mtls.TCP
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"encoding/binary"
"fmt"
"sort"
"strconv"
"strings"
"unsafe"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
statefulsession "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/stateful_session/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
anypb "github.com/golang/protobuf/ptypes/any"
"github.com/hashicorp/go-multierror"
extensions "istio.io/api/extensions/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/tunnelingconfig"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/gateway"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/security"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto"
"istio.io/istio/pkg/util/hash"
"istio.io/istio/pkg/util/istiomultierror"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// mutableListenerOpts pairs a listener under construction with the options and
// transport protocol used to build it (keyed by listener name in buildGatewayListeners).
type mutableListenerOpts struct {
// mutable is the listener being built.
mutable *MutableGatewayListener
// opts accumulates the filter chain options for the listener.
opts *gatewayListenerOpts
// transport is TCP or QUIC.
transport istionetworking.TransportProtocol
}
// MutableGatewayListener represents a listener that is being built.
// Historically, this was used for all listener building. At this point, outbound and inbound have specialized code.
// This only applies to gateways now.
type MutableGatewayListener struct {
// Listener is the listener being built.
Listener *listener.Listener
}
// build adds the provided TCP and HTTP filters to the provided Listener and serializes them.
// TODO: given how tightly tied listener.FilterChains, opts.filterChainOpts, and mutable.FilterChains
// are to each other we should encapsulate them some way to ensure they remain consistent (mainly that
// in each an index refers to the same chain).
func (ml *MutableGatewayListener) build(builder *ListenerBuilder, opts gatewayListenerOpts) error {
if len(opts.filterChainOpts) == 0 {
return fmt.Errorf("must have more than 0 chains in listener %q", ml.Listener.Name)
}
httpConnectionManagers := make([]*hcm.HttpConnectionManager, len(ml.Listener.FilterChains))
// NOTE: filter chain i in the listener corresponds to opts.filterChainOpts[i];
// both are assumed to be the same length (see TODO above).
for i := range ml.Listener.FilterChains {
filterChain := ml.Listener.FilterChains[i]
opt := opts.filterChainOpts[i]
ml.Listener.FilterChains[i].Metadata = opt.metadata
if opt.httpOpts == nil {
// Pure network filter chain (no HTTP connection manager).
filterChain.Filters = opt.networkFilters
log.Debugf("attached %d network filters to listener %q filter chain %d", len(filterChain.Filters), ml.Listener.Name, i)
} else {
// Add the TCP filters first.. and then the HTTP connection manager.
// Skip adding this if transport is not TCP (could be QUIC)
if len(opt.networkFilters) > 0 {
filterChain.Filters = append(filterChain.Filters, opt.networkFilters...)
}
// If statPrefix has been set before calling this method, respect that.
if len(opt.httpOpts.statPrefix) == 0 {
opt.httpOpts.statPrefix = strings.ToLower(ml.Listener.TrafficDirection.String()) + "_" + ml.Listener.Name
}
opt.httpOpts.port = opts.port
httpConnectionManagers[i] = builder.buildHTTPConnectionManager(opt.httpOpts)
// Serialize the HCM into the chain's final (typed-config) filter.
filter := &listener.Filter{
Name: wellknown.HTTPConnectionManager,
ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(httpConnectionManagers[i])},
}
filterChain.Filters = append(filterChain.Filters, filter)
log.Debugf("attached HTTP filter with %d http_filter options to listener %q filter chain %d",
len(httpConnectionManagers[i].HttpFilters), ml.Listener.Name, i)
}
}
return nil
}
// buildGatewayListeners builds the gateway listeners for the proxy from its
// merged gateway configuration: one listener per (bind, port, transport),
// with filter chains accumulated across servers, then serialized via
// MutableGatewayListener.build. Failed listeners are logged and omitted.
func (configgen *ConfigGeneratorImpl) buildGatewayListeners(builder *ListenerBuilder) *ListenerBuilder {
if builder.node.MergedGateway == nil {
log.Debugf("buildGatewayListeners: no gateways for router %v", builder.node.ID)
return builder
}
mergedGateway := builder.node.MergedGateway
log.Debugf("buildGatewayListeners: gateways after merging: %v", mergedGateway)
actualWildcards, _ := getWildcardsAndLocalHost(builder.node.GetIPMode())
errs := istiomultierror.New()
// Mutable objects keyed by listener name so that we can build listeners at the end.
mutableopts := make(map[string]mutableListenerOpts)
proxyConfig := builder.node.Metadata.ProxyConfigOrDefault(builder.push.Mesh.DefaultConfig)
// listener port -> host/bind
tlsHostsByPort := map[uint32]map[string]string{}
for _, port := range mergedGateway.ServerPorts {
// Skip ports we cannot bind to. Note that MergeGateways will already translate Service port to
// targetPort, which handles the common case of exposing ports like 80 and 443 but listening on
// higher numbered ports.
if builder.node.IsUnprivileged() && port.Number < 1024 {
log.Warnf("buildGatewayListeners: skipping privileged gateway port %d for node %s as it is an unprivileged pod",
port.Number, builder.node.ID)
continue
}
var extraBind []string
bind := actualWildcards[0]
if features.EnableDualStack && len(actualWildcards) > 1 {
// Dual-stack: bind the remaining wildcard addresses as extra addresses.
extraBind = actualWildcards[1:]
}
if len(port.Bind) > 0 {
// An explicit bind on the server overrides the wildcard binds.
bind = port.Bind
extraBind = nil
}
// NOTE: There is no gating here to check for the value of the QUIC feature flag. However,
// they are created in MergeGatways only when the flag is set. So when it is turned off, the
// MergedQUICTransportServers would be nil so that no listener would be created. It is written this way
// to make testing a little easier.
transportToServers := map[istionetworking.TransportProtocol]map[model.ServerPort]*model.MergedServers{
istionetworking.TransportProtocolTCP: mergedGateway.MergedServers,
istionetworking.TransportProtocolQUIC: mergedGateway.MergedQUICTransportServers,
}
for transport, gwServers := range transportToServers {
if gwServers == nil {
log.Debugf("buildGatewayListeners: no gateway-server for transport %s at port %d", transport.String(), port.Number)
continue
}
// PROXY protocol only applies to the TCP transport.
needPROXYProtocol := transport != istionetworking.TransportProtocolQUIC &&
proxyConfig.GetGatewayTopology().GetProxyProtocol() != nil
// on a given port, we can either have plain text HTTP servers or
// HTTPS/TLS servers with SNI. We cannot have a mix of http and https server on same port.
// We can also have QUIC on a given port along with HTTPS/TLS on a given port. It does not
// cause port-conflict as they use different transport protocols
opts := &gatewayListenerOpts{
push: builder.push,
proxy: builder.node,
bind: bind,
extraBind: extraBind,
port: int(port.Number),
bindToPort: true,
needPROXYProtocol: needPROXYProtocol,
}
lname := getListenerName(bind, int(port.Number), transport)
p := protocol.Parse(port.Protocol)
serversForPort := gwServers[port]
if serversForPort == nil {
continue
}
switch transport {
case istionetworking.TransportProtocolTCP:
configgen.buildGatewayTCPBasedFilterChains(builder, p, port, opts, serversForPort, proxyConfig, mergedGateway, tlsHostsByPort)
case istionetworking.TransportProtocolQUIC:
// Currently, we just assume that QUIC is HTTP/3 although that does not
// have to be the case (it is just the most common case now, in the future
// we will support more cases)
configgen.buildGatewayHTTP3FilterChains(builder, serversForPort, mergedGateway, proxyConfig, opts)
}
// First server on this listener name creates the entry; later ones merge
// their filter chain options into it.
if mopts, exists := mutableopts[lname]; !exists {
mutable := &MutableGatewayListener{}
mutableopts[lname] = mutableListenerOpts{mutable: mutable, opts: opts, transport: transport}
} else {
mopts.opts.filterChainOpts = append(mopts.opts.filterChainOpts, opts.filterChainOpts...)
}
}
}
listeners := make([]*listener.Listener, 0)
for _, ml := range mutableopts {
ml.mutable.Listener = buildGatewayListener(*ml.opts, ml.transport)
log.Debugf("buildGatewayListeners: marshaling listener %q with %d filter chains",
ml.mutable.Listener.GetName(), len(ml.mutable.Listener.GetFilterChains()))
// Filters are serialized one time into an opaque struct once we have the complete list.
if err := ml.mutable.build(builder, *ml.opts); err != nil {
errs = multierror.Append(errs, fmt.Errorf("gateway omitting listener %q due to: %v", ml.mutable.Listener.Name, err.Error()))
continue
}
listeners = append(listeners, ml.mutable.Listener)
}
// We'll try to return any listeners we successfully marshaled; if we have none, we'll emit the error we built up
err := errs.ErrorOrNil()
if err != nil {
// we have some listeners to return, but we also have some errors; log them
log.Info(err.Error())
}
if len(mutableopts) == 0 {
log.Warnf("gateway has zero listeners for node %v", builder.node.ID)
return builder
}
builder.gatewayListeners = listeners
return builder
}
// buildGatewayTCPBasedFilterChains populates opts.filterChainOpts with the
// filter chains for a TCP-transport gateway listener port: a single HTTP
// chain for plain-HTTP ports, or per-server chains for HTTPS servers with
// TLS termination and raw TCP/passthrough servers. Network-level WASM
// plugins configured for this listener are attached to each HTTP chain.
func (configgen *ConfigGeneratorImpl) buildGatewayTCPBasedFilterChains(
	builder *ListenerBuilder,
	p protocol.Instance, port model.ServerPort,
	opts *gatewayListenerOpts,
	serversForPort *model.MergedServers,
	proxyConfig *meshconfig.ProxyConfig,
	mergedGateway *model.MergedGateway,
	tlsHostsByPort map[uint32]map[string]string,
) {
	// Add network level WASM filters if any configured.
	wasm := builder.push.WasmPluginsByListenerInfo(builder.node, model.WasmPluginListenerInfo{
		Port:  opts.port,
		Class: istionetworking.ListenerClassGateway,
	}, model.WasmPluginTypeNetwork)
	if p.IsHTTP() {
		// We have a list of HTTP servers on this port. Build a single listener for the server port.
		port := &networking.Port{Number: port.Number, Protocol: port.Protocol}
		// Fix: pass the transport protocol constant (TransportProtocolTCP) here, not the
		// listener protocol constant — consistent with the HTTPS branch below, since this
		// argument describes the listener's transport.
		httpFilterChainOpts := configgen.createGatewayHTTPFilterChainOpts(builder.node, port, nil, serversForPort.RouteName,
			proxyConfig, istionetworking.TransportProtocolTCP, builder.push)
		// In HTTP, we need to have RBAC, etc. upfront so that they can enforce policies immediately
		httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_AUTHN)
		httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_AUTHZ)
		httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_STATS)
		httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
		opts.filterChainOpts = []*filterChainOpts{httpFilterChainOpts}
	} else {
		// build http connection manager with TLS context, for HTTPS servers using simple/mutual TLS
		// build listener with tcp proxy, with or without TLS context, for TCP servers
		// or TLS servers using simple/mutual/passthrough TLS
		// or HTTPS servers using passthrough TLS
		// This process typically yields multiple filter chain matches (with SNI) [if TLS is used]
		for _, server := range serversForPort.Servers {
			if gateway.IsHTTPSServerWithTLSTermination(server) {
				routeName := mergedGateway.TLSServerInfo[server].RouteName
				// This is a HTTPS server, where we are doing TLS termination. Build a http connection manager with TLS context
				httpFilterChainOpts := configgen.createGatewayHTTPFilterChainOpts(builder.node, server.Port, server,
					routeName, proxyConfig, istionetworking.TransportProtocolTCP, builder.push)
				// In HTTP, we need to have RBAC, etc. upfront so that they can enforce policies immediately
				httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_AUTHN)
				httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_AUTHZ)
				httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_STATS)
				httpFilterChainOpts.networkFilters = extension.PopAppendNetwork(httpFilterChainOpts.networkFilters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
				opts.filterChainOpts = append(opts.filterChainOpts, httpFilterChainOpts)
			} else {
				// we are building a network filter chain (no http connection manager) for this filter chain
				// For network filters such as mysql, mongo, etc., we need the filter codec upfront. Data from this
				// codec is used by RBAC later.
				// This is the case of TCP or PASSTHROUGH.
				tcpChainOpts := builder.createGatewayTCPFilterChainOpts(
					server, port.Number, mergedGateway.GatewayNameForServer[server], tlsHostsByPort)
				opts.filterChainOpts = append(opts.filterChainOpts, tcpChainOpts...)
			}
		}
	}
}
// buildGatewayHTTP3FilterChains assembles the QUIC (HTTP/3) filter chains for every
// server on this port and stores them on opts. Each HTTP/3 server is assumed to
// mirror an existing HTTPS server, so that server's route name is reused rather
// than minting a new one.
func (configgen *ConfigGeneratorImpl) buildGatewayHTTP3FilterChains(
	builder *ListenerBuilder,
	serversForPort *model.MergedServers,
	mergedGateway *model.MergedGateway,
	proxyConfig *meshconfig.ProxyConfig,
	opts *gatewayListenerOpts,
) {
	chains := make([]*filterChainOpts, 0, len(serversForPort.Servers))
	for _, server := range serversForPort.Servers {
		log.Debugf("buildGatewayListeners: creating QUIC filter chain for port %d(%s:%s)",
			server.GetPort().GetNumber(), server.GetPort().GetName(), server.GetPort().GetProtocol())
		// Reuse the route name of the HTTPS server this HTTP/3 server mirrors.
		rdsName := mergedGateway.TLSServerInfo[server].RouteName
		chains = append(chains, configgen.createGatewayHTTPFilterChainOpts(builder.node, server.Port, server,
			rdsName, proxyConfig, istionetworking.TransportProtocolQUIC, builder.push))
	}
	opts.filterChainOpts = chains
}
// getListenerName computes the listener name for a bind address, port, and
// transport. QUIC (UDP) listeners carry a "udp_" prefix to distinguish them from
// TCP listeners on the same address/port; unknown transports yield "unknown".
func getListenerName(bind string, port int, transport istionetworking.TransportProtocol) string {
	portStr := strconv.Itoa(port)
	if transport == istionetworking.TransportProtocolTCP {
		return bind + "_" + portStr
	}
	if transport == istionetworking.TransportProtocolQUIC {
		return "udp_" + bind + "_" + portStr
	}
	return "unknown"
}
// buildNameToServiceMapForHTTPRoutes collects, for every destination host referenced
// by the virtual service's HTTP routes (route, mirror, and mirrors destinations), the
// service that should back it. A service in the virtual service's own namespace is
// preferred when it is visible to the proxy; otherwise we fall back to whichever
// service is visible to the proxy's config namespace (which may be nil).
func buildNameToServiceMapForHTTPRoutes(node *model.Proxy, push *model.PushContext,
	virtualService config.Config,
) map[host.Name]*model.Service {
	vs := virtualService.Spec.(*networking.VirtualService)
	result := map[host.Name]*model.Service{}
	record := func(hostname host.Name) {
		if _, seen := result[hostname]; seen {
			return
		}
		var svc *model.Service
		// Prefer the service that lives in the same namespace as the virtual service,
		// but only when the proxy is actually allowed to see it.
		if same, ok := push.ServiceIndex.HostnameAndNamespace[hostname][virtualService.Namespace]; ok {
			if push.IsServiceVisible(same, node.ConfigNamespace) {
				svc = same
			}
		}
		// Fall back to any service for this hostname that is visible to the proxy's
		// config namespace.
		if svc == nil {
			svc = push.ServiceForHostname(node, hostname)
		}
		result[hostname] = svc
	}
	for _, httpRoute := range vs.Http {
		if mirror := httpRoute.GetMirror(); mirror != nil {
			record(host.Name(mirror.GetHost()))
		}
		for _, m := range httpRoute.GetMirrors() {
			if dst := m.GetDestination(); dst != nil {
				record(host.Name(dst.GetHost()))
			}
		}
		for _, r := range httpRoute.GetRoute() {
			if dst := r.GetDestination(); dst != nil {
				record(host.Name(dst.GetHost()))
			}
		}
	}
	return result
}
// buildGatewayHTTPRouteConfig builds the named RDS route configuration for a gateway
// proxy. It gathers every server bound to routeName, merges the virtual services
// attached to those servers into virtual hosts (de-duplicated by hostname), handles
// TLS HTTPS-redirect servers, optionally collapses virtual hosts carrying identical
// route lists, and falls back to a single catch-all 404 vhost when nothing matched.
func (configgen *ConfigGeneratorImpl) buildGatewayHTTPRouteConfig(node *model.Proxy, push *model.PushContext,
	routeName string,
) *route.RouteConfiguration {
	if node.MergedGateway == nil {
		log.Warnf("buildGatewayRoutes: no gateways for router %v", node.ID)
		// No gateway configuration at all: return an empty but valid route config.
		return &route.RouteConfiguration{
			Name:             routeName,
			VirtualHosts:     []*route.VirtualHost{},
			ValidateClusters: proto.BoolFalse,
		}
	}
	ph := GetProxyHeaders(node, push, istionetworking.ListenerClassGateway)
	merged := node.MergedGateway
	log.Debugf("buildGatewayRoutes: gateways after merging: %v", merged)
	// make sure that there is some server listening on this port
	if _, ok := merged.ServersByRouteName[routeName]; !ok {
		log.Warnf("Gateway missing for route %s. This is normal if gateway was recently deleted.", routeName)
		// This can happen when a gateway has recently been deleted. Envoy will still request route
		// information due to the draining of listeners, so we should not return an error.
		return &route.RouteConfiguration{
			Name:             routeName,
			VirtualHosts:     []*route.VirtualHost{},
			ValidateClusters: proto.BoolFalse,
		}
	}
	servers := merged.ServersByRouteName[routeName]
	// When this is true, we add alt-svc header to the response to tell the client
	// that HTTP/3 over QUIC is available on the same port for this host. This is
	// very important for discovering HTTP/3 services
	isH3DiscoveryNeeded := merged.HTTP3AdvertisingRoutes.Contains(routeName)
	// gatewayRoutes caches routes built per gateway and virtual service (keyed by
	// "name/namespace") so a virtual service shared by several servers is built once.
	gatewayRoutes := make(map[string]map[string][]*route.Route)
	// gatewayVirtualServices caches the virtual services bound to each gateway.
	gatewayVirtualServices := make(map[string][]config.Config)
	// vHostDedupMap merges servers/virtual services that target the same hostname.
	vHostDedupMap := make(map[host.Name]*route.VirtualHost)
	for _, server := range servers {
		gatewayName := merged.GatewayNameForServer[server]
		port := int(server.Port.Number)
		var virtualServices []config.Config
		var exists bool
		if virtualServices, exists = gatewayVirtualServices[gatewayName]; !exists {
			virtualServices = push.VirtualServicesForGateway(node.ConfigNamespace, gatewayName)
			gatewayVirtualServices[gatewayName] = virtualServices
		}
		for _, virtualService := range virtualServices {
			virtualServiceHosts := host.NewNames(virtualService.Spec.(*networking.VirtualService).Hosts)
			serverHosts := host.NamesForNamespace(server.Hosts, virtualService.Namespace)
			// We have two cases here:
			// 1. virtualService hosts are 1.foo.com, 2.foo.com, 3.foo.com and server hosts are ns/*.foo.com
			// 2. virtualService hosts are *.foo.com, and server hosts are ns/1.foo.com, ns/2.foo.com, ns/3.foo.com
			intersectingHosts := serverHosts.Intersection(virtualServiceHosts)
			if len(intersectingHosts) == 0 {
				continue
			}
			// Make sure we can obtain services which are visible to this virtualService as much as possible.
			nameToServiceMap := buildNameToServiceMapForHTTPRoutes(node, push, virtualService)
			var routes []*route.Route
			var exists bool
			var err error
			if _, exists = gatewayRoutes[gatewayName]; !exists {
				gatewayRoutes[gatewayName] = make(map[string][]*route.Route)
			}
			vskey := virtualService.Name + "/" + virtualService.Namespace
			// Only build routes for this gateway/virtual-service pair once; later
			// servers referencing the same pair reuse the cached result.
			if routes, exists = gatewayRoutes[gatewayName][vskey]; !exists {
				opts := istio_route.RouteOptions{
					IsTLS:                     server.Tls != nil,
					IsHTTP3AltSvcHeaderNeeded: isH3DiscoveryNeeded,
					Mesh:                      push.Mesh,
				}
				hashByDestination := istio_route.GetConsistentHashForVirtualService(push, node, virtualService)
				routes, err = istio_route.BuildHTTPRoutesForVirtualService(node, virtualService, nameToServiceMap,
					hashByDestination, port, sets.New(gatewayName), opts)
				if err != nil {
					log.Debugf("%s omitting routes for virtual service %v/%v due to error: %v", node.ID, virtualService.Namespace, virtualService.Name, err)
					continue
				}
				gatewayRoutes[gatewayName][vskey] = routes
			}
			for _, hostname := range intersectingHosts {
				if vHost, exists := vHostDedupMap[hostname]; exists {
					// A vhost for this hostname already exists: merge routes into it.
					vHost.Routes = append(vHost.Routes, routes...)
					if server.Tls != nil && server.Tls.HttpsRedirect {
						vHost.RequireTls = route.VirtualHost_ALL
					}
				} else {
					gatewayService := nameToServiceMap[hostname]
					perRouteFilters := map[string]*anypb.Any{}
					if gatewayService != nil {
						// Build StatefulSession Filter if gateway service has persistence session label.
						if statefulConfig := util.MaybeBuildStatefulSessionFilterConfig(gatewayService); statefulConfig != nil {
							perRouteStatefulSession := &statefulsession.StatefulSessionPerRoute{
								Override: &statefulsession.StatefulSessionPerRoute_StatefulSession{
									StatefulSession: statefulConfig,
								},
							}
							perRouteFilters[util.StatefulSessionFilter] = protoconv.MessageToAny(perRouteStatefulSession)
						}
					}
					newVHost := &route.VirtualHost{
						Name:                       util.DomainName(string(hostname), port),
						Domains:                    []string{hostname.String()},
						Routes:                     routes,
						TypedPerFilterConfig:       perRouteFilters,
						IncludeRequestAttemptCount: ph.IncludeRequestAttemptCount,
					}
					if server.Tls != nil && server.Tls.HttpsRedirect {
						newVHost.RequireTls = route.VirtualHost_ALL
					}
					vHostDedupMap[hostname] = newVHost
				}
			}
		}
		// For servers with HttpsRedirect set: ensure every server host either has its
		// existing vhost marked RequireTls, or gets a new redirect-only vhost.
		for _, hostname := range server.Hosts {
			if !server.GetTls().GetHttpsRedirect() {
				continue
			}
			if vHost, exists := vHostDedupMap[host.Name(hostname)]; exists {
				vHost.RequireTls = route.VirtualHost_ALL
				continue
			}
			newVHost := &route.VirtualHost{
				Name:                       util.DomainName(hostname, port),
				Domains:                    []string{hostname},
				IncludeRequestAttemptCount: ph.IncludeRequestAttemptCount,
				RequireTls:                 route.VirtualHost_ALL,
			}
			vHostDedupMap[host.Name(hostname)] = newVHost
		}
	}
	var virtualHosts []*route.VirtualHost
	if len(vHostDedupMap) == 0 {
		port := int(servers[0].Port.Number)
		log.Warnf("constructed http route config for route %s on port %d with no vhosts; Setting up a default 404 vhost", routeName, port)
		virtualHosts = []*route.VirtualHost{{
			Name:    util.DomainName("blackhole", port),
			Domains: []string{"*"},
			// Empty route list will cause Envoy to 404 NR any requests
			Routes: []*route.Route{},
		}}
	} else {
		virtualHosts = make([]*route.VirtualHost, 0, len(vHostDedupMap))
		vHostDedupMap = collapseDuplicateRoutes(vHostDedupMap)
		for _, v := range vHostDedupMap {
			v.Routes = istio_route.SortVHostRoutes(v.Routes)
			virtualHosts = append(virtualHosts, v)
		}
	}
	util.SortVirtualHosts(virtualHosts)
	routeCfg := &route.RouteConfiguration{
		// Retain the routeName as its used by EnvoyFilter patching logic
		Name:                           routeName,
		VirtualHosts:                   virtualHosts,
		ValidateClusters:               proto.BoolFalse,
		IgnorePortInHostMatching:       !node.IsProxylessGrpc(),
		MaxDirectResponseBodySizeBytes: istio_route.DefaultMaxDirectResponseBodySizeBytes,
	}
	return routeCfg
}
// hashRouteList returns a position-dependent hash of the pointer identities of the
// given routes. It is used only for cheap de-duplication (see collapseDuplicateRoutes)
// and is not security sensitive.
func hashRouteList(r []*route.Route) uint64 {
	// nolint: gosec
	// Not security sensitive code
	h := hash.New()
	// The pointer width is identical for every element, so allocate the scratch
	// buffer once instead of once per route. Reuse is safe because Write follows the
	// io.Writer contract and does not retain the slice.
	size := unsafe.Sizeof(uintptr(0))
	b := make([]byte, size)
	for _, v := range r {
		u := uintptr(unsafe.Pointer(v))
		switch size {
		case 4:
			binary.LittleEndian.PutUint32(b, uint32(u))
		default:
			binary.LittleEndian.PutUint64(b, uint64(u))
		}
		h.Write(b)
	}
	return h.Sum64()
}
// collapseDuplicateRoutes prevents cardinality explosion when we have multiple hostnames defined for the same set of routes
// with virtual service: {hosts: [a, b], routes: [r1, r2]}
// before: [{vhosts: [a], routes: [r1, r2]},{vhosts: [b], routes: [r1, r2]}]
// after: [{vhosts: [a,b], routes: [r1, r2]}]
// Note: At this point in the code, r1 and r2 are just pointers. However, once we send them over the wire
// they are fully expanded and expensive, so the optimization is important.
func collapseDuplicateRoutes(input map[host.Name]*route.VirtualHost) map[host.Name]*route.VirtualHost {
	// Collapsing is gated by a feature flag; when disabled, pass the input through.
	if !features.EnableRouteCollapse {
		return input
	}
	dedupe := make(map[host.Name]*route.VirtualHost, len(input))
	// known maps the hash of a vhost's route list to the hostname of the "primary"
	// vhost in dedupe that carries those routes.
	known := make(map[uint64]host.Name, len(input))
	// In order to ensure stable XDS, we need to sort things. First vhost alphabetically will be the "primary"
	var hostnameKeys host.Names = make([]host.Name, 0, len(input))
	for k := range input {
		hostnameKeys = append(hostnameKeys, k)
	}
	sort.Sort(hostnameKeys)
	for _, h := range hostnameKeys {
		vh := input[h]
		hash := hashRouteList(vh.Routes)
		eh, f := known[hash]
		if f && vhostMergeable(vh, dedupe[eh]) {
			// Merge domains, routes are identical. We check the hash *and* routesEqual so that we don't depend on not having
			// collisions.
			// routesEqual is fairly cheap, but not cheap enough to do n^2 checks, so both are needed
			dedupe[eh].Domains = append(dedupe[eh].Domains, vh.Domains...)
		} else {
			// First vhost with this route list (or a hash collision): keep it as-is.
			known[hash] = h
			dedupe[h] = vh
		}
	}
	return dedupe
}
// vhostMergeable checks if two virtual hosts can be merged into one.
// Domains and name are deliberately not compared: they are the keys under which
// the merge happens.
func vhostMergeable(a, b *route.VirtualHost) bool {
	return a.IncludeRequestAttemptCount == b.IncludeRequestAttemptCount &&
		a.RequireTls == b.RequireTls &&
		routesEqual(a.Routes, b.Routes)
}
// routesEqual reports whether a and b hold exactly the same route pointers in the
// same order. Elements are compared by pointer identity, not deep equality.
func routesEqual(a, b []*route.Route) bool {
	if len(a) != len(b) {
		return false
	}
	for i, ra := range a {
		if ra != b[i] {
			return false
		}
	}
	return true
}
// builds a HTTP connection manager for servers of type HTTP or HTTPS (mode: simple/mutual).
// Plain HTTP servers produce a chain with no SNI hosts and no TLS context; HTTPS
// (TLS-terminating) servers additionally carry the server's SNI hosts and a downstream
// TLS context. When transportProtocol is QUIC, the connection manager is HTTP/3-only.
func (configgen *ConfigGeneratorImpl) createGatewayHTTPFilterChainOpts(node *model.Proxy, port *networking.Port, server *networking.Server,
	routeName string, proxyConfig *meshconfig.ProxyConfig, transportProtocol istionetworking.TransportProtocol,
	push *model.PushContext,
) *filterChainOpts {
	serverProto := protocol.Parse(port.Protocol)
	ph := GetProxyHeadersFromProxyConfig(proxyConfig, istionetworking.ListenerClassGateway)
	if serverProto.IsHTTP() {
		return &filterChainOpts{
			// This works because we validate that only HTTPS servers can have same port but still different port names
			// and that no two non-HTTPS servers can be on same port or share port names.
			// Validation is done per gateway and also during merging
			sniHosts:   nil,
			tlsContext: nil,
			httpOpts: &httpListenerOpts{
				rds:                       routeName,
				useRemoteAddress:          true,
				connectionManager:         buildGatewayConnectionManager(proxyConfig, node, false /* http3SupportEnabled */, push),
				suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
				protocol:                  serverProto,
				class:                     istionetworking.ListenerClassGateway,
			},
		}
	}
	// Build a filter chain for the HTTPS server
	// We know that this is a HTTPS server because this function is called only for ports of type HTTP/HTTPS
	// where HTTPS server's TLS mode is not passthrough and not nil
	http3Enabled := transportProtocol == istionetworking.TransportProtocolQUIC
	return &filterChainOpts{
		// This works because we validate that only HTTPS servers can have same port but still different port names
		// and that no two non-HTTPS servers can be on same port or share port names.
		// Validation is done per gateway and also during merging
		sniHosts:   node.MergedGateway.TLSServerInfo[server].SNIHosts,
		tlsContext: buildGatewayListenerTLSContext(push.Mesh, server, node, transportProtocol),
		httpOpts: &httpListenerOpts{
			rds:                       routeName,
			useRemoteAddress:          true,
			connectionManager:         buildGatewayConnectionManager(proxyConfig, node, http3Enabled, push),
			suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
			protocol:                  serverProto,
			statPrefix:                server.Name,
			http3Only:                 http3Enabled,
			class:                     istionetworking.ListenerClassGateway,
		},
	}
}
// buildGatewayConnectionManager assembles the HttpConnectionManager for a gateway
// listener: proxy-config gateway-topology settings (trusted XFF hops, XFCC
// forwarding), optional HTTP/1.0 acceptance, HTTP/3 codec selection, and — when the
// feature is enabled — internal address ranges derived from the mesh networks.
func buildGatewayConnectionManager(proxyConfig *meshconfig.ProxyConfig, node *model.Proxy, http3SupportEnabled bool,
	push *model.PushContext,
) *hcm.HttpConnectionManager {
	ph := GetProxyHeadersFromProxyConfig(proxyConfig, istionetworking.ListenerClassGateway)
	httpProtoOpts := &core.Http1ProtocolOptions{}
	if features.HTTP10 || enableHTTP10(node.Metadata.HTTP10) {
		httpProtoOpts.AcceptHttp_10 = true
	}
	xffNumTrustedHops := uint32(0)
	// Gateways do not use ProxyHeaders for XFCC as there is an existing field in gateway topology that is used instead.
	forwardClientCertDetails := util.MeshConfigToEnvoyForwardClientCertDetails(meshconfig.ForwardClientCertDetails_SANITIZE_SET)
	if proxyConfig != nil && proxyConfig.GatewayTopology != nil {
		xffNumTrustedHops = proxyConfig.GatewayTopology.NumTrustedProxies
		if proxyConfig.GatewayTopology.ForwardClientCertDetails != meshconfig.ForwardClientCertDetails_UNDEFINED {
			forwardClientCertDetails = util.MeshConfigToEnvoyForwardClientCertDetails(proxyConfig.GatewayTopology.ForwardClientCertDetails)
		}
	}
	httpConnManager := &hcm.HttpConnectionManager{
		XffNumTrustedHops: xffNumTrustedHops,
		// Forward client cert if connection is mTLS
		ForwardClientCertDetails:   forwardClientCertDetails,
		ServerName:                 ph.ServerName,
		ServerHeaderTransformation: ph.ServerHeaderTransformation,
		GenerateRequestId:          ph.GenerateRequestID,
		HttpProtocolOptions:        httpProtoOpts,
	}
	// Only set_current_client_cert_details if forward_client_cert_details permitted
	// See: https://www.envoyproxy.io/docs/envoy/latest/api-v3/extensions/filters/network/http_connection_manager/v3/http_connection_manager.proto
	if forwardClientCertDetails == hcm.HttpConnectionManager_APPEND_FORWARD ||
		forwardClientCertDetails == hcm.HttpConnectionManager_SANITIZE_SET {
		httpConnManager.SetCurrentClientCertDetails = &hcm.HttpConnectionManager_SetCurrentClientCertDetails{
			Subject: proto.BoolTrue,
			Cert:    true,
			Dns:     true,
			Uri:     true,
		}
	}
	if http3SupportEnabled {
		httpConnManager.Http3ProtocolOptions = &core.Http3ProtocolOptions{}
		httpConnManager.CodecType = hcm.HttpConnectionManager_HTTP3
	}
	if features.EnableHCMInternalNetworks && push.Networks != nil {
		// Fix: previously a fresh InternalAddressConfig was assigned for each network
		// in this loop, so only the last network's CIDR ranges survived. Accumulate
		// the CIDR ranges of ALL networks into a single config instead.
		if networks := push.Networks.GetNetworks(); len(networks) > 0 {
			iac := &hcm.HttpConnectionManager_InternalAddressConfig{}
			for _, internalnetwork := range networks {
				for _, ne := range internalnetwork.Endpoints {
					if cidr := util.ConvertAddressToCidr(ne.GetFromCidr()); cidr != nil {
						iac.CidrRanges = append(iac.CidrRanges, cidr)
					}
				}
			}
			httpConnManager.InternalAddressConfig = iac
		}
	}
	return httpConnManager
}
// buildGatewayListenerTLSContext builds the downstream TLS context for a gateway
// server, or returns nil for passthrough servers (no TLS termination at the gateway).
//
// Below is a table of potential scenarios for the gateway configuration:
//
// TLS mode | Mesh-wide SDS | Ingress SDS | Resulting Configuration
// SIMPLE/MUTUAL | ENABLED | ENABLED | support SDS at ingress gateway to terminate SSL communication outside the mesh
// ISTIO_MUTUAL | ENABLED | DISABLED | support SDS at gateway to terminate workload mTLS, with internal workloads
//
// | for egress or with another trusted cluster for ingress)
//
// ISTIO_MUTUAL | DISABLED | DISABLED | use file-mounted secret paths to terminate workload mTLS from gateway
//
// Note that ISTIO_MUTUAL TLS mode and ingressSds should not be used simultaneously on the same ingress gateway.
func buildGatewayListenerTLSContext(
	mesh *meshconfig.MeshConfig, server *networking.Server, proxy *model.Proxy, transportProtocol istionetworking.TransportProtocol,
) *tls.DownstreamTlsContext {
	// Server.TLS cannot be nil or passthrough. But as a safety guard, return nil
	if server.Tls == nil || gateway.IsPassThroughServer(server) {
		return nil // We don't need to setup TLS context for passthrough mode
	}
	// Drop unsupported cipher suites before building the context. NOTE(review): this
	// mutates server.Tls in place.
	server.Tls.CipherSuites = security.FilterCipherSuites(server.Tls.CipherSuites)
	return BuildListenerTLSContext(server.Tls, proxy, mesh, transportProtocol, gateway.IsTCPServerWithTLSTermination(server))
}
// convertTLSProtocol maps an Istio ServerTLSSettings TLS protocol version to the
// corresponding Envoy TlsParameters protocol, falling back to TLS_AUTO (with a
// warning) for values outside the known range.
func convertTLSProtocol(in networking.ServerTLSSettings_TLSProtocol) tls.TlsParameters_TlsProtocol {
	// The enums are defined to line up one-to-one, so a direct cast suffices.
	converted := tls.TlsParameters_TlsProtocol(in)
	if converted >= tls.TlsParameters_TLS_AUTO && converted <= tls.TlsParameters_TLSv1_3 {
		return converted
	}
	log.Warnf("was not able to map TLS protocol to Envoy TLS protocol")
	return tls.TlsParameters_TLS_AUTO
}
// createGatewayTCPFilterChainOpts builds filter chains for a TCP/TLS gateway server.
// Three cases exist: TLS passthrough (SNI-routed, handled via TLS routes), plain
// opaque TCP (server.TLS nil), and TLS termination (simple/mutual). The latter two
// both require matching virtual-service TCP routes; when none match, an empty chain
// list is returned and a warning is logged.
func (lb *ListenerBuilder) createGatewayTCPFilterChainOpts(
	server *networking.Server, listenerPort uint32,
	gatewayName string, tlsHostsByPort map[uint32]map[string]string,
) []*filterChainOpts {
	// Passthrough server: route by SNI without terminating TLS.
	if server.Tls != nil && gateway.IsPassThroughServer(server) {
		return lb.buildGatewayNetworkFiltersFromTLSRoutes(server, listenerPort, gatewayName, tlsHostsByPort)
	}
	// Opaque TCP or TLS-terminating server: find matching virtual services with TCP
	// blocks and forward to their destinations.
	filters := lb.buildGatewayNetworkFiltersFromTCPRoutes(server, gatewayName)
	if len(filters) == 0 {
		log.Warnf("gateway %s:%d listener missed network filter", gatewayName, server.Port.Number)
		return []*filterChainOpts{}
	}
	chain := &filterChainOpts{
		networkFilters: filters,
	}
	if server.Tls != nil {
		// TLS termination: attach the server's SNI hosts and a TLS context.
		// Validation ensures that non-passthrough servers will have certs.
		chain.sniHosts = lb.node.MergedGateway.TLSServerInfo[server].SNIHosts
		chain.tlsContext = buildGatewayListenerTLSContext(lb.push.Mesh, server, lb.node, istionetworking.TransportProtocolTCP)
	}
	return []*filterChainOpts{chain}
}
// buildGatewayNetworkFiltersFromTCPRoutes builds tcp proxy routes for all VirtualServices with TCP blocks.
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this
// server's port and hostnames, and produces network filters for each destination from the filtered services.
// Returns nil when no virtual service TCP route matches this server.
func (lb *ListenerBuilder) buildGatewayNetworkFiltersFromTCPRoutes(server *networking.Server, gateway string) []*listener.Filter {
	// The server's port, used both for route matching and filter construction.
	port := &model.Port{
		Name:     server.Port.Name,
		Port:     int(server.Port.Number),
		Protocol: protocol.Parse(server.Port.Protocol),
	}
	gatewayServerHosts := sets.NewWithLength[host.Name](len(server.Hosts))
	for _, hostname := range server.Hosts {
		gatewayServerHosts.Insert(host.Name(hostname))
	}
	virtualServices := lb.push.VirtualServicesForGateway(lb.node.ConfigNamespace, gateway)
	if len(virtualServices) == 0 {
		log.Warnf("no virtual service bound to gateway: %v", gateway)
	}
	for _, v := range virtualServices {
		vsvc := v.Spec.(*networking.VirtualService)
		// We have two cases here:
		// 1. virtualService hosts are 1.foo.com, 2.foo.com, 3.foo.com and gateway's hosts are ns/*.foo.com
		// 2. virtualService hosts are *.foo.com, and gateway's hosts are ns/1.foo.com, ns/2.foo.com, ns/3.foo.com
		// Since this is TCP, neither matters. We are simply looking for matching virtual service for this gateway
		matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, v)
		if len(matchingHosts) == 0 {
			// the VirtualService's hosts don't include hosts advertised by server
			continue
		}
		// ensure we satisfy the rule's l4 match conditions, if any exist
		// For the moment, there can be only one match that succeeds
		// based on the match port/server port and the gateway name
		for _, tcp := range vsvc.Tcp {
			if l4MultiMatch(tcp.Match, server, gateway) {
				// includeMx is set only when the server terminates Istio mTLS (ISTIO_MUTUAL).
				includeMx := server.GetTls().GetMode() == networking.ServerTLSSettings_ISTIO_MUTUAL
				return lb.buildOutboundNetworkFilters(tcp.Route, port, v.Meta, includeMx)
			}
		}
	}
	return nil
}
// buildGatewayNetworkFiltersFromTLSRoutes builds tcp proxy routes for all VirtualServices with TLS blocks.
// It first obtains all virtual services bound to the set of Gateways for this workload, filters them by this
// server's port and hostnames, and produces network filters for each destination from the filtered services.
// tlsHostsByPort tracks SNI hosts already claimed on each listener port and is mutated
// here, so duplicate filter chain matches (which Envoy rejects) are skipped.
func (lb *ListenerBuilder) buildGatewayNetworkFiltersFromTLSRoutes(server *networking.Server,
	listenerPort uint32, gatewayName string, tlsHostsByPort map[uint32]map[string]string,
) []*filterChainOpts {
	port := &model.Port{
		Name:     server.Port.Name,
		Port:     int(server.Port.Number),
		Protocol: protocol.Parse(server.Port.Protocol),
	}
	gatewayServerHosts := sets.NewWithLength[host.Name](len(server.Hosts))
	for _, hostname := range server.Hosts {
		gatewayServerHosts.Insert(host.Name(hostname))
	}
	filterChains := make([]*filterChainOpts, 0)
	if server.Tls.Mode == networking.ServerTLSSettings_AUTO_PASSTHROUGH {
		// AUTO_PASSTHROUGH needs no virtual services: chains are generated directly
		// from the SNI-DNAT clusters visible to this proxy.
		filterChains = append(filterChains, builtAutoPassthroughFilterChains(lb.push, lb.node, lb.node.MergedGateway.TLSServerInfo[server].SNIHosts)...)
	} else {
		virtualServices := lb.push.VirtualServicesForGateway(lb.node.ConfigNamespace, gatewayName)
		for _, v := range virtualServices {
			vsvc := v.Spec.(*networking.VirtualService)
			// We have two cases here:
			// 1. virtualService hosts are 1.foo.com, 2.foo.com, 3.foo.com and gateway's hosts are ns/*.foo.com
			// 2. virtualService hosts are *.foo.com, and gateway's hosts are ns/1.foo.com, ns/2.foo.com, ns/3.foo.com
			// The code below only handles 1.
			// TODO: handle case 2
			matchingHosts := pickMatchingGatewayHosts(gatewayServerHosts, v)
			if len(matchingHosts) == 0 {
				// the VirtualService's hosts don't include hosts advertised by server
				continue
			}
			// For every matching TLS block, generate a filter chain with sni match
			// TODO: Bug..if there is a single virtual service with *.foo.com, and multiple TLS block
			// matches, one for 1.foo.com, another for 2.foo.com, this code will produce duplicate filter
			// chain matches
			for _, tls := range vsvc.Tls {
				for i, match := range tls.Match {
					if l4SingleMatch(convertTLSMatchToL4Match(match), server, gatewayName) {
						// Envoy will reject config that has multiple filter chain matches with the same matching rules
						// To avoid this, we need to make sure we don't have duplicated SNI hosts, which will become
						// SNI filter chain matches
						if tlsHostsByPort[listenerPort] == nil {
							tlsHostsByPort[listenerPort] = make(map[string]string)
						}
						if duplicateSniHosts := model.CheckDuplicates(match.SniHosts, server.Bind, tlsHostsByPort[listenerPort]); len(duplicateSniHosts) != 0 {
							log.Warnf(
								"skipping VirtualService %s rule #%v on server port %d of gateway %s, duplicate SNI host names: %v",
								v.Meta.Name, i, port.Port, gatewayName, duplicateSniHosts)
							model.RecordRejectedConfig(gatewayName)
							continue
						}
						// the sni hosts in the match will become part of a filter chain match
						filterChains = append(filterChains, &filterChainOpts{
							sniHosts:       match.SniHosts,
							tlsContext:     nil, // NO TLS context because this is passthrough
							networkFilters: lb.buildOutboundNetworkFilters(tls.Route, port, v.Meta, false),
						})
					}
				}
			}
		}
	}
	return filterChains
}
// builtAutoPassthroughFilterChains builds a set of filter chains for auto_passthrough gateway servers.
// These servers allow connecting to any SNI-DNAT upstream cluster that matches the server's hostname.
// To handle this, we generate a filter chain per upstream cluster: one for the base
// cluster of each matching service port, plus one per destination-rule subset.
func builtAutoPassthroughFilterChains(push *model.PushContext, proxy *model.Proxy, hosts []string) []*filterChainOpts {
	// We do not want any authz here, so build a new LB without it set
	lb := &ListenerBuilder{
		node: proxy,
		push: push,
	}
	filterChains := make([]*filterChainOpts, 0)
	for _, service := range proxy.SidecarScope.Services() {
		// Mesh-external services are skipped.
		if service.MeshExternal {
			continue
		}
		for _, port := range service.Ports {
			// UDP ports cannot be targets of these TCP/TLS filter chains.
			if port.Protocol == protocol.UDP {
				continue
			}
			// Only services whose hostname falls under one of the server's hosts qualify.
			matchFound := false
			for _, h := range hosts {
				if service.Hostname.SubsetOf(host.Name(h)) {
					matchFound = true
					break
				}
			}
			if !matchFound {
				continue
			}
			clusterName := model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port.Port)
			statPrefix := clusterName
			// If stat name is configured, build the stat prefix from configured pattern.
			if len(push.Mesh.OutboundClusterStatName) != 0 {
				statPrefix = telemetry.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", port, 0, &service.Attributes)
			}
			destinationRule := CastDestinationRule(proxy.SidecarScope.DestinationRule(
				model.TrafficDirectionOutbound, proxy, service.Hostname).GetRule())
			// First, we build the standard cluster. We match on the SNI matching the cluster name
			// (per the spec of AUTO_PASSTHROUGH), as well as all possible Istio mTLS ALPNs. This,
			// along with filtering out plaintext destinations in EDS, ensures that our requests will
			// always hit an Istio mTLS filter chain on the inbound side. As a result, it should not
			// be possible for anyone to access a cluster without mTLS. Note that we cannot actually
			// check for mTLS here, as we are doing passthrough TLS.
			filterChains = append(filterChains, &filterChainOpts{
				sniHosts:             []string{clusterName},
				applicationProtocols: allIstioMtlsALPNs,
				tlsContext:           nil, // NO TLS context because this is passthrough
				networkFilters: lb.buildOutboundNetworkFiltersWithSingleDestination(
					statPrefix, clusterName, "", port, destinationRule, tunnelingconfig.Skip, false),
			})
			// Do the same, but for each subset
			for _, subset := range destinationRule.GetSubsets() {
				subsetClusterName := model.BuildDNSSrvSubsetKey(model.TrafficDirectionOutbound, subset.Name, service.Hostname, port.Port)
				subsetStatPrefix := subsetClusterName
				// If stat name is configured, build the stat prefix from configured pattern.
				if len(push.Mesh.OutboundClusterStatName) != 0 {
					subsetStatPrefix = telemetry.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), subset.Name, port, 0, &service.Attributes)
				}
				filterChains = append(filterChains, &filterChainOpts{
					sniHosts:             []string{subsetClusterName},
					applicationProtocols: allIstioMtlsALPNs,
					tlsContext:           nil, // NO TLS context because this is passthrough
					networkFilters: lb.buildOutboundNetworkFiltersWithSingleDestination(
						subsetStatPrefix, subsetClusterName, subset.Name, port, destinationRule, tunnelingconfig.Skip, false),
				})
			}
		}
	}
	return filterChains
}
// pickMatchingGatewayHosts selects the virtual service hosts that are covered by the
// gateway server's hosts, using wildcard hostname matching and, for "namespace/host"
// entries, a namespace check. The returned map is keyed by the virtual service host
// and holds the original (untrimmed) gateway host it matched, because callers use
// that key to locate TLS redirect servers.
func pickMatchingGatewayHosts(gatewayServerHosts sets.Set[host.Name], virtualService config.Config) map[string]host.Name {
	matched := make(map[string]host.Name)
	for _, vsHost := range virtualService.Spec.(*networking.VirtualService).Hosts {
		for gatewayHost := range gatewayServerHosts {
			candidate := gatewayHost
			if strings.Contains(string(candidate), "/") {
				// Gateway merging guarantees the form is exactly "ns/host"
				// (never "./host" or "*/host"), so match the namespace first.
				parts := strings.Split(string(candidate), "/")
				if parts[0] != virtualService.Namespace {
					continue
				}
				// Strip the namespace prefix before hostname matching.
				candidate = host.Name(parts[1])
			}
			if candidate.Matches(host.Name(vsHost)) {
				// Record the untrimmed gateway host: callers key on it.
				matched[vsHost] = gatewayHost
			}
		}
	}
	return matched
}
// convertTLSMatchToL4Match projects a TLS match onto a plain L4 match, carrying over
// every field that is meaningful for L4 matching (the TLS-only SNI hosts are dropped).
func convertTLSMatchToL4Match(tlsMatch *networking.TLSMatchAttributes) *networking.L4MatchAttributes {
	l4 := &networking.L4MatchAttributes{}
	l4.DestinationSubnets = tlsMatch.DestinationSubnets
	l4.Port = tlsMatch.Port
	l4.SourceLabels = tlsMatch.SourceLabels
	l4.Gateways = tlsMatch.Gateways
	l4.SourceNamespace = tlsMatch.SourceNamespace
	return l4
}
// l4MultiMatch reports whether any of the L4 match predicates selects this
// server/gateway pair. Per the proto definitions, predicates are OR'd together while
// conditions inside a single predicate are AND'd, so the first fully-matching
// predicate wins; an empty predicate list matches everything.
func l4MultiMatch(predicates []*networking.L4MatchAttributes, server *networking.Server, gateway string) bool {
	if len(predicates) == 0 {
		return true
	}
	for _, predicate := range predicates {
		if l4SingleMatch(predicate, server, gateway) {
			return true
		}
	}
	return false
}
// l4SingleMatch reports whether one L4 predicate matches: both the port condition
// and the gateway condition must hold (each is vacuously true when unspecified).
func l4SingleMatch(match *networking.L4MatchAttributes, server *networking.Server, gateway string) bool {
	portOK := isPortMatch(match.Port, server)
	gatewayOK := isGatewayMatch(gateway, match.Gateways)
	return portOK && gatewayOK
}
// isPortMatch reports whether the match's port predicate accepts the server.
// A zero port means "no port predicate" and matches any server.
func isPortMatch(port uint32, server *networking.Server) bool {
	return port == 0 || server.Port.Number == port
}
// isGatewayMatch reports whether the gateway is selected by the match's gateway
// names. An empty list means "no gateway predicate" and matches any gateway.
func isGatewayMatch(gateway string, gatewayNames []string) bool {
	// No predicate: everything matches.
	if len(gatewayNames) == 0 {
		return true
	}
	// The redundant len(gatewayNames) > 0 guard that previously wrapped this loop
	// was dead code after the early return above.
	for _, gatewayName := range gatewayNames {
		if gatewayName == gateway {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"net"
"sort"
"strconv"
"strings"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
statefulsession "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/stateful_session/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
anypb "google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/proto"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
const (
	// wildcardDomainPrefix is prepended to generated domains of passthrough
	// Kubernetes services so subdomains also match (e.g. "*.foo.svc.cluster.local").
	wildcardDomainPrefix = "*."
	// inboundVirtualHostPrefix is the name prefix for inbound HTTP virtual hosts
	// ("inbound|http|"); the port number is appended by the caller.
	inboundVirtualHostPrefix = string(model.TrafficDirectionInbound) + "|http|"
)
// BuildHTTPRoutes produces a list of routes for the proxy, one RDS resource per
// requested route name. Sidecar/waypoint proxies go through the (cacheable)
// outbound route builder; routers use the gateway route builder. Returns the
// resources plus log details describing RDS cache effectiveness.
func (configgen *ConfigGeneratorImpl) BuildHTTPRoutes(
	node *model.Proxy,
	req *model.PushRequest,
	routeNames []string,
) ([]*discovery.Resource, model.XdsLogDetails) {
	var routeConfigurations model.Resources

	efw := req.Push.EnvoyFilters(node)
	// hit/miss count RDS cache hits vs rebuilds; surfaced in the log details below.
	hit, miss := 0, 0
	switch node.Type {
	case model.SidecarProxy, model.Waypoint:
		// Cache the full per-port virtual host list so multiple sniffed routes on
		// the same port don't recompute it within this call.
		vHostCache := make(map[int][]*route.VirtualHost)
		// dependent envoyfilters' key, calculate in front once to prevent calc for each route.
		envoyfilterKeys := efw.KeysApplyingTo(
			networking.EnvoyFilter_ROUTE_CONFIGURATION,
			networking.EnvoyFilter_VIRTUAL_HOST,
			networking.EnvoyFilter_HTTP_ROUTE,
		)
		for _, routeName := range routeNames {
			rc, cached := configgen.buildSidecarOutboundHTTPRouteConfig(node, req, routeName, vHostCache, efw, envoyfilterKeys)
			// Hits are only counted when unsafe assertions are off (assertions force rebuilds).
			if cached && !features.EnableUnsafeAssertions {
				hit++
			} else {
				miss++
			}
			if rc == nil {
				// Unknown route name: answer with an empty route configuration rather
				// than omitting the resource.
				emptyRoute := &route.RouteConfiguration{
					Name:             routeName,
					VirtualHosts:     []*route.VirtualHost{},
					ValidateClusters: proto.BoolFalse,
				}
				rc = &discovery.Resource{
					Name:     routeName,
					Resource: protoconv.MessageToAny(emptyRoute),
				}
			}
			routeConfigurations = append(routeConfigurations, rc)
		}
	case model.Router:
		for _, routeName := range routeNames {
			rc := configgen.buildGatewayHTTPRouteConfig(node, req.Push, routeName)
			if rc != nil {
				rc = envoyfilter.ApplyRouteConfigurationPatches(networking.EnvoyFilter_GATEWAY, node, efw, rc)
				resource := &discovery.Resource{
					Name:     routeName,
					Resource: protoconv.MessageToAny(rc),
				}
				routeConfigurations = append(routeConfigurations, resource)
			}
		}
	}
	if !features.EnableRDSCaching {
		return routeConfigurations, model.DefaultXdsLogDetails
	}
	return routeConfigurations, model.XdsLogDetails{AdditionalInfo: fmt.Sprintf("cached:%v/%v", hit, hit+miss)}
}
// buildSidecarInboundHTTPRouteConfig builds the route config with a single wildcard virtual host on the inbound path.
// The single default route forwards everything to the inbound cluster for this chain.
// TODO: trace decorators, inbound timeouts
func buildSidecarInboundHTTPRouteConfig(lb *ListenerBuilder, cc inboundChainConfig) *route.RouteConfiguration {
	traceOperation := telemetry.TraceOperation(string(cc.telemetryMetadata.InstanceHostname), cc.port.Port)
	defaultRoute := istio_route.BuildDefaultHTTPInboundRoute(cc.clusterName, traceOperation)

	inboundVHost := &route.VirtualHost{
		Name:    inboundVirtualHostPrefix + strconv.Itoa(cc.port.Port), // Format: "inbound|http|%d"
		Domains: []string{"*"},
		Routes:  []*route.Route{defaultRoute},
	}

	r := &route.RouteConfiguration{
		Name:             cc.clusterName,
		VirtualHosts:     []*route.VirtualHost{inboundVHost},
		ValidateClusters: proto.BoolFalse,
	}
	// Apply sidecar-inbound EnvoyFilter route patches before returning.
	efw := lb.push.EnvoyFilters(lb.node)
	r = envoyfilter.ApplyRouteConfigurationPatches(networking.EnvoyFilter_SIDECAR_INBOUND, lb.node, efw, r)
	return r
}
// buildSidecarOutboundHTTPRouteConfig builds an outbound HTTP Route for sidecar.
// Based on port, will determine all virtual hosts that listen on the port.
// The second return value reports whether the result was served from a cache
// (either the per-call vHostCache or the shared XDS cache).
func (configgen *ConfigGeneratorImpl) buildSidecarOutboundHTTPRouteConfig(
	node *model.Proxy,
	req *model.PushRequest,
	routeName string,
	vHostCache map[int][]*route.VirtualHost,
	efw *model.EnvoyFilterWrapper,
	efKeys []string,
) (*discovery.Resource, bool) {
	listenerPort, useSniffing, err := extractListenerPort(routeName)
	if err != nil && routeName != model.RDSHttpProxy && !strings.HasPrefix(routeName, model.UnixAddressPrefix) {
		// TODO: This is potentially one place where envoyFilter ADD operation can be helpful if the
		// user wants to ship a custom RDS. But at this point, the match semantics are murky. We have no
		// object to match upon. This needs more thought. For now, we will continue to return nil for
		// unknown routes
		return nil, false
	}

	var virtualHosts []*route.VirtualHost
	var routeCache *istio_route.Cache
	var resource *discovery.Resource

	cacheHit := false
	if useSniffing && listenerPort != 0 {
		// Check if we have already computed the list of all virtual hosts for this port
		// If so, then we simply have to return only the relevant virtual hosts for
		// this listener's host:port
		if vhosts, exists := vHostCache[listenerPort]; exists {
			virtualHosts = getVirtualHostsForSniffedServicePort(vhosts, routeName)
			cacheHit = true
		}
	}
	if !cacheHit {
		virtualHosts, resource, routeCache = BuildSidecarOutboundVirtualHosts(node, req.Push, routeName, listenerPort, efKeys, configgen.Cache)
		if resource != nil {
			// Served straight from the shared XDS cache.
			return resource, true
		}
		if listenerPort > 0 {
			// only cache for tcp ports and not for uds
			vHostCache[listenerPort] = virtualHosts
		}

		// FIXME: This will ignore virtual services with hostnames that do not match any service in the registry
		// per api spec, these hostnames + routes should appear in the virtual hosts (think bookinfo.com and
		// productpage.ns1.svc.cluster.local). See the TODO in BuildSidecarOutboundVirtualHosts for the right solution
		if useSniffing {
			virtualHosts = getVirtualHostsForSniffedServicePort(virtualHosts, routeName)
		}
	}

	// Sort for deterministic output; non-sniffed routes get a trailing catch-all
	// virtual host (passthrough or blackhole, per the node's outbound policy).
	util.SortVirtualHosts(virtualHosts)

	if !useSniffing {
		includeRequestAttemptCount := GetProxyHeaders(node, req.Push, istionetworking.ListenerClassSidecarOutbound).IncludeRequestAttemptCount
		virtualHosts = append(virtualHosts, buildCatchAllVirtualHost(node, includeRequestAttemptCount))
	}

	out := &route.RouteConfiguration{
		Name:                           routeName,
		VirtualHosts:                   virtualHosts,
		ValidateClusters:               proto.BoolFalse,
		MaxDirectResponseBodySizeBytes: istio_route.DefaultMaxDirectResponseBodySizeBytes,
		IgnorePortInHostMatching:       true,
	}

	// apply envoy filter patches
	out = envoyfilter.ApplyRouteConfigurationPatches(networking.EnvoyFilter_SIDECAR_OUTBOUND, node, efw, out)

	resource = &discovery.Resource{
		Name:     out.Name,
		Resource: protoconv.MessageToAny(out),
	}

	// Populate the XDS cache for subsequent pushes (routeCache is nil when the
	// route was not cacheable).
	if features.EnableRDSCaching && routeCache != nil {
		configgen.Cache.Add(routeCache, req, resource)
	}

	return resource, false
}
// extractListenerPort parses the port out of an RDS route name. Route names are
// either "host:port" (protocol-sniffed listeners), a bare port number, or a unix
// domain socket path. The second return value reports whether the name belongs
// to a sniffed listener; err is non-nil when no numeric port could be parsed.
func extractListenerPort(routeName string) (int, bool, error) {
	isUDS := strings.HasPrefix(routeName, model.UnixAddressPrefix)
	colon := strings.IndexRune(routeName, ':')

	// For non-UDS names, drop everything up to and including the colon
	// (a missing colon leaves the name untouched, since colon+1 == 0).
	portText := routeName
	if !isUDS {
		portText = routeName[colon+1:]
	}
	port, err := strconv.Atoi(portText)

	// Sniffing applies only to non-UDS names that carried a host:port form.
	return port, !isUDS && colon != -1, err
}
// TODO: merge with IstioEgressListenerWrapper.selectVirtualServices
// selectVirtualServices selects the virtual services by matching given services' host names.
// A virtual service is kept as soon as any one of its hosts matches any service hostname,
// either by exact map lookup or by wildcard matching in either direction.
func selectVirtualServices(virtualServices []config.Config, servicesByName map[host.Name]*model.Service) []config.Config {
	out := make([]config.Config, 0)
	// As a performance optimization, find out wildcard service hosts first, so that
	// if non wildcard vs hosts can't be looked up directly in the service map, only need to
	// loop through wildcard service hosts instead of all.
	wcSvcHosts := []host.Name{}
	for svcHost := range servicesByName {
		if svcHost.IsWildCarded() {
			wcSvcHosts = append(wcSvcHosts, svcHost)
		}
	}

	for i := range virtualServices {
		rule := virtualServices[i].Spec.(*networking.VirtualService)
		var match bool

		// Selection algorithm:
		// virtualservices have a list of hosts in the API spec
		// if any host in the list matches one service hostname, select the virtual service
		// and break out of the loop.
		for _, h := range rule.Hosts {
			// TODO: This is a bug. VirtualServices can have many hosts
			// while the user might be importing only a single host
			// We need to generate a new VirtualService with just the matched host
			//
			// Fast path: exact hostname lookup.
			if servicesByName[host.Name(h)] != nil {
				match = true
				break
			}

			if host.Name(h).IsWildCarded() {
				// Process wildcard vs host as it need to follow the slow path of
				// looping through all services in the map.
				for svcHost := range servicesByName {
					if host.Name(h).Matches(svcHost) {
						match = true
						break
					}
				}
			} else {
				// If non wildcard vs host isn't be found in service map, only loop through
				// wildcard service hosts to avoid repeated matching.
				for _, svcHost := range wcSvcHosts {
					if host.Name(h).Matches(svcHost) {
						match = true
						break
					}
				}
			}

			if match {
				break
			}
		}

		if match {
			out = append(out, virtualServices[i])
		}
	}

	return out
}
// ProxyHeaders holds the resolved HTTP header-manipulation settings applied to an
// HttpConnectionManager, computed from mesh/proxy config by
// GetProxyHeadersFromProxyConfig.
type ProxyHeaders struct {
	ServerName                 string                                                 // value for the Server header; "" leaves Envoy's default
	ServerHeaderTransformation hcm.HttpConnectionManager_ServerHeaderTransformation   // how to treat an upstream-provided Server header
	ForwardedClientCert        hcm.HttpConnectionManager_ForwardClientCertDetails     // XFCC header handling
	IncludeRequestAttemptCount bool                                                   // whether to emit x-envoy-attempt-count
	GenerateRequestID          *wrappers.BoolValue                                    // nil means Envoy default (enabled)
	SuppressDebugHeaders       bool                                                   // suppress Envoy debug headers when true
	SkipIstioMXHeaders         bool                                                   // skip Istio metadata-exchange headers when true
}
// GetProxyHeaders resolves the effective header settings for a node, using the
// node's own proxy config when set and falling back to the mesh default config.
func GetProxyHeaders(node *model.Proxy, push *model.PushContext, class istionetworking.ListenerClass) ProxyHeaders {
	pc := node.Metadata.ProxyConfigOrDefault(push.Mesh.DefaultConfig)
	return GetProxyHeadersFromProxyConfig(pc, class)
}
// GetProxyHeadersFromProxyConfig computes the effective ProxyHeaders settings:
// it starts from Istio's defaults (adjusted for the listener class) and then
// applies each override present in the given ProxyConfig.
func GetProxyHeadersFromProxyConfig(pc *meshconfig.ProxyConfig, class istionetworking.ListenerClass) ProxyHeaders {
	base := ProxyHeaders{
		ServerName:                 EnvoyServerName,
		ServerHeaderTransformation: hcm.HttpConnectionManager_OVERWRITE,
		ForwardedClientCert:        hcm.HttpConnectionManager_APPEND_FORWARD,
		IncludeRequestAttemptCount: true,
		SuppressDebugHeaders:       false,
		GenerateRequestID:          nil, // Envoy default is to enable them, so set nil
		SkipIstioMXHeaders:         false,
	}
	if class == istionetworking.ListenerClassSidecarOutbound {
		// Likely due to a mistake, outbound uses "envoy" while inbound uses "istio-envoy". Bummer.
		// We keep it for backwards compatibility.
		base.ServerName = "" // Envoy default is "envoy" so no need to set it explicitly.
	}
	ph := pc.GetProxyHeaders()
	if ph == nil {
		// No user overrides; use defaults as-is.
		return base
	}
	if ph.AttemptCount.GetDisabled().GetValue() {
		base.IncludeRequestAttemptCount = false
	}
	if ph.ForwardedClientCert != meshconfig.ForwardClientCertDetails_UNDEFINED {
		base.ForwardedClientCert = util.MeshConfigToEnvoyForwardClientCertDetails(ph.ForwardedClientCert)
	}
	if ph.Server != nil {
		if ph.Server.Disabled.GetValue() {
			// Disabling the Server header clears our value AND passes through any
			// upstream-provided value unchanged.
			base.ServerName = ""
			base.ServerHeaderTransformation = hcm.HttpConnectionManager_PASS_THROUGH
		} else if ph.Server.Value != "" {
			base.ServerName = ph.Server.Value
		}
	}
	if ph.RequestId.GetDisabled().GetValue() {
		base.GenerateRequestID = proto.BoolFalse
	}
	if ph.EnvoyDebugHeaders.GetDisabled().GetValue() {
		base.SuppressDebugHeaders = true
	}
	if ph.MetadataExchangeHeaders != nil && ph.MetadataExchangeHeaders.GetMode() == meshconfig.ProxyConfig_ProxyHeaders_IN_MESH {
		base.SkipIstioMXHeaders = true
	}
	return base
}
// BuildSidecarOutboundVirtualHosts computes the virtual hosts for one outbound RDS
// route. It returns either the computed virtual hosts (with a cache entry to
// populate later), or a ready-made resource when the XDS cache already holds the
// result. listenerPort 0 means "all ports" (http_proxy or uds style routes).
func BuildSidecarOutboundVirtualHosts(node *model.Proxy, push *model.PushContext,
	routeName string,
	listenerPort int,
	efKeys []string,
	xdsCache model.XdsCache,
) ([]*route.VirtualHost, *discovery.Resource, *istio_route.Cache) {
	var virtualServices []config.Config
	var services []*model.Service

	// Get the services from the egress listener. When sniffing is enabled, we send
	// route name as foo.bar.com:8080 which is going to match against the wildcard
	// egress listener only. A route with sniffing would not have been generated if there
	// was a sidecar with explicit port (and hence protocol declaration). A route with
	// sniffing is generated only in the case of the catch all egress listener.
	egressListener := node.SidecarScope.GetEgressListenerForRDS(listenerPort, routeName)
	// We should never be getting a nil egress listener because the code that setup this RDS
	// call obviously saw an egress listener
	if egressListener == nil {
		return nil, nil, nil
	}

	services = egressListener.Services()
	// To maintain correctness, we should only use the virtualservices for
	// this listener and not all virtual services accessible to this proxy.
	virtualServices = egressListener.VirtualServices()

	// When generating RDS for ports created via the SidecarScope, we treat ports as HTTP proxy style ports
	// if ports protocol is HTTP_PROXY.
	if egressListener.IstioListener != nil && egressListener.IstioListener.Port != nil &&
		protocol.Parse(egressListener.IstioListener.Port.Protocol) == protocol.HTTP_PROXY {
		listenerPort = 0
	}

	includeRequestAttemptCount := GetProxyHeaders(node, push, istionetworking.ListenerClassSidecarOutbound).IncludeRequestAttemptCount

	// Index services by hostname, trimming each service down to the single matched
	// port (unless listenerPort is 0, which keeps all ports).
	servicesByName := make(map[host.Name]*model.Service)
	for _, svc := range services {
		if svc.Resolution == model.Alias {
			// Will be handled by the service it is an alias for
			continue
		}
		if listenerPort == 0 {
			// Take all ports when listen port is 0 (http_proxy or uds)
			// Expect virtualServices to resolve to right port
			servicesByName[svc.Hostname] = svc
		} else if svcPort, exists := svc.Ports.GetByPort(listenerPort); exists {
			servicesByName[svc.Hostname] = &model.Service{
				Hostname:       svc.Hostname,
				DefaultAddress: svc.GetAddressForProxy(node),
				MeshExternal:   svc.MeshExternal,
				Resolution:     svc.Resolution,
				Ports:          []*model.Port{svcPort},
				Attributes: model.ServiceAttributes{
					Namespace:       svc.Attributes.Namespace,
					ServiceRegistry: svc.Attributes.ServiceRegistry,
					Labels:          svc.Attributes.Labels,
					Aliases:         svc.Attributes.Aliases,
					K8sAttributes:   svc.Attributes.K8sAttributes,
				},
			}
			if features.EnableDualStack {
				// cannot correctly build virtualHost domains for dual stack without ClusterVIPs
				servicesByName[svc.Hostname].ClusterVIPs = *svc.ClusterVIPs.DeepCopy()
			}
		}
	}

	var routeCache *istio_route.Cache
	if listenerPort > 0 && features.EnableRDSCaching {
		// sort services, ensure that routeCache calculation result is stable
		services = make([]*model.Service, 0, len(servicesByName))
		for _, svc := range servicesByName {
			services = append(services, svc)
		}
		// NOTE(review): this less function uses `<=`, but sort's contract expects a
		// strict less-than. Hostnames are unique map keys here so the result is the
		// same, but confirm the intent.
		sort.SliceStable(services, func(i, j int) bool {
			return services[i].Hostname <= services[j].Hostname
		})
		routeCache = &istio_route.Cache{
			RouteName:               routeName,
			ProxyVersion:            node.Metadata.IstioVersion,
			ClusterID:               string(node.Metadata.ClusterID),
			DNSDomain:               node.DNSDomain,
			DNSCapture:              bool(node.Metadata.DNSCapture),
			DNSAutoAllocate:         bool(node.Metadata.DNSAutoAllocate),
			AllowAny:                util.IsAllowAnyOutbound(node),
			ListenerPort:            listenerPort,
			Services:                services,
			VirtualServices:         virtualServices,
			DelegateVirtualServices: push.DelegateVirtualServices(virtualServices),
			EnvoyFilterKeys:         efKeys,
		}
	}

	// This is hack to keep consistent with previous behavior.
	if listenerPort != 80 {
		// only select virtualServices that matches a service
		virtualServices = selectVirtualServices(virtualServices, servicesByName)
	}

	mostSpecificWildcardIndex := egressListener.MostSpecificWildcardServiceIndex()
	// Get list of virtual services bound to the mesh gateway
	virtualHostWrappers := istio_route.BuildSidecarVirtualHostWrapper(routeCache, node, push,
		servicesByName, virtualServices, listenerPort, mostSpecificWildcardIndex,
	)

	// Try the shared XDS cache first; a hit short-circuits all vhost construction.
	if features.EnableRDSCaching {
		resource := xdsCache.Get(routeCache)
		if resource != nil && !features.EnableUnsafeAssertions {
			return nil, resource, routeCache
		}
	}

	vHostPortMap := make(map[int][]*route.VirtualHost)
	vhosts := sets.String{}
	vhdomains := sets.String{}
	knownFQDN := sets.String{}

	// buildVirtualHost constructs one virtual host for either a virtual service
	// hostname (svc == nil) or a service (svc != nil), deduplicating both vhost
	// names and domains and recording a metric for every duplicate dropped.
	buildVirtualHost := func(hostname string, vhwrapper istio_route.VirtualHostWrapper, svc *model.Service) *route.VirtualHost {
		name := util.DomainName(hostname, vhwrapper.Port)
		if vhosts.InsertContains(name) {
			// This means this virtual host has caused duplicate virtual host name.
			var msg string
			if svc == nil {
				msg = fmt.Sprintf("duplicate domain from virtual service: %s", name)
			} else {
				msg = fmt.Sprintf("duplicate domain from service: %s", name)
			}
			push.AddMetric(model.DuplicatedDomains, name, node.ID, msg)
			return nil
		}
		var domains []string
		var altHosts []string
		if svc == nil {
			if SidecarIgnorePort(node) {
				domains = []string{util.IPv6Compliant(hostname)}
			} else {
				domains = []string{util.IPv6Compliant(hostname), name}
			}
		} else {
			domains, altHosts = generateVirtualHostDomains(svc, listenerPort, vhwrapper.Port, node)
		}
		dl := len(domains)
		domains = dedupeDomains(domains, vhdomains, altHosts, knownFQDN)
		if dl != len(domains) {
			var msg string
			if svc == nil {
				msg = fmt.Sprintf("duplicate domain from virtual service: %s", name)
			} else {
				msg = fmt.Sprintf("duplicate domain from service: %s", name)
			}
			// This means this virtual host has caused duplicate virtual host domain.
			push.AddMetric(model.DuplicatedDomains, name, node.ID, msg)
		}
		if len(domains) > 0 {
			pervirtualHostFilters := map[string]*anypb.Any{}
			// Attach a per-vhost stateful session override when configured for the service.
			if statefulConfig := util.MaybeBuildStatefulSessionFilterConfig(svc); statefulConfig != nil {
				perRouteStatefulSession := &statefulsession.StatefulSessionPerRoute{
					Override: &statefulsession.StatefulSessionPerRoute_StatefulSession{
						StatefulSession: statefulConfig,
					},
				}
				pervirtualHostFilters[util.StatefulSessionFilter] = protoconv.MessageToAny(perRouteStatefulSession)
			}
			return &route.VirtualHost{
				Name:                       name,
				Domains:                    domains,
				Routes:                     vhwrapper.Routes,
				IncludeRequestAttemptCount: includeRequestAttemptCount,
				TypedPerFilterConfig:       pervirtualHostFilters,
			}
		}

		return nil
	}

	// Pre-register every service FQDN so dedupeDomains can detect expanded hosts
	// that collide with real service names.
	for _, virtualHostWrapper := range virtualHostWrappers {
		for _, svc := range virtualHostWrapper.Services {
			name := util.DomainName(string(svc.Hostname), virtualHostWrapper.Port)
			knownFQDN.InsertAll(name, string(svc.Hostname))
		}
	}

	for _, virtualHostWrapper := range virtualHostWrappers {
		// If none of the routes matched by source, skip this virtual host
		if len(virtualHostWrapper.Routes) == 0 {
			continue
		}
		virtualHosts := make([]*route.VirtualHost, 0, len(virtualHostWrapper.VirtualServiceHosts)+len(virtualHostWrapper.Services))

		for _, hostname := range virtualHostWrapper.VirtualServiceHosts {
			if vhost := buildVirtualHost(hostname, virtualHostWrapper, nil); vhost != nil {
				virtualHosts = append(virtualHosts, vhost)
			}
		}

		for _, svc := range virtualHostWrapper.Services {
			if vhost := buildVirtualHost(string(svc.Hostname), virtualHostWrapper, svc); vhost != nil {
				virtualHosts = append(virtualHosts, vhost)
			}
		}
		vHostPortMap[virtualHostWrapper.Port] = append(vHostPortMap[virtualHostWrapper.Port], virtualHosts...)
	}

	var out []*route.VirtualHost
	if listenerPort == 0 {
		// "All ports" route: merge vhosts from every port (see mergeAllVirtualHosts).
		out = mergeAllVirtualHosts(vHostPortMap)
	} else {
		out = vHostPortMap[listenerPort]
	}

	return out, nil, routeCache
}
// dedupeDomains removes the duplicate domains from the passed in domains: any
// entry already recorded in vhdomains is dropped, as is any DNS-expanded host
// that collides with a known real FQDN. Survivors are recorded in vhdomains.
func dedupeDomains(domains []string, vhdomains sets.String, expandedHosts []string, knownFQDNs sets.String) []string {
	result := domains[:0] // filter in place, reusing the backing array
	for _, domain := range domains {
		lower := strings.ToLower(domain)
		if vhdomains.Contains(lower) {
			continue
		}
		// Drop hosts produced by DNS-domain expansion when the same name already
		// exists as a real (unexpanded) FQDN. This prevents a case where a domain
		// like "foo.com.cluster.local" gets expanded to "foo.com", overwriting the
		// real "foo.com". expandedHosts is small, so the linear search is fine.
		if knownFQDNs.Contains(domain) && slices.Contains(expandedHosts, domain) {
			continue
		}
		result = append(result, domain)
		vhdomains.Insert(lower)
	}
	return result
}
// getVirtualHostsForSniffedServicePort returns the set of virtual hosts that
// correspond to the listener that has HTTP protocol detection setup. Such a
// listener should only get the virtual hosts for this service+port (matched by
// domain, with or without the port suffix), not everything on 0.0.0.0:PORT.
// A single match is rewritten to the wildcard domain.
func getVirtualHostsForSniffedServicePort(vhosts []*route.VirtualHost, routeName string) []*route.VirtualHost {
	hostOnly, _, _ := net.SplitHostPort(routeName)
	var matched []*route.VirtualHost
	for _, vh := range vhosts {
		for _, domain := range vh.Domains {
			if domain == routeName || domain == hostOnly {
				matched = append(matched, vh)
				break
			}
		}
	}

	switch len(matched) {
	case 0:
		return matched
	case 1:
		// Exactly one vhost serves this sniffed listener; it can own all traffic.
		matched[0].Domains = []string{"*"}
		return matched
	}
	// More than one match is unexpected; surface it loudly in debug builds.
	if features.EnableUnsafeAssertions {
		panic(fmt.Sprintf("unexpectedly matched multiple virtual hosts for %v: %v", routeName, matched))
	}
	return matched
}
// SidecarIgnorePort reports whether virtual host domains for this proxy may omit
// explicit port suffixes (the route config sets IgnorePortInHostMatching instead).
// Proxyless gRPC clients are the exception and still need host:port domains.
func SidecarIgnorePort(node *model.Proxy) bool {
	return !node.IsProxylessGrpc()
}
// generateVirtualHostDomains generates the set of domain matches for a service being accessed from
// a proxy node. It returns the full domain list plus the subset of "alternate"
// (DNS-expanded) hosts, which callers need for dedupe bookkeeping.
func generateVirtualHostDomains(service *model.Service, listenerPort int, port int, node *model.Proxy) ([]string, []string) {
	if SidecarIgnorePort(node) && listenerPort != 0 {
		// Indicate we do not need port, as we will set IgnorePortInHostMatching
		port = portNoAppendPortSuffix
	}
	domains := []string{}
	allAltHosts := []string{}
	// Generate domains for the service hostname and every declared alias.
	all := []string{string(service.Hostname)}
	for _, a := range service.Attributes.Aliases {
		all = append(all, a.Hostname.String())
	}
	for _, s := range all {
		altHosts := GenerateAltVirtualHosts(s, port, node.DNSDomain)
		domains = appendDomainPort(domains, s, port)
		domains = append(domains, altHosts...)
		allAltHosts = append(allAltHosts, altHosts...)
	}
	if service.Resolution == model.Passthrough &&
		service.Attributes.ServiceRegistry == provider.Kubernetes {
		// Add a "*."-prefixed variant for every domain collected so far. Appending
		// while ranging is safe: range captures the slice header once, so the new
		// wildcard entries are not themselves re-visited.
		for _, domain := range domains {
			domains = append(domains, wildcardDomainPrefix+domain)
		}
	}

	svcAddr := service.GetAddressForProxy(node)
	if len(svcAddr) > 0 && svcAddr != constants.UnspecifiedIP {
		domains = appendDomainPort(domains, svcAddr, port)
	}

	// handle dual stack's extra address when generating the virtualHost domains
	// assumes that conversion is stripping out the DefaultAddress from ClusterVIPs
	extraAddr := service.GetExtraAddressesForProxy(node)
	for _, addr := range extraAddr {
		domains = appendDomainPort(domains, addr, port)
	}

	return domains, allAltHosts
}
// appendDomainPort appends `domain` and `domain:port` to `domains`. The `domain:port` variant is skipped
// if port is unset.
func appendDomainPort(domains []string, domain string, port int) []string {
if port == portNoAppendPortSuffix {
return append(domains, util.IPv6Compliant(domain))
}
return append(domains, util.IPv6Compliant(domain), util.DomainName(domain, port))
}
// GenerateAltVirtualHosts given a service and a port, generates all possible HTTP Host headers.
// For example, a service of the form foo.local.campus.net on port 80, with local domain "local.campus.net"
// could be accessed as http://foo:80 within the .local network, as http://foo.local:80 (by other clients
// in the campus.net domain), as http://foo.local.campus:80, etc.
// NOTE: When a sidecar in remote.campus.net domain is talking to foo.local.campus.net,
// we should only generate foo.local, foo.local.campus, etc (and never just "foo").
//
// - Given foo.local.campus.net on proxy domain local.campus.net, this function generates
// foo:80, foo.local:80, foo.local.campus:80, with and without ports. It will not generate
// foo.local.campus.net (full hostname) since its already added elsewhere.
//
// - Given foo.local.campus.net on proxy domain remote.campus.net, this function generates
// foo.local:80, foo.local.campus:80
//
// - Given foo.local.campus.net on proxy domain "" or proxy domain example.com, this
// function returns nil
func GenerateAltVirtualHosts(hostname string, port int, proxyDomain string) []string {
	// If the dns/proxy domain contains `.svc`, only services following the <ns>.svc.<suffix>
	// naming convention and that share a suffix with the domain should be expanded.
	if strings.Contains(proxyDomain, ".svc.") {
		if strings.HasSuffix(hostname, removeSvcNamespace(proxyDomain)) {
			return generateAltVirtualHostsForKubernetesService(hostname, port, proxyDomain)
		}

		// Hostname is not a kube service. It is not safe to expand the
		// hostname as non-fully-qualified names could conflict with expansion of other kube service
		// hostnames
		return nil
	}

	var vhosts []string
	uniqueHostnameParts, sharedDNSDomainParts := getUniqueAndSharedDNSDomain(hostname, proxyDomain)

	// If there is no shared DNS name (e.g., foobar.com service on local.net proxy domain)
	// do not generate any alternate virtual host representations
	if len(sharedDNSDomainParts) == 0 {
		return nil
	}

	uniqueHostname := strings.Join(uniqueHostnameParts, ".")

	// Add the uniqueHost.
	vhosts = appendDomainPort(vhosts, uniqueHostname, port)
	if len(uniqueHostnameParts) == 2 {
		// This is the case of uniqHostname having namespace already.
		// Also emit the form with the first shared label, e.g. "foo.ns1" -> "foo.ns1.svc".
		dnsHostName := uniqueHostname + "." + sharedDNSDomainParts[0]
		vhosts = appendDomainPort(vhosts, dnsHostName, port)
	}
	return vhosts
}
// portNoAppendPortSuffix is a sentinel port value signaling that no ":port"
// suffix should be appended to generated virtual host domains.
const portNoAppendPortSuffix = 0
// generateAltVirtualHostsForKubernetesService expands a Kubernetes service
// hostname (<name>.<ns>.svc.<suffix>) into its shorter forms, relative to the
// proxy's own DNS domain (<proxy-ns>.svc.<suffix>). The bare service name is
// only emitted when the service is in the proxy's own namespace.
func generateAltVirtualHostsForKubernetesService(hostname string, port int, proxyDomain string) []string {
	// id: index of ".svc." in the proxy domain; proxyDomain[:id] is the proxy's namespace.
	id := strings.Index(proxyDomain, ".svc.")
	// ih: index of ".svc." in the hostname; hostname[:ih] is "<name>.<ns>".
	ih := strings.Index(hostname, ".svc.")
	if ih > 0 { // Proxy and service hostname are in kube
		// ns: end of the short service name; hostname[ns+1:ih] is the service's namespace.
		ns := strings.Index(hostname, ".")
		if ns+1 >= len(hostname) || ns+1 > ih {
			// Invalid domain
			return nil
		}
		if hostname[ns+1:ih] == proxyDomain[:id] {
			// Same namespace: the bare service name is unambiguous, so include it.
			if port == portNoAppendPortSuffix {
				return []string{
					hostname[:ns],
					hostname[:ih] + ".svc",
					hostname[:ih],
				}
			}
			return []string{
				hostname[:ns],
				util.DomainName(hostname[:ns], port),
				hostname[:ih] + ".svc",
				util.DomainName(hostname[:ih]+".svc", port),
				hostname[:ih],
				util.DomainName(hostname[:ih], port),
			}
		}
		// Different namespace: never emit the bare name, only "<name>.<ns>" forms.
		if port == portNoAppendPortSuffix {
			return []string{
				hostname[:ih],
				hostname[:ih] + ".svc",
			}
		}
		return []string{
			hostname[:ih],
			util.DomainName(hostname[:ih], port),
			hostname[:ih] + ".svc",
			util.DomainName(hostname[:ih]+".svc", port),
		}
	}
	// Proxy is in k8s, but service isn't. No alt hosts
	return nil
}
// mergeAllVirtualHosts flattens the per-port virtual host map into one list.
// Port 80 hosts are taken as-is; on all other ports only domains carrying an
// explicit port suffix (IP:PORT) are kept, and hosts left with no domains are dropped.
func mergeAllVirtualHosts(vHostPortMap map[int][]*route.VirtualHost) []*route.VirtualHost {
	var merged []*route.VirtualHost
	for port, vhosts := range vHostPortMap {
		if port == 80 {
			merged = append(merged, vhosts...)
			continue
		}
		for _, vhost := range vhosts {
			// Keep only the domains that spell out a port.
			vhost.Domains = slices.FilterInPlace(vhost.Domains, func(domain string) bool {
				return strings.Contains(domain, ":")
			})
			if len(vhost.Domains) > 0 {
				merged = append(merged, vhost)
			}
		}
	}
	return merged
}
// min returns the smaller of two ints. (Local helper predating / shadowing the
// Go 1.21 builtin; callers here only need the int form.)
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// getUniqueAndSharedDNSDomain computes the unique and shared DNS suffix from a FQDN service name and
// the proxy's local domain with namespace. This is especially useful in Kubernetes environments, where
// a two services can have same name in different namespaces (e.g., foo.ns1.svc.cluster.local,
// foo.ns2.svc.cluster.local). In this case, if the proxy is in ns2.svc.cluster.local, then while
// generating alt virtual hosts for service foo.ns1 for the sidecars in ns2 namespace, we should generate
// foo.ns1, foo.ns1.svc, foo.ns1.svc.cluster.local and should not generate a virtual host called "foo" for
// foo.ns1 service.
// So given foo.ns1.svc.cluster.local and ns2.svc.cluster.local, this function will return
// foo.ns1, and svc.cluster.local.
// When given foo.ns2.svc.cluster.local and ns2.svc.cluster.local, this function will return
// foo, ns2.svc.cluster.local.
func getUniqueAndSharedDNSDomain(fqdnHostname, proxyDomain string) (partsUnique []string, partsShared []string) {
	// split them by the dot and reverse the arrays, so that we can
	// start collecting the shared bits of DNS suffix.
	// E.g., foo.ns1.svc.cluster.local -> local,cluster,svc,ns1,foo
	// ns2.svc.cluster.local -> local,cluster,svc,ns2
	partsFQDN := strings.Split(fqdnHostname, ".")
	partsProxyDomain := strings.Split(proxyDomain, ".")
	// NOTE: slices here is istio.io/istio/pkg/slices, whose Reverse RETURNS the
	// reversed slice (unlike the in-place stdlib slices.Reverse).
	partsFQDNInReverse := slices.Reverse(partsFQDN)
	partsProxyDomainInReverse := slices.Reverse(partsProxyDomain)
	var sharedSuffixesInReverse []string // pieces shared between proxy and svc. e.g., local,cluster,svc

	// Walk from the end of both names collecting matching labels until they diverge.
	for i := 0; i < min(len(partsFQDNInReverse), len(partsProxyDomainInReverse)); i++ {
		if partsFQDNInReverse[i] == partsProxyDomainInReverse[i] {
			sharedSuffixesInReverse = append(sharedSuffixesInReverse, partsFQDNInReverse[i])
		} else {
			break
		}
	}

	if len(sharedSuffixesInReverse) == 0 {
		// Nothing shared: the whole FQDN is "unique" and partsShared stays nil.
		partsUnique = partsFQDN
	} else {
		// get the non shared pieces (ns1, foo) and reverse Array
		partsUnique = slices.Reverse(partsFQDNInReverse[len(sharedSuffixesInReverse):])
		partsShared = slices.Reverse(sharedSuffixesInReverse)
	}
	return
}
// buildCatchAllVirtualHost returns the wildcard ("*") virtual host appended to
// non-sniffed outbound route configs: when outbound traffic is allowed to any
// destination it routes to the passthrough cluster (or to a configured egress
// proxy); otherwise it returns a BlackHole host with a 502 direct response.
func buildCatchAllVirtualHost(node *model.Proxy, includeRequestAttemptCount bool) *route.VirtualHost {
	if util.IsAllowAnyOutbound(node) {
		egressCluster := util.PassthroughCluster
		notimeout := durationpb.New(0)

		// no need to check for nil value as the previous if check has checked
		if node.SidecarScope.OutboundTrafficPolicy.EgressProxy != nil {
			// user has provided an explicit destination for all the unknown traffic.
			// build a cluster out of this destination
			egressCluster = istio_route.GetDestinationCluster(node.SidecarScope.OutboundTrafficPolicy.EgressProxy,
				nil, 0)
		}

		routeAction := &route.RouteAction{
			ClusterSpecifier: &route.RouteAction_Cluster{Cluster: egressCluster},
			// Disable timeout instead of assuming some defaults.
			Timeout: notimeout,
			// Use deprecated value for now as the replacement MaxStreamDuration has some regressions.
			// nolint: staticcheck
			MaxGrpcTimeout: notimeout,
		}

		return &route.VirtualHost{
			Name:    util.Passthrough,
			Domains: []string{"*"},
			Routes: []*route.Route{
				{
					Name: util.Passthrough,
					Match: &route.RouteMatch{
						PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
					},
					Action: &route.Route_Route{
						Route: routeAction,
					},
				},
			},
			IncludeRequestAttemptCount: includeRequestAttemptCount,
		}
	}

	// REGISTRY_ONLY style outbound policy: everything unknown is black-holed.
	return &route.VirtualHost{
		Name:    util.BlackHole,
		Domains: []string{"*"},
		Routes: []*route.Route{
			{
				Name: util.BlackHole,
				Match: &route.RouteMatch{
					PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
				},
				Action: &route.Route_DirectResponse{
					DirectResponse: &route.DirectResponseAction{
						Status: 502,
					},
				},
			},
		},
		IncludeRequestAttemptCount: includeRequestAttemptCount,
	}
}
// removeSvcNamespace strips everything before ".svc." from domain, returning the
// ".svc.<suffix>" tail. Domains without ".svc." come back unchanged.
func removeSvcNamespace(domain string) string {
	idx := strings.Index(domain, ".svc.")
	if idx <= 0 {
		// Not found (-1), or nothing precedes ".svc." (0) — nothing to strip.
		return domain
	}
	return domain[idx:]
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"sort"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
envoyquicv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/quic/v3"
auth "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"google.golang.org/protobuf/types/known/durationpb"
extensions "istio.io/api/extensions/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
"istio.io/istio/pilot/pkg/networking/util"
authnmodel "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/proto"
secconst "istio.io/istio/pkg/security"
"istio.io/istio/pkg/slices"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// Conflict classifications for an incoming outbound listener versus an
// already-accepted listener on the same key (incoming protocol over existing
// protocol).
const (
	// NoConflict indicates the incoming listener does not conflict with an existing listener.
	NoConflict = iota
	// HTTPOverTCP represents incoming HTTP existing TCP
	HTTPOverTCP
	// TCPOverHTTP represents incoming TCP existing HTTP
	TCPOverHTTP
	// TCPOverTCP represents incoming TCP existing TCP
	TCPOverTCP
	// TCPOverAuto represents incoming TCP existing AUTO
	TCPOverAuto
	// AutoOverHTTP represents incoming AUTO existing HTTP
	AutoOverHTTP
	// AutoOverTCP represents incoming AUTO existing TCP
	AutoOverTCP
)
// A set of pre-allocated variables related to protocol sniffing logic for
// propagating the ALPN to upstreams
var (
	// These are sniffed by the HTTP Inspector in the outbound listener
	// We need to forward these ALPNs to upstream so that the upstream can
	// properly use an HTTP or TCP listener
	plaintextHTTPALPNs = func() []string {
		if features.HTTP10 {
			// If HTTP 1.0 is enabled, we will match it
			return []string{"http/1.0", "http/1.1", "h2c"}
		}
		// Otherwise, matching would just lead to immediate rejection. By not matching, we can let it pass
		// through as raw TCP at least.
		// NOTE: mtlsHTTPALPNs can always include 1.0, for simplicity, as it will only be sent if a client
		return []string{"http/1.1", "h2c"}
	}()
	// mtlsHTTPALPNs are the Istio-specific ALPN values for HTTP traffic over mTLS.
	mtlsHTTPALPNs = []string{"istio-http/1.0", "istio-http/1.1", "istio-h2"}
	// allIstioMtlsALPNs is the full set of ALPN values Istio uses for mTLS traffic.
	allIstioMtlsALPNs = []string{"istio", "istio-peer-exchange", "istio-http/1.0", "istio-http/1.1", "istio-h2"}
	// mtlsTCPWithMxcALPNs are the ALPN values for mTLS TCP traffic with metadata exchange.
	mtlsTCPWithMxcALPNs = []string{"istio-peer-exchange", "istio"}
)
// BuildListeners produces a list of listeners and referenced clusters for all proxies
func (configgen *ConfigGeneratorImpl) BuildListeners(node *model.Proxy,
	push *model.PushContext,
) []*listener.Listener {
	lb := NewListenerBuilder(node, push)
	// Each proxy type has its own listener-building strategy.
	switch node.Type {
	case model.SidecarProxy:
		lb = configgen.buildSidecarListeners(lb)
	case model.Waypoint:
		lb = configgen.buildWaypointListeners(lb)
	case model.Router:
		lb = configgen.buildGatewayListeners(lb)
	}
	lb.patchListeners()
	listeners := lb.getListeners()
	// Non-waypoint proxies with HBONE enabled additionally get the internal
	// CONNECT originate listener.
	if lb.node.EnableHBONE() && !lb.node.IsWaypointProxy() {
		listeners = append(listeners, buildConnectOriginateListener())
	}
	return listeners
}
// BuildListenerTLSContext builds the downstream (server-side) TLS context for a
// gateway listener from the user-supplied ServerTLSSettings.
//
// ALPN selection: HTTP by default, HTTP/3 ALPN for QUIC transports, and for a
// TCP server terminating ISTIO_MUTUAL TLS the downstream ALPN list (with or
// without metadata-exchange protocols, depending on DisableMxALPN).
// Client certificates are required for MUTUAL and ISTIO_MUTUAL modes, but never
// over QUIC (see the Envoy issue referenced below).
func BuildListenerTLSContext(serverTLSSettings *networking.ServerTLSSettings,
	proxy *model.Proxy, mesh *meshconfig.MeshConfig, transportProtocol istionetworking.TransportProtocol, gatewayTCPServerWithTerminatingTLS bool,
) *auth.DownstreamTlsContext {
	alpnByTransport := util.ALPNHttp
	if transportProtocol == istionetworking.TransportProtocolQUIC {
		alpnByTransport = util.ALPNHttp3OverQUIC
	} else if transportProtocol == istionetworking.TransportProtocolTCP &&
		serverTLSSettings.Mode == networking.ServerTLSSettings_ISTIO_MUTUAL &&
		gatewayTCPServerWithTerminatingTLS {
		if features.DisableMxALPN {
			alpnByTransport = util.ALPNDownstream
		} else {
			alpnByTransport = util.ALPNDownstreamWithMxc
		}
	}
	ctx := &auth.DownstreamTlsContext{
		CommonTlsContext: &auth.CommonTlsContext{
			AlpnProtocols: alpnByTransport,
		},
	}
	ctx.RequireClientCertificate = proto.BoolFalse
	if serverTLSSettings.Mode == networking.ServerTLSSettings_MUTUAL ||
		serverTLSSettings.Mode == networking.ServerTLSSettings_ISTIO_MUTUAL {
		ctx.RequireClientCertificate = proto.BoolTrue
	}
	if transportProtocol == istionetworking.TransportProtocolQUIC {
		// TODO(https://github.com/envoyproxy/envoy/issues/23809) support this in Envoy
		ctx.RequireClientCertificate = proto.BoolFalse
	}
	// credentialSocketExist indicates the proxy advertised a custom SDS socket
	// for fetching credentials (via the CredentialMetaDataName metadata flag).
	credentialSocketExist := false
	if proxy.Metadata != nil && proxy.Metadata.Raw[secconst.CredentialMetaDataName] == "true" {
		credentialSocketExist = true
	}
	// Validate client certs whenever they are required, or optionally requested.
	validateClient := ctx.RequireClientCertificate.Value || serverTLSSettings.Mode == networking.ServerTLSSettings_OPTIONAL_MUTUAL
	switch {
	case serverTLSSettings.Mode == networking.ServerTLSSettings_ISTIO_MUTUAL:
		authnmodel.ApplyToCommonTLSContext(ctx.CommonTlsContext, proxy, serverTLSSettings.SubjectAltNames, []string{}, validateClient)
	// If credential name is specified at gateway config, create SDS config for gateway to fetch key/cert from Istiod.
	case serverTLSSettings.CredentialName != "":
		authnmodel.ApplyCredentialSDSToServerCommonTLSContext(ctx.CommonTlsContext, serverTLSSettings, credentialSocketExist)
	default:
		certProxy := &model.Proxy{}
		certProxy.IstioVersion = proxy.IstioVersion
		// If certificate files are specified in gateway configuration, use file based SDS.
		certProxy.Metadata = &model.NodeMetadata{
			TLSServerCertChain: serverTLSSettings.ServerCertificate,
			TLSServerKey:       serverTLSSettings.PrivateKey,
			TLSServerRootCert:  serverTLSSettings.CaCertificates,
		}
		authnmodel.ApplyToCommonTLSContext(ctx.CommonTlsContext, certProxy, serverTLSSettings.SubjectAltNames, []string{}, validateClient)
	}
	if isSimpleOrMutual(serverTLSSettings.Mode) {
		// If Mesh TLSDefaults are set, use them.
		applyDownstreamTLSDefaults(mesh.GetTlsDefaults(), ctx.CommonTlsContext)
		applyServerTLSSettings(serverTLSSettings, ctx.CommonTlsContext)
	}
	return ctx
}
// applyDownstreamTLSDefaults copies mesh-wide TLS defaults (ECDH curves, cipher
// suites, and minimum protocol version) onto the given common TLS context.
// Unset defaults leave the context untouched.
func applyDownstreamTLSDefaults(tlsDefaults *meshconfig.MeshConfig_TLSConfig, ctx *auth.CommonTlsContext) {
	if tlsDefaults == nil {
		return
	}
	if curves := tlsDefaults.EcdhCurves; len(curves) > 0 {
		tlsParamsOrNew(ctx).EcdhCurves = curves
	}
	if suites := tlsDefaults.CipherSuites; len(suites) > 0 {
		tlsParamsOrNew(ctx).CipherSuites = suites
	}
	if minVersion := tlsDefaults.MinProtocolVersion; minVersion != meshconfig.MeshConfig_TLSConfig_TLS_AUTO {
		tlsParamsOrNew(ctx).TlsMinimumProtocolVersion = auth.TlsParameters_TlsProtocol(minVersion)
	}
}
// applyServerTLSSettings copies user-specified TLS parameters (min/max protocol
// version and cipher suites) from the ServerTLSSettings onto the common TLS
// context. TLS_AUTO versions and an empty cipher list are left to Envoy defaults.
func applyServerTLSSettings(serverTLSSettings *networking.ServerTLSSettings, ctx *auth.CommonTlsContext) {
	if v := serverTLSSettings.MinProtocolVersion; v != networking.ServerTLSSettings_TLS_AUTO {
		tlsParamsOrNew(ctx).TlsMinimumProtocolVersion = convertTLSProtocol(v)
	}
	if v := serverTLSSettings.MaxProtocolVersion; v != networking.ServerTLSSettings_TLS_AUTO {
		tlsParamsOrNew(ctx).TlsMaximumProtocolVersion = convertTLSProtocol(v)
	}
	if suites := serverTLSSettings.CipherSuites; len(suites) > 0 {
		tlsParamsOrNew(ctx).CipherSuites = suites
	}
}
// isSimpleOrMutual reports whether the TLS mode is one of the user-certificate
// modes (SIMPLE, MUTUAL, or OPTIONAL_MUTUAL).
func isSimpleOrMutual(mode networking.ServerTLSSettings_TLSmode) bool {
	switch mode {
	case networking.ServerTLSSettings_SIMPLE,
		networking.ServerTLSSettings_MUTUAL,
		networking.ServerTLSSettings_OPTIONAL_MUTUAL:
		return true
	default:
		return false
	}
}
// tlsParamsOrNew returns the context's TlsParams, lazily allocating an empty
// one on first use so callers can assign fields unconditionally.
func tlsParamsOrNew(tlsContext *auth.CommonTlsContext) *auth.TlsParameters {
	if params := tlsContext.TlsParams; params != nil {
		return params
	}
	tlsContext.TlsParams = &auth.TlsParameters{}
	return tlsContext.TlsParams
}
// buildSidecarListeners produces a list of listeners for sidecar proxies.
// Listener generation is skipped entirely when no proxy listen port is
// configured in the mesh.
func (configgen *ConfigGeneratorImpl) buildSidecarListeners(builder *ListenerBuilder) *ListenerBuilder {
	if builder.push.Mesh.ProxyListenPort <= 0 {
		return builder
	}
	// Any build order change need a careful code review
	builder.appendSidecarInboundListeners().
		appendSidecarOutboundListeners().
		buildHTTPProxyListener().
		buildVirtualOutboundListener()
	return builder
}
// buildWaypointListeners produces a list of listeners for waypoint proxies;
// only the waypoint inbound listener set is built here.
func (configgen *ConfigGeneratorImpl) buildWaypointListeners(builder *ListenerBuilder) *ListenerBuilder {
	builder.inboundListeners = builder.buildWaypointInbound()
	return builder
}
// enableHTTP10 reports whether the proxy metadata flag requests HTTP/1.0
// support (AcceptHttp_10); only the exact value "1" enables it.
func enableHTTP10(enableFlag string) bool {
	const enabled = "1"
	return enableFlag == enabled
}
// listenerBinding tracks the addresses an outbound listener should bind to and
// whether the binding is a real socket bind or a virtual listener matched via
// original_dst.
type listenerBinding struct {
	// binds contains a list of all addresses this listener should bind to. The first one in the list is considered the primary
	binds []string
	// bindToPort determines whether this binds to a real port. If so, it becomes a real linux-level listener. Otherwise,
	// it is just a synthetic listener for matching with original_dst
	bindToPort bool
}
// Primary returns the primary bind (the first address), or empty if there is none.
func (l listenerBinding) Primary() string {
	if len(l.binds) > 0 {
		return l.binds[0]
	}
	return ""
}
// Extra returns any additional bindings beyond the primary one. This is always
// empty if dual stack is disabled.
func (l listenerBinding) Extra() []string {
	// Guard with len < 2 (rather than == 1) so an empty/nil binds slice cannot
	// panic on the slice expression below.
	if !features.EnableDualStack || len(l.binds) < 2 {
		return nil
	}
	return l.binds[1:]
}
// outboundListenerEntry accumulates the state for a single outbound listener
// (keyed by bind address and port) while services are merged and conflicts
// resolved.
type outboundListenerEntry struct {
	// servicePort is the port this listener serves.
	servicePort *model.Port
	// bind holds the bind addresses and whether this listener binds a real port.
	bind listenerBinding
	// locked, once set, prevents later (catch-all) egress listeners from
	// adding or merging chains into this entry.
	locked bool
	// chains are the filter chain options collected for this listener.
	chains []*filterChainOpts
	// protocol is the effective protocol recorded for this listener.
	protocol protocol.Instance
}
// protocolName maps a service port protocol to the coarse listener-protocol
// label used in conflict reporting: "HTTP", "TCP", or "UNKNOWN".
func protocolName(p protocol.Instance) string {
	lp := istionetworking.ModelProtocolToListenerProtocol(p)
	if lp == istionetworking.ListenerProtocolHTTP {
		return "HTTP"
	}
	if lp == istionetworking.ListenerProtocolTCP {
		return "TCP"
	}
	return "UNKNOWN"
}
// outboundListenerConflict captures the details of a protocol conflict between
// a new outbound listener and an existing one, for metric reporting.
type outboundListenerConflict struct {
	// metric is the conflict metric to record.
	metric monitoring.Metric
	// node is the proxy for which the conflict occurred.
	node *model.Proxy
	// listenerName is the name of the listener where the conflict occurred.
	listenerName string
	// currentProtocol is the protocol of the already-accepted listener.
	currentProtocol protocol.Instance
	// newHostname is the hostname of the service whose listener was rejected.
	newHostname host.Name
	// newProtocol is the protocol of the rejected listener.
	newProtocol protocol.Instance
}
// addMetric records this conflict on the given metrics sink, tagged with the
// listener name, node ID, and a human-readable summary of which protocol was
// accepted and which was rejected.
func (c outboundListenerConflict) addMetric(metrics model.Metrics) {
	detail := fmt.Sprintf("Listener=%s Accepted=%s Rejected=%s (%s)",
		c.listenerName,
		protocolName(c.currentProtocol),
		protocolName(c.newProtocol),
		c.newHostname)
	metrics.AddMetric(c.metric, c.listenerName, c.node.ID, detail)
}
// buildSidecarOutboundListeners generates http and tcp listeners for
// outbound connections from the proxy based on the sidecar scope associated with the proxy.
func (lb *ListenerBuilder) buildSidecarOutboundListeners(node *model.Proxy,
	push *model.PushContext,
) []*listener.Listener {
	// In "none" interception mode the proxy does not use iptables, so every
	// listener must bind an actual port (see bindToPort handling below).
	noneMode := node.GetInterceptionMode() == model.InterceptionNone
	actualWildcards, actualLocalHosts := getWildcardsAndLocalHost(node.GetIPMode())
	// For conflict resolution
	listenerMap := make(map[listenerKey]*outboundListenerEntry)
	// The sidecarConfig if provided could filter the list of
	// services/virtual services that we need to process. It could also
	// define one or more listeners with specific ports. Once we generate
	// listeners for these user specified ports, we will auto generate
	// configs for other ports if and only if the sidecarConfig has an
	// egressListener on wildcard port.
	//
	// Validation will ensure that we have utmost one wildcard egress listener
	// occurring in the end
	// Add listeners based on the config in the sidecar.EgressListeners if
	// no Sidecar CRD is provided for this config namespace,
	// push.SidecarScope will generate a default catch all egress listener.
	for _, egressListener := range node.SidecarScope.EgressListeners {
		services := egressListener.Services()
		virtualServices := egressListener.VirtualServices()
		bind := listenerBinding{}
		// determine the bindToPort setting for listeners
		if noneMode {
			// do not care what the listener's capture mode setting is. The proxy does not use iptables
			bind.bindToPort = true
		} else if egressListener.IstioListener != nil {
			if egressListener.IstioListener.CaptureMode == networking.CaptureMode_NONE {
				// proxy uses iptables redirect or tproxy. IF mode is not set
				// for older proxies, it defaults to iptables redirect. If the
				// listener's capture mode specifies NONE, then the proxy wants
				// this listener alone to be on a physical port. If the
				// listener's capture mode is default, then its same as
				// iptables i.e. BindToPort is false.
				bind.bindToPort = true
			} else if strings.HasPrefix(egressListener.IstioListener.Bind, model.UnixAddressPrefix) {
				// If the bind is a Unix domain socket, set bindtoPort to true as it makes no
				// sense to have ORIG_DST listener for unix domain socket listeners.
				bind.bindToPort = true
			}
		}
		// If capture mode is NONE i.e., bindToPort is true, and
		// Bind IP + Port is specified, we will bind to the specified IP and Port.
		// This specified IP is ideally expected to be a loopback IP.
		//
		// If capture mode is NONE i.e., bindToPort is true, and
		// only Port is specified, we will bind to the default loopback IP
		// 127.0.0.1 and the specified Port.
		//
		// If capture mode is NONE, i.e., bindToPort is true, and
		// only Bind IP is specified, we will bind to the specified IP
		// for each port as defined in the service registry.
		//
		// If captureMode is not NONE, i.e., bindToPort is false, then
		// we will bind to user specified IP (if any) or to the VIPs of services in
		// this egress listener.
		if egressListener.IstioListener != nil && egressListener.IstioListener.Bind != "" {
			bind.binds = []string{egressListener.IstioListener.Bind}
		} else if bind.bindToPort {
			bind.binds = actualLocalHosts
		}
		if egressListener.IstioListener != nil &&
			egressListener.IstioListener.Port != nil {
			// We have a non catch all listener on some user specified port
			// The user specified port may or may not match a service port.
			// If it does not match any service port and the service has only
			// one port, then we pick a default service port. If service has
			// multiple ports, we expect the user to provide a virtualService
			// that will route to a proper Service.
			// Skip ports we cannot bind to
			if !node.CanBindToPort(bind.bindToPort, egressListener.IstioListener.Port.Number) {
				log.Warnf("buildSidecarOutboundListeners: skipping privileged sidecar port %d for node %s as it is an unprivileged proxy",
					egressListener.IstioListener.Port.Number, node.ID)
				continue
			}
			listenPort := &model.Port{
				Port:     int(egressListener.IstioListener.Port.Number),
				Protocol: protocol.Parse(egressListener.IstioListener.Port.Protocol),
				Name:     egressListener.IstioListener.Port.Name,
			}
			if conflictWithReservedListener(node, push, bind.Primary(), listenPort.Port, listenPort.Protocol) {
				log.Warnf("buildSidecarOutboundListeners: skipping sidecar port %d for node %s as it conflicts with static listener",
					egressListener.IstioListener.Port.Number, node.ID)
				continue
			}
			// TODO: dualstack wildcards
			for _, service := range services {
				listenerOpts := outboundListenerOpts{
					push:    push,
					proxy:   node,
					bind:    bind,
					port:    listenPort,
					service: service,
				}
				// Set service specific attributes here.
				lb.buildSidecarOutboundListener(listenerOpts, listenerMap, virtualServices, actualWildcards)
			}
		} else {
			// This is a catch all egress listener with no port. This
			// should be the last egress listener in the sidecar
			// Scope. Construct a listener for each service and service
			// port, if and only if this port was not specified in any of
			// the preceding listeners from the sidecarScope. This allows
			// users to specify a trimmed set of services for one or more
			// listeners and then add a catch all egress listener for all
			// other ports. Doing so allows people to restrict the set of
			// services exposed on one or more listeners, and avoid hard
			// port conflicts like tcp taking over http or http taking over
			// tcp, or simply specify that of all the listeners that Istio
			// generates, the user would like to have only specific sets of
			// services exposed on a particular listener.
			//
			// To ensure that we do not add anything to listeners we have
			// already generated, run through the outboundListenerEntry map and set
			// the locked bit to true.
			// buildSidecarOutboundListener will not add/merge
			// any HTTP/TCP listener if there is already a outboundListenerEntry
			// with locked bit set to true
			for _, e := range listenerMap {
				e.locked = true
			}
			for _, service := range services {
				saddress := service.GetAddressForProxy(node)
				for _, servicePort := range service.Ports {
					// Skip ports we cannot bind to
					if !node.CanBindToPort(bind.bindToPort, uint32(servicePort.Port)) {
						// here, we log at DEBUG level instead of WARN to avoid noise
						// when the catch all egress listener hits ports 80 and 443
						log.Debugf("buildSidecarOutboundListeners: skipping privileged service port %s:%d for node %s as it is an unprivileged proxy",
							service.Hostname, servicePort.Port, node.ID)
						continue
					}
					if conflictWithReservedListener(node, push, bind.Primary(), servicePort.Port, servicePort.Protocol) {
						log.Debugf("buildSidecarOutboundListeners: skipping service port %s:%d for node %s as it conflicts with static listener",
							service.Hostname, servicePort.Port, node.ID)
						continue
					}
					listenerOpts := outboundListenerOpts{
						push:    push,
						proxy:   node,
						bind:    bind,
						port:    servicePort,
						service: service,
					}
					// Support statefulsets/headless services with TCP ports, and empty service address field.
					// Instead of generating a single 0.0.0.0:Port listener, generate a listener
					// for each instance. HTTP services can happily reside on 0.0.0.0:PORT and use the
					// wildcard route match to get to the appropriate IP through original dst clusters.
					if features.EnableHeadlessService && bind.Primary() == "" && service.Resolution == model.Passthrough &&
						saddress == constants.UnspecifiedIP && (servicePort.Protocol.IsTCP() || servicePort.Protocol.IsUnsupported()) {
						instances := push.ServiceEndpointsByPort(service, servicePort.Port, nil)
						if service.Attributes.ServiceRegistry != provider.Kubernetes && len(instances) == 0 && service.Attributes.LabelSelectors == nil {
							// A Kubernetes service with no endpoints means there are no endpoints at
							// all, so don't bother sending, as traffic will never work. If we did
							// send a wildcard listener, we may get into a situation where a scale
							// down leads to a listener conflict. Similarly, if we have a
							// labelSelector on the Service, then this may have endpoints not yet
							// selected or scaled down, so we skip these as well. This leaves us with
							// only a plain ServiceEntry with resolution NONE. In this case, we will
							// fallback to a wildcard listener.
							lb.buildSidecarOutboundListener(listenerOpts, listenerMap, virtualServices, actualWildcards)
							continue
						}
						for _, instance := range instances {
							// Make sure each endpoint address is a valid address
							// as service entries could have NONE resolution with label selectors for workload
							// entries (which could technically have hostnames).
							if !netutil.IsValidIPAddress(instance.Address) {
								continue
							}
							// Skip build outbound listener to the node itself,
							// as when app access itself by pod ip will not flow through this listener.
							// Simultaneously, it will be duplicate with inbound listener.
							if instance.Address == node.IPAddresses[0] {
								continue
							}
							listenerOpts.bind.binds = []string{instance.Address}
							lb.buildSidecarOutboundListener(listenerOpts, listenerMap, virtualServices, actualWildcards)
						}
					} else {
						// Standard logic for headless and non headless services
						lb.buildSidecarOutboundListener(listenerOpts, listenerMap, virtualServices, actualWildcards)
					}
				}
			}
		}
	}
	// Now validate all the listeners. Collate the tcp listeners first and then the HTTP listeners
	// TODO: This is going to be bad for caching as the order of listeners in tcpListeners or httpListeners is not
	// guaranteed.
	return finalizeOutboundListeners(lb, listenerMap)
}
// finalizeOutboundListeners converts the accumulated listener entries into
// Envoy listener resources, attaching catch-all fallthrough network filters to
// each one.
func finalizeOutboundListeners(lb *ListenerBuilder, listenerMap map[listenerKey]*outboundListenerEntry) []*listener.Listener {
	out := make([]*listener.Listener, 0, len(listenerMap))
	for _, entry := range listenerMap {
		// Built once per entry (not hoisted out of the loop) to avoid object
		// sharing across EnvoyFilter patches.
		fallthroughFilters := buildOutboundCatchAllNetworkFiltersOnly(lb.push, lb.node)
		out = append(out, buildListenerFromEntry(lb, entry, fallthroughFilters))
	}
	return out
}
// buildListenerFromEntry converts an accumulated outboundListenerEntry into an
// Envoy listener: it sets the bind address(es), attaches TLS/HTTP inspector
// listener filters when filter chain matching requires them, applies network
// WASM plugins, and installs a default (fallthrough) filter chain so traffic
// that partially matches a chain is not dropped.
func buildListenerFromEntry(builder *ListenerBuilder, le *outboundListenerEntry, fallthroughNetworkFilters []*listener.Filter) *listener.Listener {
	l := &listener.Listener{
		// TODO: need to sanitize the opts.bind if its a UDS socket, as it could have colons, that envoy doesn't like
		Name:                             getListenerName(le.bind.Primary(), le.servicePort.Port, istionetworking.TransportProtocolTCP),
		Address:                          util.BuildAddress(le.bind.Primary(), uint32(le.servicePort.Port)),
		AdditionalAddresses:              util.BuildAdditionalAddresses(le.bind.Extra(), uint32(le.servicePort.Port)),
		TrafficDirection:                 core.TrafficDirection_OUTBOUND,
		ContinueOnListenerFiltersTimeout: true,
	}
	if builder.node.Metadata.OutboundListenerExactBalance {
		l.ConnectionBalanceConfig = &listener.Listener_ConnectionBalanceConfig{
			BalanceType: &listener.Listener_ConnectionBalanceConfig_ExactBalance_{
				ExactBalance: &listener.Listener_ConnectionBalanceConfig_ExactBalance{},
			},
		}
	}
	// Virtual (original_dst-matched) listeners must not bind a real socket.
	if !le.bind.bindToPort {
		l.BindToPort = proto.BoolFalse
	}
	// add a TLS inspector if we need to detect ServerName or ALPN
	// (this is not applicable for QUIC listeners)
	needTLSInspector := false
	needHTTPInspector := false
	for _, chain := range le.chains {
		needsALPN := chain.tlsContext != nil && chain.tlsContext.CommonTlsContext != nil && len(chain.tlsContext.CommonTlsContext.AlpnProtocols) > 0
		if len(chain.sniHosts) > 0 || needsALPN {
			needTLSInspector = true
		}
		needHTTP := len(chain.applicationProtocols) > 0
		if needHTTP {
			needHTTPInspector = true
		}
	}
	// We add a TLS inspector when http inspector is needed for outbound only. This
	// is because if we ever set ALPN in the match without
	// transport_protocol=raw_buffer, Envoy will automatically inject a tls
	// inspector: https://github.com/envoyproxy/envoy/issues/13601. This leads to
	// excessive logging and loss of control over the config. For inbound this is not
	// needed, since we are explicitly setting transport protocol in every single
	// match. We can do this for outbound as well, at which point this could be
	// removed, but have not yet
	if needTLSInspector || needHTTPInspector {
		l.ListenerFilters = append(l.ListenerFilters, xdsfilters.TLSInspector)
	}
	if needHTTPInspector {
		l.ListenerFilters = append(l.ListenerFilters, xdsfilters.HTTPInspector)
		// Enable timeout only if they configure it and we have an HTTP inspector.
		// This is really unsafe, so hopefully not used...
		l.ListenerFiltersTimeout = builder.push.Mesh.ProtocolDetectionTimeout
	} else {
		// Otherwise, do not have a timeout at all
		l.ListenerFiltersTimeout = durationpb.New(0)
	}
	// Network-level WASM plugins applicable to this outbound port.
	wasm := builder.push.WasmPluginsByListenerInfo(builder.node, model.WasmPluginListenerInfo{
		Port:  le.servicePort.Port,
		Class: istionetworking.ListenerClassSidecarOutbound,
	}, model.WasmPluginTypeNetwork)
	for _, opt := range le.chains {
		chain := &listener.FilterChain{
			Metadata:        opt.metadata,
			TransportSocket: buildDownstreamTLSTransportSocket(opt.tlsContext),
		}
		if opt.httpOpts == nil {
			// we are building a network filter chain (no http connection manager) for this filter chain
			chain.Filters = opt.networkFilters
		} else {
			opt.httpOpts.statPrefix = strings.ToLower(l.TrafficDirection.String()) + "_" + l.Name
			opt.httpOpts.port = le.servicePort.Port
			hcm := builder.buildHTTPConnectionManager(opt.httpOpts)
			filter := &listener.Filter{
				Name:       wellknown.HTTPConnectionManager,
				ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(hcm)},
			}
			// WASM network filters are inserted ahead of the HCM, phase by phase.
			opt.networkFilters = extension.PopAppendNetwork(opt.networkFilters, wasm, extensions.PluginPhase_AUTHN)
			opt.networkFilters = extension.PopAppendNetwork(opt.networkFilters, wasm, extensions.PluginPhase_AUTHZ)
			opt.networkFilters = extension.PopAppendNetwork(opt.networkFilters, wasm, extensions.PluginPhase_STATS)
			opt.networkFilters = extension.PopAppendNetwork(opt.networkFilters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
			chain.Filters = append(chain.Filters, opt.networkFilters...)
			chain.Filters = append(chain.Filters, filter)
		}
		// Set a default filter chain. This allows us to avoid issues where
		// traffic starts to match a filter chain but then doesn't match latter criteria, leading to
		// dropped requests. See https://github.com/istio/istio/issues/26079 for details.
		// If there are multiple filter chains and a match all chain, move it to DefaultFilterChain
		// This ensures it will always be used as the fallback.
		if opt.isMatchAll() {
			l.DefaultFilterChain = chain
		} else {
			chain.FilterChainMatch = opt.toFilterChainMatch()
			l.FilterChains = append(l.FilterChains, chain)
		}
	}
	// If there is only one filter chain, no need to use DefaultFilterChain
	// This is probably not necessary, but for consistency with older code we keep the same logic.
	if l.DefaultFilterChain != nil && len(l.FilterChains) == 0 {
		l.FilterChains = []*listener.FilterChain{l.DefaultFilterChain}
		l.DefaultFilterChain = nil
	} else if l.DefaultFilterChain == nil {
		l.DefaultFilterChain = &listener.FilterChain{
			FilterChainMatch: &listener.FilterChainMatch{},
			Name:             util.PassthroughFilterChain,
			Filters:          fallthroughNetworkFilters,
		}
	}
	return l
}
// buildHTTPProxy builds the explicit HTTP proxy listener, bound to the local
// host addresses, when a proxy HTTP port is configured either mesh-wide via
// ProxyHttpPort or per-proxy via the HTTPProxyPort metadata. Returns nil when
// no port is configured.
func (lb *ListenerBuilder) buildHTTPProxy(node *model.Proxy,
	push *model.PushContext,
) *listener.Listener {
	httpProxyPort := push.Mesh.ProxyHttpPort // global
	// Per-proxy metadata overrides the mesh-wide port when it parses as an integer.
	if node.Metadata.HTTPProxyPort != "" {
		port, err := strconv.Atoi(node.Metadata.HTTPProxyPort)
		if err == nil {
			httpProxyPort = int32(port)
		}
	}
	if httpProxyPort == 0 {
		return nil
	}
	ph := GetProxyHeaders(node, push, istionetworking.ListenerClassSidecarOutbound)
	// enable HTTP PROXY port if necessary; this will add an RDS route for this port
	_, actualLocalHosts := getWildcardsAndLocalHost(node.GetIPMode())
	httpOpts := &core.Http1ProtocolOptions{
		AllowAbsoluteUrl: proto.BoolTrue,
	}
	// Optionally accept HTTP/1.0, enabled mesh-wide or via proxy metadata.
	if features.HTTP10 || enableHTTP10(node.Metadata.HTTP10) {
		httpOpts.AcceptHttp_10 = true
	}
	fcs := []*filterChainOpts{{
		httpOpts: &httpListenerOpts{
			rds:              model.RDSHttpProxy,
			useRemoteAddress: false,
			connectionManager: &hcm.HttpConnectionManager{
				HttpProtocolOptions:        httpOpts,
				ServerName:                 ph.ServerName,
				ServerHeaderTransformation: ph.ServerHeaderTransformation,
				GenerateRequestId:          ph.GenerateRequestID,
			},
			suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
			skipIstioMXHeaders:        false,
			protocol:                  protocol.HTTP_PROXY,
			class:                     istionetworking.ListenerClassSidecarOutbound,
		},
	}}
	return buildListenerFromEntry(lb, &outboundListenerEntry{
		chains: fcs,
		bind: listenerBinding{
			binds:      actualLocalHosts,
			bindToPort: true,
		},
		servicePort: &model.Port{Port: int(httpProxyPort)},
	}, nil)
}
// buildSidecarOutboundHTTPListenerOpts returns the single HTTP filter chain
// options for an outbound HTTP listener. The RDS route name is chosen from the
// port/bind: the UDS path for port 0, a per-service "<host>:<port>" route for
// sniffed services on a dedicated bind, or the shared per-port route otherwise.
func buildSidecarOutboundHTTPListenerOpts(
	opts outboundListenerOpts,
	actualWildcard string,
	listenerProtocol istionetworking.ListenerProtocol,
) []*filterChainOpts {
	var rdsName string
	if opts.port.Port == 0 {
		rdsName = opts.bind.Primary() // use the UDS as a rds name
	} else {
		if listenerProtocol == istionetworking.ListenerProtocolAuto && opts.bind.Primary() != actualWildcard && opts.service != nil {
			// For sniffed services, we have a unique listener and route just for that service
			rdsName = string(opts.service.Hostname) + ":" + strconv.Itoa(opts.port.Port)
		} else {
			// Otherwise we have a shared one per-port
			rdsName = strconv.Itoa(opts.port.Port)
		}
	}
	ph := GetProxyHeaders(opts.proxy, opts.push, istionetworking.ListenerClassSidecarOutbound)
	httpOpts := &httpListenerOpts{
		// Set useRemoteAddress to true for sidecar outbound listeners so that it picks up the localhost address of the sender,
		// which is an internal address, so that trusted headers are not sanitized. This helps to retain the timeout headers
		// such as "x-envoy-upstream-rq-timeout-ms" set by the calling application.
		useRemoteAddress: features.UseRemoteAddress,
		rds:              rdsName,
		connectionManager: &hcm.HttpConnectionManager{
			ServerName:                 ph.ServerName,
			ServerHeaderTransformation: ph.ServerHeaderTransformation,
			GenerateRequestId:          ph.GenerateRequestID,
		},
		suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
		skipIstioMXHeaders:        ph.SkipIstioMXHeaders,
		protocol:                  opts.port.Protocol,
		class:                     istionetworking.ListenerClassSidecarOutbound,
	}
	// Optionally accept HTTP/1.0, enabled mesh-wide or via proxy metadata.
	if features.HTTP10 || enableHTTP10(opts.proxy.Metadata.HTTP10) {
		httpOpts.connectionManager.HttpProtocolOptions = &core.Http1ProtocolOptions{
			AcceptHttp_10: true,
		}
	}
	return []*filterChainOpts{{
		httpOpts: httpOpts,
	}}
}
// buildSidecarOutboundTCPListenerOpts assembles the filter chain options for an
// outbound TCP listener: TLS (SNI-matching) chains first, followed by plain TCP
// chains, both built from the virtual services relevant to the target service.
func buildSidecarOutboundTCPListenerOpts(opts outboundListenerOpts, virtualServices []config.Config) []*filterChainOpts {
	gateways := sets.New(constants.IstioMeshGateway)
	// Narrow the virtual services to those matching this service's host when a
	// service is present; otherwise consider them all.
	svcConfigs := virtualServices
	if opts.service != nil {
		// Do not filter namespace for now.
		// TODO(https://github.com/istio/istio/issues/46146) we may need to, or something more sophisticated
		svcConfigs = getConfigsForHost("", opts.service.Hostname, virtualServices)
	}
	chains := make([]*filterChainOpts, 0)
	chains = append(chains, buildSidecarOutboundTLSFilterChainOpts(opts.proxy, opts.push, opts.cidr, opts.service,
		opts.bind.Primary(), opts.port, gateways, svcConfigs)...)
	chains = append(chains, buildSidecarOutboundTCPFilterChainOpts(opts.proxy, opts.push, opts.cidr, opts.service,
		opts.port, gateways, svcConfigs)...)
	return chains
}
// buildSidecarOutboundListener builds a single listener and
// adds it to the listenerMap provided by the caller. Listeners are added
// if one doesn't already exist. HTTP listeners on same port are ignored
// (as vhosts are shipped through RDS). TCP listeners on same port are
// allowed only if they have different CIDR matches.
func (lb *ListenerBuilder) buildSidecarOutboundListener(listenerOpts outboundListenerOpts,
listenerMap map[listenerKey]*outboundListenerEntry, virtualServices []config.Config, actualWildcards []string,
) {
// Alias services do not get listeners generated
if listenerOpts.service.Resolution == model.Alias {
return
}
// TODO: remove actualWildcard
var currentListenerEntry *outboundListenerEntry
conflictType := NoConflict
listenerPortProtocol := listenerOpts.port.Protocol
listenerProtocol := istionetworking.ModelProtocolToListenerProtocol(listenerOpts.port.Protocol)
var listenerMapKey listenerKey
switch listenerProtocol {
case istionetworking.ListenerProtocolTCP, istionetworking.ListenerProtocolAuto:
// Determine the listener address if bind is empty
// we listen on the service VIP if and only
// if the address is an IP address. If its a CIDR, we listen on
// 0.0.0.0, and setup a filter chain match for the CIDR range.
// As a small optimization, CIDRs with /32 prefix will be converted
// into listener address so that there is a dedicated listener for this
// ip:port. This will reduce the impact of a listener reload
if listenerOpts.bind.Primary() == "" { // TODO: make this better
svcListenAddress := listenerOpts.service.GetAddressForProxy(listenerOpts.proxy)
svcExtraListenAddresses := listenerOpts.service.GetExtraAddressesForProxy(listenerOpts.proxy)
// Override the svcListenAddress, using the proxy ipFamily, for cases where the ipFamily cannot be detected easily.
// For example: due to the possibility of using hostnames instead of ips in ServiceEntry,
// it is hard to detect ipFamily for such services.
if listenerOpts.service.Attributes.ServiceRegistry == provider.External && listenerOpts.proxy.IsIPv6() &&
svcListenAddress == constants.UnspecifiedIP {
svcListenAddress = constants.UnspecifiedIPv6
}
// For dualstack proxies we need to add the unspecifed ipv6 address to the list of extra listen addresses
if listenerOpts.service.Attributes.ServiceRegistry == provider.External && listenerOpts.proxy.IsDualStack() &&
svcListenAddress == constants.UnspecifiedIP {
svcExtraListenAddresses = append(svcExtraListenAddresses, constants.UnspecifiedIPv6)
}
// We should never get an empty address.
// This is a safety guard, in case some platform adapter isn't doing things
// properly
if len(svcListenAddress) > 0 {
if !strings.Contains(svcListenAddress, "/") {
listenerOpts.bind.binds = append([]string{svcListenAddress}, svcExtraListenAddresses...)
} else {
// Address is a CIDR. Fall back to 0.0.0.0 and
// filter chain match
// TODO: this probably needs to handle dual stack better
listenerOpts.bind.binds = actualWildcards
listenerOpts.cidr = svcListenAddress
}
}
}
listenerMapKey = listenerKey{listenerOpts.bind.Primary(), listenerOpts.port.Port}
case istionetworking.ListenerProtocolHTTP:
// first identify the bind if its not set. Then construct the key
// used to lookup the listener in the conflict map.
if len(listenerOpts.bind.Primary()) == 0 { // no user specified bind. Use 0.0.0.0:Port or [::]:Port
listenerOpts.bind.binds = actualWildcards
}
listenerMapKey = listenerKey{listenerOpts.bind.Primary(), listenerOpts.port.Port}
}
// Have we already generated a listener for this Port based on user
// specified listener ports? if so, we should not add any more HTTP
// services to the port. The user could have specified a sidecar
// resource with one or more explicit ports and then added a catch
// all listener, implying add all other ports as usual. When we are
// iterating through the services for a catchAll egress listener,
// the caller would have set the locked bit for each listener Entry
// in the map.
//
// Check if this HTTP listener conflicts with an existing TCP
// listener. We could have listener conflicts occur on unix domain
// sockets, or on IP binds. Specifically, its common to see
// conflicts on binds for wildcard address when a service has NONE
// resolution type, since we collapse all HTTP listeners into a
// single 0.0.0.0:port listener and use vhosts to distinguish
// individual http services in that port
if cur, exists := listenerMap[listenerMapKey]; exists {
currentListenerEntry = cur
// NOTE: This is not a conflict. This is simply filtering the
// services for a given listener explicitly.
// When the user declares their own ports in Sidecar.egress
// with some specific services on those ports, we should not
// generate any more listeners on that port as the user does
// not want those listeners. Protocol sniffing is not needed.
if cur.locked {
return
}
}
var opts []*filterChainOpts
// For HTTP_PROXY protocol defined by sidecars, just create the HTTP listener right away.
if listenerPortProtocol == protocol.HTTP_PROXY {
opts = buildSidecarOutboundHTTPListenerOpts(listenerOpts, actualWildcards[0], listenerProtocol)
} else {
switch listenerProtocol {
case istionetworking.ListenerProtocolHTTP:
// Check if conflict happens
if currentListenerEntry != nil {
// Build HTTP listener. If current listener entry is using HTTP or protocol sniffing,
// append the service. Otherwise (TCP), change current listener to use protocol sniffing.
if currentListenerEntry.protocol.IsTCP() {
conflictType = HTTPOverTCP
} else {
// Exit early, listener already exists
return
}
}
opts = buildSidecarOutboundHTTPListenerOpts(listenerOpts, actualWildcards[0], listenerProtocol)
// Add application protocol filter chain match to the http filter chain. The application protocol will be set by http inspector
// Since application protocol filter chain match has been added to the http filter chain, a fall through filter chain will be
// appended to the listener later to allow arbitrary egress TCP traffic pass through when its port is conflicted with existing
// HTTP services, which can happen when a pod accesses a non registry service.
if listenerOpts.bind.Primary() == actualWildcards[0] {
for _, opt := range opts {
// Support HTTP/1.0, HTTP/1.1 and HTTP/2
opt.applicationProtocols = append(opt.applicationProtocols, plaintextHTTPALPNs...)
opt.transportProtocol = xdsfilters.RawBufferTransportProtocol
}
// if we have a tcp fallthrough filter chain, this is no longer an HTTP listener - it
// is instead "unsupported" (auto detected), as we have a TCP and HTTP filter chain with
// inspection to route between them
listenerPortProtocol = protocol.Unsupported
}
case istionetworking.ListenerProtocolTCP:
opts = buildSidecarOutboundTCPListenerOpts(listenerOpts, virtualServices)
// Check if conflict happens
if currentListenerEntry != nil {
// Build TCP listener. If current listener entry is using HTTP, add a new TCP filter chain
// If current listener is using protocol sniffing, merge the TCP filter chains.
if currentListenerEntry.protocol.IsHTTP() {
conflictType = TCPOverHTTP
} else if currentListenerEntry.protocol.IsTCP() {
conflictType = TCPOverTCP
} else {
conflictType = TCPOverAuto
}
}
case istionetworking.ListenerProtocolAuto:
if currentListenerEntry != nil {
if currentListenerEntry.protocol.IsHTTP() {
conflictType = AutoOverHTTP
} else if currentListenerEntry.protocol.IsTCP() {
conflictType = AutoOverTCP
} else {
// Exit early, listener already exists
return
}
}
// Add tcp filter chain, build TCP filter chain first.
tcpOpts := buildSidecarOutboundTCPListenerOpts(listenerOpts, virtualServices)
// Add http filter chain and tcp filter chain to the listener opts
httpOpts := buildSidecarOutboundHTTPListenerOpts(listenerOpts, actualWildcards[0], listenerProtocol)
// Add application protocol filter chain match to the http filter chain. The application protocol will be set by http inspector
for _, opt := range httpOpts {
// Support HTTP/1.0, HTTP/1.1 and HTTP/2
opt.applicationProtocols = append(opt.applicationProtocols, plaintextHTTPALPNs...)
opt.transportProtocol = xdsfilters.RawBufferTransportProtocol
}
opts = append(tcpOpts, httpOpts...)
default:
// UDP or other protocols: no need to log, it's too noisy
return
}
}
// If there is a TCP listener on well known port, cannot add any http filter chain
// with the inspector as it will break for server-first protocols. Similarly,
// if there was a HTTP listener on well known port, cannot add a tcp listener
// with the inspector as inspector breaks all server-first protocols.
if currentListenerEntry != nil &&
!isConflictWithWellKnownPort(listenerOpts.port.Protocol, currentListenerEntry.protocol, conflictType) {
log.Warnf("conflict happens on a well known port %d, incoming protocol %v, existing protocol %v, conflict type %v",
listenerOpts.port.Port, listenerOpts.port.Protocol, currentListenerEntry.protocol, conflictType)
return
}
// In general, for handling conflicts we:
// * Turn on sniffing if its HTTP and TCP mixed
// * Merge filter chains
switch conflictType {
case NoConflict, AutoOverHTTP:
// This is a new entry (NoConflict), or completely overriding (AutoOverHTTP); add it to the map
listenerMap[listenerMapKey] = &outboundListenerEntry{
servicePort: listenerOpts.port,
bind: listenerOpts.bind,
chains: opts,
protocol: listenerPortProtocol,
}
case HTTPOverTCP, TCPOverHTTP, AutoOverTCP:
// Merge the two and "upgrade" to sniffed
mergeTCPFilterChains(currentListenerEntry, opts, listenerOpts)
currentListenerEntry.protocol = protocol.Unsupported
case TCPOverTCP, TCPOverAuto:
// Merge two TCP filter chains. HTTP filter chain will not conflict with TCP filter chain because HTTP filter chain match for
// HTTP filter chain is different from TCP filter chain's.
mergeTCPFilterChains(currentListenerEntry, opts, listenerOpts)
default:
// This should never happen
log.Errorf("Got unexpected conflict type %v. This should never happen", conflictType)
}
}
// httpListenerOpts are options for an HTTP listener
type httpListenerOpts struct {
	// routeConfig is an inline route configuration for this listener.
	routeConfig *route.RouteConfiguration
	// rds is the route configuration name to be fetched via RDS
	// (presumably used instead of routeConfig — confirm with callers).
	rds string
	// If set, use this as a basis
	connectionManager *hcm.HttpConnectionManager
	// stat prefix for the http connection manager
	// DO not set this field. Will be overridden by buildCompleteFilterChain
	statPrefix string
	// protocol is the protocol instance of the port this listener serves.
	protocol protocol.Instance
	// useRemoteAddress is passed through to the HTTP connection manager configuration.
	useRemoteAddress bool
	// suppressEnvoyDebugHeaders mirrors its name; assumed to disable Envoy debug headers — confirm in HCM build.
	suppressEnvoyDebugHeaders bool
	// skipIstioMXHeaders mirrors its name; assumed to skip Istio metadata-exchange headers — confirm in HCM build.
	skipIstioMXHeaders bool
	// http3Only indicates that the HTTP codec used
	// is HTTP/3 over QUIC transport (uses UDP)
	http3Only bool
	// class is the listener class (sidecar inbound/outbound, gateway, ...).
	class istionetworking.ListenerClass
	// port is the listener port number.
	port int
	// hbone indicates whether this listener is used for HBONE tunneling.
	hbone bool
	// Waypoint-specific modifications in HCM
	isWaypoint bool
}
// filterChainOpts describes a filter chain: a set of filters with the same TLS context
type filterChainOpts struct {
	// Matching criteria. Will eventually turn into FilterChainMatch
	// sniHosts are the SNI server names to match; a "*" entry disables SNI matching (see toFilterChainMatch).
	sniHosts []string
	// destinationCIDRs are destination IP ranges to match; unspecified addresses are skipped when building the match.
	destinationCIDRs []string
	// applicationProtocols are the ALPN values to match (e.g. plaintext HTTP ALPNs for sniffing).
	applicationProtocols []string
	// transportProtocol is the detected transport protocol to match (e.g. raw_buffer).
	transportProtocol string
	// Arbitrary metadata to attach to the filter
	metadata *core.Metadata
	// TLS configuration for the filter
	tlsContext *auth.DownstreamTlsContext
	// Set if this is for HTTP.
	httpOpts *httpListenerOpts
	// Set if this is for TCP chain.
	networkFilters []*listener.Filter
}
// gatewayListenerOpts are the options required to build a gateway Listener
type gatewayListenerOpts struct {
	// push is the global push context for this generation.
	push *model.PushContext
	// proxy is the gateway proxy the listener is built for.
	proxy *model.Proxy
	// bindToPort presumably makes the listener bind directly to the port — confirm with callers.
	bindToPort bool
	// bind is the primary bind address of the listener.
	bind string
	// extraBind holds additional bind addresses (used for dual stack, see buildGatewayListener).
	extraBind []string
	// port is the listener port number.
	port int
	// filterChainOpts describes the filter chains of the listener.
	filterChainOpts []*filterChainOpts
	// needPROXYProtocol requests that the PROXY protocol header be stripped first.
	needPROXYProtocol bool
}
// outboundListenerOpts are the options to build an outbound listener
type outboundListenerOpts struct {
	// push is the global push context for this generation.
	push *model.PushContext
	// proxy is the sidecar proxy the listener is built for.
	proxy *model.Proxy
	// bind holds the bind addresses chosen for the listener.
	bind listenerBinding
	// cidr is set when the service address is a CIDR; the CIDR becomes a filter
	// chain match while the listener itself binds to the wildcard address.
	cidr string
	// port is the service port the listener serves.
	port *model.Port
	// service is the service the listener is built for. May be nil for
	// user-defined egress listeners in the Sidecar API.
	service *model.Service
}
// buildGatewayListener builds and initializes a Listener proto based on the provided opts. It does not set any filters.
// Optionally for HTTP filters with TLS enabled, HTTP/3 can be supported by generating QUIC Mirror filters for the
// same port (it is fine as QUIC uses UDP)
func buildGatewayListener(opts gatewayListenerOpts, transport istionetworking.TransportProtocol) *listener.Listener {
	filterChains := make([]*listener.FilterChain, 0, len(opts.filterChainOpts))
	var listenerFilters []*listener.ListenerFilter
	// Strip PROXY header first for non-QUIC traffic if requested.
	if opts.needPROXYProtocol {
		listenerFilters = append(listenerFilters, xdsfilters.ProxyProtocol)
	}
	// add a TLS inspector if we need to detect ServerName or ALPN
	// (this is not applicable for QUIC listeners)
	if transport == istionetworking.TransportProtocolTCP {
		for _, chain := range opts.filterChainOpts {
			needsALPN := chain.tlsContext != nil && chain.tlsContext.CommonTlsContext != nil && len(chain.tlsContext.CommonTlsContext.AlpnProtocols) > 0
			if len(chain.sniHosts) > 0 || needsALPN {
				// One inspector serves the whole listener; stop at the first chain that needs it.
				listenerFilters = append(listenerFilters, xdsfilters.TLSInspector)
				break
			}
		}
	}
	// Build one filter chain (match + transport socket, no network filters yet) per chain option.
	for _, chain := range opts.filterChainOpts {
		match := chain.toFilterChainMatch()
		var transportSocket *core.TransportSocket
		switch transport {
		case istionetworking.TransportProtocolTCP:
			transportSocket = buildDownstreamTLSTransportSocket(chain.tlsContext)
		case istionetworking.TransportProtocolQUIC:
			transportSocket = buildDownstreamQUICTransportSocket(chain.tlsContext)
		}
		filterChains = append(filterChains, &listener.FilterChain{
			FilterChainMatch: match,
			TransportSocket:  transportSocket,
		})
	}
	res := &listener.Listener{
		TrafficDirection: core.TrafficDirection_OUTBOUND,
		ListenerFilters:  listenerFilters,
		FilterChains:     filterChains,
		// For Gateways, we want no timeout. We should wait indefinitely for the TLS if we are sniffing.
		// The timeout is useful for sidecars, where we may operate on server first traffic; for gateways if we have listener filters
		// we know those filters are required.
		ContinueOnListenerFiltersTimeout: false,
		ListenerFiltersTimeout:           durationpb.New(0),
	}
	// Name and address depend on the transport: TCP sockets vs UDP/QUIC.
	switch transport {
	case istionetworking.TransportProtocolTCP:
		// TODO: need to sanitize the opts.bind if its a UDS socket, as it could have colons, that envoy doesn't like
		res.Name = getListenerName(opts.bind, opts.port, istionetworking.TransportProtocolTCP)
		log.Debugf("buildGatewayListener: building TCP listener %s", res.Name)
		// TODO: need to sanitize the opts.bind if its a UDS socket, as it could have colons, that envoy doesn't like
		res.Address = util.BuildAddress(opts.bind, uint32(opts.port))
		// only use to exact_balance for tcp outbound listeners; virtualOutbound listener should
		// not have this set per Envoy docs for redirected listeners
		if opts.proxy.Metadata.OutboundListenerExactBalance {
			res.ConnectionBalanceConfig = &listener.Listener_ConnectionBalanceConfig{
				BalanceType: &listener.Listener_ConnectionBalanceConfig_ExactBalance_{
					ExactBalance: &listener.Listener_ConnectionBalanceConfig_ExactBalance{},
				},
			}
		}
	case istionetworking.TransportProtocolQUIC:
		// TODO: switch on TransportProtocolQUIC is in too many places now. Once this is a bit
		// mature, refactor some of these to an interface so that they kick off the process
		// of building listener, filter chains, serializing etc based on transport protocol
		res.Name = getListenerName(opts.bind, opts.port, istionetworking.TransportProtocolQUIC)
		log.Debugf("buildGatewayListener: building UDP/QUIC listener %s", res.Name)
		res.Address = util.BuildNetworkAddress(opts.bind, uint32(opts.port), istionetworking.TransportProtocolQUIC)
		res.UdpListenerConfig = &listener.UdpListenerConfig{
			// TODO: Maybe we should add options in MeshConfig to
			// configure QUIC options - it should look similar
			// to the H2 protocol options.
			QuicOptions:            &listener.QuicProtocolOptions{},
			DownstreamSocketConfig: &core.UdpSocketConfig{},
		}
	}
	// add extra addresses for the listener
	if features.EnableDualStack && len(opts.extraBind) > 0 {
		res.AdditionalAddresses = util.BuildAdditionalAddresses(opts.extraBind, uint32(opts.port))
		// Ensure consistent transport protocol with main address
		for _, additionalAddress := range res.AdditionalAddresses {
			additionalAddress.GetAddress().GetSocketAddress().Protocol = transport.ToEnvoySocketProtocol()
		}
	}
	accessLogBuilder.setListenerAccessLog(opts.push, opts.proxy, res, istionetworking.ListenerClassGateway)
	return res
}
// isMatchAll reports whether this chain matches all traffic: no effective SNI
// restriction (empty, or containing the "*" wildcard), no application
// protocols, no transport protocol, and no destination CIDRs.
// This closely matches toFilterChainMatch.
func (chain *filterChainOpts) isMatchAll() bool {
	sniUnrestricted := len(chain.sniHosts) == 0 || slices.Contains(chain.sniHosts, "*")
	if !sniUnrestricted {
		return false
	}
	if len(chain.applicationProtocols) != 0 || len(chain.transportProtocol) != 0 {
		return false
	}
	return len(chain.destinationCIDRs) == 0
}
// conflictsWith reports whether two filter chains would produce equivalent
// match criteria: same transport protocol, same (ordered) application
// protocols, equal SNI host sets, and equal masked destination CIDR sets.
func (chain *filterChainOpts) conflictsWith(other *filterChainOpts) bool {
	// A nil chain only conflicts with another nil chain.
	if chain == nil || other == nil {
		return chain == other
	}
	if chain.transportProtocol != other.transportProtocol {
		return false
	}
	if !slices.Equal(chain.applicationProtocols, other.applicationProtocols) {
		return false
	}
	// SNI hosts are compared as a set: order does not matter, and "*" entries are ignored.
	toSNISet := func(hosts []string) sets.String {
		if len(hosts) == 0 {
			return nil
		}
		out := sets.NewWithLength[string](len(hosts))
		for _, h := range hosts {
			if h != "*" {
				out.Insert(h)
			}
		}
		return out
	}
	if !toSNISet(chain.sniHosts).Equals(toSNISet(other.sniHosts)) {
		return false
	}
	// Destination CIDRs are compared as sets of masked prefixes so that
	// overlapping prefixes compare equal (e.g. 1.2.3.4/8 is the same as 1.5.6.7/8).
	// Unparsable entries, the unspecified address, and "*" entries are skipped.
	toCIDRSet := func(cidrs []string) sets.String {
		if len(cidrs) == 0 {
			return nil
		}
		out := sets.NewWithLength[string](len(cidrs))
		for _, c := range cidrs {
			prefix, err := util.AddrStrToPrefix(c)
			if err != nil {
				continue
			}
			if prefix.Addr().String() == constants.UnspecifiedIP {
				continue
			}
			if c == "*" {
				continue
			}
			out.Insert(prefix.Masked().String())
		}
		return out
	}
	return toCIDRSet(chain.destinationCIDRs).Equals(toCIDRSet(other.destinationCIDRs))
}
// toFilterChainMatch converts the chain's matching criteria into an Envoy
// FilterChainMatch. Returns nil for a match-all chain. Note: it sorts copies
// of sniHosts and destinationCIDRs and stores them back on the receiver, so
// the generated config is deterministic.
func (chain *filterChainOpts) toFilterChainMatch() *listener.FilterChainMatch {
	if chain.isMatchAll() {
		return nil
	}
	match := &listener.FilterChainMatch{
		ApplicationProtocols: chain.applicationProtocols,
		TransportProtocol:    chain.transportProtocol,
	}
	// A "*" host effectively means match anything, i.e. no SNI based matching
	// is emitted for this chain.
	if len(chain.sniHosts) > 0 && !slices.Contains(chain.sniHosts, "*") {
		sortedHosts := append([]string{}, chain.sniHosts...)
		sort.Strings(sortedHosts)
		chain.sniHosts = sortedHosts
		match.ServerNames = sortedHosts
	}
	if len(chain.destinationCIDRs) > 0 {
		sortedCIDRs := append([]string{}, chain.destinationCIDRs...)
		sort.Strings(sortedCIDRs)
		chain.destinationCIDRs = sortedCIDRs
		for _, d := range sortedCIDRs {
			// Skip unparsable entries and the unspecified address.
			cidr := util.ConvertAddressToCidr(d)
			if cidr != nil && cidr.AddressPrefix != constants.UnspecifiedIP {
				match.PrefixRanges = append(match.PrefixRanges, cidr)
			}
		}
	}
	return match
}
// mergeTCPFilterChains merges the incoming filter chains into the current
// listener entry. An incoming chain whose match criteria conflict with a chain
// already present is dropped, and the conflict is recorded as a metric.
func mergeTCPFilterChains(current *outboundListenerEntry, incoming []*filterChainOpts, opts outboundListenerOpts) {
	// TODO(rshriram) merge multiple identical filter chains with just a single destination CIDR based
	// filter chain match, into a single filter chain and array of destinationcidr matches
	// The code below checks for TCP over TCP conflicts and merges listeners.
	// Merge the newly built listener with the existing listener, if and only if
	// the filter chains have distinct conditions: for every new filter chain,
	// check whether a matching one already exists among the merged chains.
	merged := make([]*filterChainOpts, 0, len(current.chains)+len(incoming))
	merged = append(merged, current.chains...)
	for _, candidate := range incoming {
		hasConflict := false
		for _, existing := range merged {
			if existing.conflictsWith(candidate) {
				hasConflict = true
				break
			}
		}
		if !hasConflict {
			// No existing filter chain matches the same traffic; keep the new one.
			merged = append(merged, candidate)
			continue
		}
		// NOTE: While opts.service can be nil, this code cannot be reached if
		// Service is nil because a nil Service can occur only for user defined
		// Egress listeners with ports, and these should occur in the API before
		// the wildcard egress listener; the check for the "locked" bit will
		// eliminate the collision. User is also not allowed to add duplicate
		// ports in the egress listener.
		newHostname := host.Name("sidecar-config-egress-tcp-listener")
		if opts.service != nil {
			newHostname = opts.service.Hostname
		}
		outboundListenerConflict{
			metric:          model.ProxyStatusConflictOutboundListenerTCPOverTCP,
			node:            opts.proxy,
			listenerName:    getListenerName(opts.bind.Primary(), opts.port.Port, istionetworking.TransportProtocolTCP),
			currentProtocol: current.servicePort.Protocol,
			newHostname:     newHostname,
			newProtocol:     opts.port.Protocol,
		}.addMetric(opts.push)
	}
	current.chains = merged
}
// isConflictWithWellKnownPort checks conflicts between incoming protocol and
// existing protocol on the same port. Mongo and MySQL are not allowed to
// co-exist with a different protocol in one port.
func isConflictWithWellKnownPort(incoming, existing protocol.Instance, conflict int) bool {
	if conflict == NoConflict {
		return true
	}
	// Identical protocols never conflict here, even for the restricted ones.
	if incoming == existing {
		return true
	}
	restricted := func(p protocol.Instance) bool {
		return p == protocol.Mongo || p == protocol.MySQL
	}
	return !restricted(incoming) && !restricted(existing)
}
// buildDownstreamTLSTransportSocket wraps the given TLS context in a TLS
// transport socket; returns nil when no TLS context is configured.
// nolint: interfacer
func buildDownstreamTLSTransportSocket(tlsContext *auth.DownstreamTlsContext) *core.TransportSocket {
	if tlsContext == nil {
		return nil
	}
	cfg := &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(tlsContext)}
	return &core.TransportSocket{Name: wellknown.TransportSocketTLS, ConfigType: cfg}
}
// buildDownstreamQUICTransportSocket wraps the given TLS context in a QUIC
// downstream transport socket; returns nil when no TLS context is configured.
func buildDownstreamQUICTransportSocket(tlsContext *auth.DownstreamTlsContext) *core.TransportSocket {
	if tlsContext == nil {
		return nil
	}
	quicTransport := &envoyquicv3.QuicDownstreamTransport{DownstreamTlsContext: tlsContext}
	return &core.TransportSocket{
		Name: wellknown.TransportSocketQuic,
		ConfigType: &core.TransportSocket_TypedConfig{
			TypedConfig: protoconv.MessageToAny(quicTransport),
		},
	}
}
// listenerKey identifies a listener by bind address and port; it is used as
// the key of the outbound listener conflict map.
type listenerKey struct {
	// bind is the listener bind address (may be a wildcard).
	bind string
	// port is the listener port number.
	port int
}
// conflictWithReservedListener checks whether the listener address bind:port conflicts with
// - static listener port: default is 15021 and 15090
// - virtual listener port: default is 15001 and 15006 (only need to check for outbound listener)
//
// The protocol parameter is named proto to avoid shadowing the imported
// protocol package inside the function body.
func conflictWithReservedListener(proxy *model.Proxy, push *model.PushContext, bind string, port int, proto protocol.Instance) bool {
	if bind != "" {
		// Only a wildcard bind can collide with the reserved listeners.
		if bind != wildCards[proxy.GetIPMode()][0] {
			return false
		}
	} else if !proto.IsHTTP() {
		// if the protocol is HTTP and bind == "", the listener address will be 0.0.0.0:port;
		// for non-HTTP protocols an unspecified bind cannot conflict.
		return false
	}
	var conflictWithStaticListener, conflictWithVirtualListener bool
	// At this point either bind == wildcard, or bind is unspecified but the protocol is HTTP.
	if proxy.Metadata != nil {
		// Static listeners: Envoy status and Prometheus ports.
		conflictWithStaticListener = proxy.Metadata.EnvoyStatusPort == port || proxy.Metadata.EnvoyPrometheusPort == port
	}
	if push != nil {
		// Virtual listeners: outbound (ProxyListenPort) and inbound (ProxyInboundListenPort).
		conflictWithVirtualListener = int(push.Mesh.ProxyListenPort) == port || int(push.Mesh.ProxyInboundListenPort) == port
	}
	return conflictWithStaticListener || conflictWithVirtualListener
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
)
const (
	// WildcardAddress binds to all IP addresses
	WildcardAddress = "0.0.0.0"
	// WildcardIPv6Address binds to all IPv6 addresses
	WildcardIPv6Address = "::"
	// LocalhostAddress for local binding
	LocalhostAddress = "127.0.0.1"
	// LocalhostIPv6Address for local binding
	LocalhostIPv6Address = "::1"
	// 6 is the magical number for inbound: 15006, 127.0.0.6, ::6
	// InboundPassthroughBindIpv4 is the IPv4 bind address of the inbound passthrough listener.
	InboundPassthroughBindIpv4 = "127.0.0.6"
	// InboundPassthroughBindIpv6 is the IPv6 bind address of the inbound passthrough listener.
	InboundPassthroughBindIpv6 = "::6"
)
var (
	// maintain 3 maps to return wildCards, localHosts and passthroughBindIPs according to IP mode of proxy
	// wildCards maps each IP mode to its wildcard bind addresses.
	wildCards = map[model.IPMode][]string{
		model.IPv4: {WildcardAddress},
		model.IPv6: {WildcardIPv6Address},
		model.Dual: {WildcardAddress, WildcardIPv6Address},
	}
	// localHosts maps each IP mode to its localhost addresses.
	localHosts = map[model.IPMode][]string{
		model.IPv4: {LocalhostAddress},
		model.IPv6: {LocalhostIPv6Address},
		model.Dual: {LocalhostAddress, LocalhostIPv6Address},
	}
	// passthroughBindIPs maps each IP mode to the inbound passthrough bind addresses.
	passthroughBindIPs = map[model.IPMode][]string{
		model.IPv4: {InboundPassthroughBindIpv4},
		model.IPv6: {InboundPassthroughBindIpv6},
		model.Dual: {InboundPassthroughBindIpv4, InboundPassthroughBindIpv6},
	}
)
// TODO: getActualWildcardAndLocalHost would be removed once the dual stack support in Istio
// getActualWildcardAndLocalHost returns the corresponding wildcard and
// localhost addresses depending on the proxy's IP addresses: the IPv4 pair
// when the proxy supports IPv4, the IPv6 pair otherwise.
func getActualWildcardAndLocalHost(node *model.Proxy) (string, string) {
	if !node.SupportsIPv4() {
		return WildcardIPv6Address, LocalhostIPv6Address
	}
	return WildcardAddress, LocalhostAddress
}
// getPassthroughBindIPs returns the bind addresses for the inbound passthrough
// listener given the proxy's IP mode. A dual-mode proxy only gets the IPv4
// address when dual-stack support is disabled.
func getPassthroughBindIPs(ipMode model.IPMode) []string {
	if ipMode == model.Dual && !features.EnableDualStack {
		return []string{InboundPassthroughBindIpv4}
	}
	// An empty lookup result means ipMode was unset; fall back to the IPv6 address.
	if addrs := passthroughBindIPs[ipMode]; len(addrs) > 0 {
		return addrs
	}
	return []string{InboundPassthroughBindIpv6}
}
// getSidecarInboundBindIPs returns the IPs that the proxy can bind to along
// with the sidecar specified port. It looks for a global unicast address; if
// none is found, the default wildcard addresses are used. This makes the
// inbound listener bind to instance_ip:port instead of 0.0.0.0:port where applicable.
func getSidecarInboundBindIPs(node *model.Proxy) []string {
	if len(node.GlobalUnicastIP) > 0 {
		return []string{node.GlobalUnicastIP}
	}
	wildcards, _ := getWildcardsAndLocalHost(node.GetIPMode())
	return wildcards
}
// getWildcardsAndLocalHost returns the wildcard bind addresses and the
// localhost addresses for the given IP mode (one entry for IPv4 or IPv6,
// two entries for dual stack).
func getWildcardsAndLocalHost(ipMode model.IPMode) ([]string, []string) {
	return wildCards[ipMode], localHosts[ipMode]
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"time"
accesslog "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
"google.golang.org/protobuf/types/known/durationpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
extensions "istio.io/api/extensions/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
"istio.io/istio/pilot/pkg/networking/plugin/authn"
"istio.io/istio/pilot/pkg/networking/plugin/authz"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pilot/pkg/xds/requestidextension"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto"
"istio.io/istio/pkg/wellknown"
)
// A stateful listener builder
// Support the below intentions
// 1. Use separate inbound capture listener(:15006) and outbound capture listener(:15001)
// 2. The above listeners use bind_to_port sub listeners or filter chains.
type ListenerBuilder struct {
	// node is the proxy the listeners are built for.
	node *model.Proxy
	// push is the global push context for this generation.
	push *model.PushContext
	// gatewayListeners are the listeners for Router (gateway) proxies.
	gatewayListeners []*listener.Listener
	// inboundListeners are the sidecar inbound listeners (plus HBONE listeners when enabled).
	inboundListeners []*listener.Listener
	// outboundListeners are the sidecar outbound listeners.
	outboundListeners []*listener.Listener
	// HttpProxyListener is a specialize outbound listener. See MeshConfig.proxyHttpPort
	httpProxyListener *listener.Listener
	// virtualOutboundListener receives traffic redirected by iptables (Mesh.ProxyListenPort).
	virtualOutboundListener *listener.Listener
	// virtualInboundListener is the inbound counterpart — presumably bound to
	// Mesh.ProxyInboundListenPort; built outside this chunk.
	virtualInboundListener *listener.Listener
	// envoyFilterWrapper caches the EnvoyFilter patches applying to this proxy (see patchListeners).
	envoyFilterWrapper *model.EnvoyFilterWrapper
	// authnBuilder provides access to authn (mTLS) configuration for the given proxy.
	authnBuilder *authn.Builder
	// authzBuilder provides access to authz configuration for the given proxy.
	authzBuilder *authz.Builder
	// authzCustomBuilder provides access to CUSTOM authz configuration for the given proxy.
	authzCustomBuilder *authz.Builder
}
// enabledInspector captures if for a given listener, listener filter inspectors are added
type enabledInspector struct {
	// HTTPInspector is true when the HTTP inspector listener filter is added.
	HTTPInspector bool
	// TLSInspector is true when the TLS inspector listener filter is added.
	TLSInspector bool
}
// NewListenerBuilder creates a ListenerBuilder for the given proxy and push
// context, wiring up the authn and the local/CUSTOM authz policy builders.
func NewListenerBuilder(node *model.Proxy, push *model.PushContext) *ListenerBuilder {
	isWaypoint := node.Type == model.Waypoint
	return &ListenerBuilder{
		node:               node,
		push:               push,
		authnBuilder:       authn.NewBuilder(push, node),
		authzBuilder:       authz.NewBuilder(authz.Local, push, node, isWaypoint),
		authzCustomBuilder: authz.NewBuilder(authz.Custom, push, node, isWaypoint),
	}
}
// appendSidecarInboundListeners populates the sidecar inbound listeners,
// including the HBONE listeners when HBONE is enabled, and returns the
// builder for chaining.
func (lb *ListenerBuilder) appendSidecarInboundListeners() *ListenerBuilder {
	inbound := lb.buildInboundListeners()
	if lb.node.EnableHBONE() {
		inbound = append(inbound, lb.buildInboundHBONEListeners()...)
	}
	lb.inboundListeners = inbound
	return lb
}
// appendSidecarOutboundListeners populates the sidecar outbound listeners and
// returns the builder for chaining.
func (lb *ListenerBuilder) appendSidecarOutboundListeners() *ListenerBuilder {
	lb.outboundListeners = lb.buildSidecarOutboundListeners(lb.node, lb.push)
	return lb
}
// buildHTTPProxyListener builds the specialized outbound HTTP proxy listener
// (see MeshConfig.proxyHttpPort) if one is configured, and returns the
// builder for chaining.
func (lb *ListenerBuilder) buildHTTPProxyListener() *ListenerBuilder {
	if httpProxy := lb.buildHTTPProxy(lb.node, lb.push); httpProxy != nil {
		lb.httpProxyListener = httpProxy
	}
	return lb
}
// buildVirtualOutboundListener builds the virtual outbound listener that
// receives all traffic redirected by iptables (Mesh.ProxyListenPort) and
// returns the builder for chaining. It is skipped when the workload does not
// use traffic interception.
func (lb *ListenerBuilder) buildVirtualOutboundListener() *ListenerBuilder {
	if lb.node.GetInterceptionMode() == model.InterceptionNone {
		// virtual listener is not necessary since workload is not using IPtables for traffic interception
		return lb
	}
	var transparent *wrappers.BoolValue
	if lb.node.GetInterceptionMode() == model.InterceptionTproxy {
		transparent = proto.BoolTrue
	}
	wildcards, _ := getWildcardsAndLocalHost(lb.node.GetIPMode())
	listenPort := uint32(lb.push.Mesh.ProxyListenPort)
	// This extra listener binds to the port that is the recipient of the iptables redirect.
	vListener := &listener.Listener{
		Name:             model.VirtualOutboundListenerName,
		Address:          util.BuildAddress(wildcards[0], listenPort),
		Transparent:      transparent,
		UseOriginalDst:   proto.BoolTrue,
		FilterChains:     buildOutboundCatchAllNetworkFilterChains(lb.node, lb.push),
		TrafficDirection: core.TrafficDirection_OUTBOUND,
	}
	// add extra addresses for the listener
	switch {
	case features.EnableDualStack && len(wildcards) > 1:
		vListener.AdditionalAddresses = util.BuildAdditionalAddresses(wildcards[1:], listenPort)
	case features.EnableAdditionalIpv4OutboundListenerForIpv6Only && lb.node.GetIPMode() == model.IPv6:
		// add an additional IPv4 outbound listener for IPv6 only clusters
		ipv4Wildcards, _ := getWildcardsAndLocalHost(model.IPv4) // get the IPv4 based wildcards
		vListener.AdditionalAddresses = util.BuildAdditionalAddresses(ipv4Wildcards[0:], listenPort)
	}
	accessLogBuilder.setListenerAccessLog(lb.push, lb.node, vListener, model.OutboundListenerClass(lb.node.Type))
	lb.virtualOutboundListener = vListener
	return lb
}
// patchOneListener applies EnvoyFilter listener patches to a single listener.
// Returns nil when the input is nil or the listener was removed by a patch;
// otherwise returns the (possibly modified) listener.
func (lb *ListenerBuilder) patchOneListener(l *listener.Listener, ctx networking.EnvoyFilter_PatchContext) *listener.Listener {
	if l == nil {
		return nil
	}
	patched := envoyfilter.ApplyListenerPatches(ctx, lb.envoyFilterWrapper, []*listener.Listener{l}, true)
	// The result is either empty (listener removed by a patch) or holds the
	// single, possibly modified, listener.
	if len(patched) == 0 {
		return nil
	}
	return patched[0]
}
// patchListeners applies any EnvoyFilter patches that match this proxy to the
// listeners built so far. Gateways only patch their gateway listeners; sidecars
// patch virtual, http proxy, inbound, and outbound listeners with the matching
// patch context.
func (lb *ListenerBuilder) patchListeners() {
	lb.envoyFilterWrapper = lb.push.EnvoyFilters(lb.node)
	if lb.envoyFilterWrapper == nil {
		// Nothing to patch for this proxy.
		return
	}
	if lb.node.Type == model.Router {
		lb.gatewayListeners = envoyfilter.ApplyListenerPatches(
			networking.EnvoyFilter_GATEWAY, lb.envoyFilterWrapper, lb.gatewayListeners, false)
		return
	}
	// Sidecar: each listener group is patched with its own patch context.
	lb.virtualOutboundListener = lb.patchOneListener(lb.virtualOutboundListener, networking.EnvoyFilter_SIDECAR_OUTBOUND)
	lb.virtualInboundListener = lb.patchOneListener(lb.virtualInboundListener, networking.EnvoyFilter_SIDECAR_INBOUND)
	lb.httpProxyListener = lb.patchOneListener(lb.httpProxyListener, networking.EnvoyFilter_SIDECAR_OUTBOUND)
	lb.inboundListeners = envoyfilter.ApplyListenerPatches(
		networking.EnvoyFilter_SIDECAR_INBOUND, lb.envoyFilterWrapper, lb.inboundListeners, false)
	lb.outboundListeners = envoyfilter.ApplyListenerPatches(
		networking.EnvoyFilter_SIDECAR_OUTBOUND, lb.envoyFilterWrapper, lb.outboundListeners, false)
}
// getListeners returns all listeners built so far. For gateways this is just
// the gateway listeners; for sidecars the order is: outbound listeners, the
// http proxy listener (if any), the virtual outbound listener (if any), then
// inbound listeners.
func (lb *ListenerBuilder) getListeners() []*listener.Listener {
	if lb.node.Type == model.Router {
		return lb.gatewayListeners
	}
	nHTTPProxy := 0
	if lb.httpProxyListener != nil {
		nHTTPProxy = 1
	}
	nVirtual := 0
	if lb.virtualOutboundListener != nil {
		nVirtual = 1
	}
	nInbound := len(lb.inboundListeners)
	nOutbound := len(lb.outboundListeners)
	nListener := nInbound + nOutbound + nHTTPProxy + nVirtual
	listeners := make([]*listener.Listener, 0, nListener)
	listeners = append(listeners, lb.outboundListeners...)
	if lb.httpProxyListener != nil {
		listeners = append(listeners, lb.httpProxyListener)
	}
	if lb.virtualOutboundListener != nil {
		listeners = append(listeners, lb.virtualOutboundListener)
	}
	listeners = append(listeners, lb.inboundListeners...)
	log.Debugf("Build %d listeners for node %s including %d outbound, %d http proxy, "+
		"%d virtual outbound",
		nListener,
		lb.node.ID,
		nOutbound,
		nHTTPProxy,
		nVirtual,
	)
	return listeners
}
// buildOutboundCatchAllNetworkFiltersOnly builds the network filter stack
// (metrics filters, access-logged TCP proxy) used for catch-all outbound
// chains. In ALLOW_ANY mode traffic goes to the passthrough cluster (or an
// explicitly configured egress proxy); otherwise it is blackholed.
func buildOutboundCatchAllNetworkFiltersOnly(push *model.PushContext, node *model.Proxy) []*listener.Filter {
	egressCluster := util.BlackHoleCluster
	if util.IsAllowAnyOutbound(node) {
		// We need a passthrough filter to fill in the filter stack for orig_dst listener
		egressCluster = util.PassthroughCluster
		// IsAllowAnyOutbound already guarantees OutboundTrafficPolicy is non-nil.
		if node.SidecarScope.OutboundTrafficPolicy.EgressProxy != nil {
			// The user provided an explicit destination for all unknown traffic;
			// derive the target cluster from it.
			egressCluster = istio_route.GetDestinationCluster(node.SidecarScope.OutboundTrafficPolicy.EgressProxy, nil, 0)
		}
	}
	proxyFilter := &tcp.TcpProxy{
		StatPrefix:       egressCluster,
		ClusterSpecifier: &tcp.TcpProxy_Cluster{Cluster: egressCluster},
		IdleTimeout:      parseDuration(node.Metadata.IdleTimeout),
	}
	stack := buildMetricsNetworkFilters(push, node, istionetworking.ListenerClassSidecarOutbound)
	accessLogBuilder.setTCPAccessLog(push, node, proxyFilter, istionetworking.ListenerClassSidecarOutbound)
	return append(stack, &listener.Filter{
		Name:       wellknown.TCPProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(proxyFilter)},
	})
}
// parseDuration converts a Go duration string to a protobuf Duration.
// Empty or malformed input yields nil (time.ParseDuration rejects "").
func parseDuration(s string) *durationpb.Duration {
	d, err := time.ParseDuration(s)
	if err != nil {
		return nil
	}
	return durationpb.New(d)
}
// TODO: This code is still insufficient. Ideally we should be parsing all the virtual services
// with TLS blocks and build the appropriate filter chain matches and routes here. And then finally
// evaluate the left over unmatched TLS traffic using allow_any or registry_only.
// See https://github.com/istio/istio/issues/21170
//
// buildOutboundCatchAllNetworkFilterChains returns the two catch-all chains of
// the virtual outbound listener: a blackhole chain for traffic hitting the
// listen port directly, plus the generic catch-all TCP chain.
func buildOutboundCatchAllNetworkFilterChains(node *model.Proxy, push *model.PushContext) []*listener.FilterChain {
	catchAll := &listener.FilterChain{
		Name:    model.VirtualOutboundCatchAllTCPFilterChainName,
		Filters: buildOutboundCatchAllNetworkFiltersOnly(push, node),
	}
	return []*listener.FilterChain{blackholeFilterChain(push, node), catchAll}
}
// blackholeFilterChain builds the chain that drops any traffic addressed
// directly to the virtual outbound listen port itself.
func blackholeFilterChain(push *model.PushContext, node *model.Proxy) *listener.FilterChain {
	blackhole := &tcp.TcpProxy{
		StatPrefix:       util.BlackHoleCluster,
		ClusterSpecifier: &tcp.TcpProxy_Cluster{Cluster: util.BlackHoleCluster},
	}
	filters := buildMetricsNetworkFilters(push, node, istionetworking.ListenerClassSidecarOutbound)
	filters = append(filters, &listener.Filter{
		Name:       wellknown.TCPProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(blackhole)},
	})
	return &listener.FilterChain{
		Name: model.VirtualOutboundBlackholeFilterChainName,
		FilterChainMatch: &listener.FilterChainMatch{
			// We should not allow requests to the listen port directly. Requests must be
			// sent to some other original port and iptables redirected to 15001. This
			// ensures we do not passthrough back to the listen port.
			DestinationPort: &wrappers.UInt32Value{Value: uint32(push.Mesh.ProxyListenPort)},
		},
		Filters: filters,
	}
}
// buildHTTPConnectionManager populates an HttpConnectionManager (HCM) from the
// given options: codec selection, path normalization, access logging, tracing,
// the RDS (or inline) route configuration, and the ordered HTTP filter chain
// (metadata exchange, authn/authz, wasm extensions, telemetry, router).
// The returned HCM is httpOpts.connectionManager, mutated in place.
func (lb *ListenerBuilder) buildHTTPConnectionManager(httpOpts *httpListenerOpts) *hcm.HttpConnectionManager {
	if httpOpts.connectionManager == nil {
		httpOpts.connectionManager = &hcm.HttpConnectionManager{}
	}
	connectionManager := httpOpts.connectionManager
	if httpOpts.http3Only {
		connectionManager.CodecType = hcm.HttpConnectionManager_HTTP3
		connectionManager.Http3ProtocolOptions = &core.Http3ProtocolOptions{}
	} else {
		connectionManager.CodecType = hcm.HttpConnectionManager_AUTO
	}
	connectionManager.AccessLog = []*accesslog.AccessLog{}
	connectionManager.StatPrefix = httpOpts.statPrefix

	// Setup path normalization based on the mesh-wide setting.
	connectionManager.PathWithEscapedSlashesAction = hcm.HttpConnectionManager_KEEP_UNCHANGED
	switch lb.push.Mesh.GetPathNormalization().GetNormalization() {
	case meshconfig.MeshConfig_ProxyPathNormalization_NONE:
		connectionManager.NormalizePath = proto.BoolFalse
	case meshconfig.MeshConfig_ProxyPathNormalization_BASE, meshconfig.MeshConfig_ProxyPathNormalization_DEFAULT:
		connectionManager.NormalizePath = proto.BoolTrue
	case meshconfig.MeshConfig_ProxyPathNormalization_MERGE_SLASHES:
		connectionManager.NormalizePath = proto.BoolTrue
		connectionManager.MergeSlashes = true
	case meshconfig.MeshConfig_ProxyPathNormalization_DECODE_AND_MERGE_SLASHES:
		connectionManager.NormalizePath = proto.BoolTrue
		connectionManager.MergeSlashes = true
		connectionManager.PathWithEscapedSlashesAction = hcm.HttpConnectionManager_UNESCAPE_AND_FORWARD
	}

	if httpOpts.useRemoteAddress {
		connectionManager.UseRemoteAddress = proto.BoolTrue
	} else {
		connectionManager.UseRemoteAddress = proto.BoolFalse
	}

	// Allow websocket upgrades
	websocketUpgrade := &hcm.HttpConnectionManager_UpgradeConfig{UpgradeType: "websocket"}
	connectionManager.UpgradeConfigs = []*hcm.HttpConnectionManager_UpgradeConfig{websocketUpgrade}

	if idleTimeout := parseDuration(lb.node.Metadata.IdleTimeout); idleTimeout != nil {
		connectionManager.CommonHttpProtocolOptions = &core.HttpProtocolOptions{
			IdleTimeout: idleTimeout,
		}
	}

	connectionManager.StreamIdleTimeout = durationpb.New(0 * time.Second)

	if httpOpts.rds != "" {
		// Routes are delivered dynamically via RDS over ADS.
		rds := &hcm.HttpConnectionManager_Rds{
			Rds: &hcm.Rds{
				ConfigSource: &core.ConfigSource{
					ConfigSourceSpecifier: &core.ConfigSource_Ads{
						Ads: &core.AggregatedConfigSource{},
					},
					InitialFetchTimeout: durationpb.New(0),
					ResourceApiVersion:  core.ApiVersion_V3,
				},
				RouteConfigName: httpOpts.rds,
			},
		}
		connectionManager.RouteSpecifier = rds
	} else {
		// Static inline route configuration.
		connectionManager.RouteSpecifier = &hcm.HttpConnectionManager_RouteConfig{RouteConfig: httpOpts.routeConfig}
	}

	accessLogBuilder.setHTTPAccessLog(lb.push, lb.node, connectionManager, httpOpts.class)

	startChildSpan, reqIDExtensionCtx := configureTracing(lb.push, lb.node, connectionManager, httpOpts.class)

	filters := []*hcm.HttpFilter{}
	if !httpOpts.isWaypoint {
		wasm := lb.push.WasmPluginsByListenerInfo(lb.node, model.WasmPluginListenerInfo{
			Port:  httpOpts.port,
			Class: httpOpts.class,
		}, model.WasmPluginTypeHTTP)
		// Metadata exchange filter needs to be added before any other HTTP filters are added. This is done to
		// ensure that mx filter comes before HTTP RBAC filter. This is related to https://github.com/istio/istio/issues/41066
		filters = appendMxFilter(httpOpts, filters)
		// TODO: how to deal with ext-authz? It will be in the ordering twice
		filters = append(filters, lb.authzCustomBuilder.BuildHTTP(httpOpts.class)...)
		filters = extension.PopAppendHTTP(filters, wasm, extensions.PluginPhase_AUTHN)
		filters = append(filters, lb.authnBuilder.BuildHTTP(httpOpts.class)...)
		filters = extension.PopAppendHTTP(filters, wasm, extensions.PluginPhase_AUTHZ)
		filters = append(filters, lb.authzBuilder.BuildHTTP(httpOpts.class)...)
		// TODO: these feel like the wrong place to insert, but this retains backwards compatibility with the original implementation
		filters = extension.PopAppendHTTP(filters, wasm, extensions.PluginPhase_STATS)
		filters = extension.PopAppendHTTP(filters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
	}

	if httpOpts.protocol == protocol.GRPCWeb {
		// TODO: because we share an HCM between many services, this check is broken; it will only work if the first
		// service is GRPCWeb. GRPCWeb is probably only used for Gateways though, which don't have this concern.
		filters = append(filters, xdsfilters.GrpcWeb)
	}
	filters = append(filters, xdsfilters.GrpcStats)

	// append ALPN HTTP filter in HTTP connection manager for outbound listener only.
	if features.ALPNFilter {
		if httpOpts.class != istionetworking.ListenerClassSidecarInbound {
			filters = append(filters, xdsfilters.Alpn)
		}
	}

	// TypedPerFilterConfig in route needs these filters.
	filters = append(filters, xdsfilters.Fault, xdsfilters.Cors)
	if !httpOpts.isWaypoint {
		filters = append(filters, lb.push.Telemetry.HTTPFilters(lb.node, httpOpts.class)...)
	}
	// Add EmptySessionFilter so that it can be overridden at route level per service.
	if features.EnablePersistentSessionFilter && httpOpts.class != istionetworking.ListenerClassSidecarInbound {
		filters = append(filters, xdsfilters.EmptySessionFilter)
	}

	// The router filter terminates the HTTP filter chain and must come last.
	filters = append(filters, xdsfilters.BuildRouterFilter(xdsfilters.RouterFilterContext{
		StartChildSpan:       startChildSpan,
		SuppressDebugHeaders: httpOpts.suppressEnvoyDebugHeaders,
	}))

	connectionManager.HttpFilters = filters
	connectionManager.RequestIdExtension = requestidextension.BuildUUIDRequestIDExtension(reqIDExtensionCtx)

	if features.EnableHCMInternalNetworks && lb.push.Networks != nil && len(lb.push.Networks.Networks) > 0 {
		// Build a single InternalAddressConfig accumulating the CIDR ranges of
		// *every* configured network. BUG FIX: previously a fresh config was
		// created and assigned inside the per-network loop, so only the last
		// network's ranges survived.
		iac := &hcm.HttpConnectionManager_InternalAddressConfig{}
		for _, internalnetwork := range lb.push.Networks.Networks {
			for _, ne := range internalnetwork.Endpoints {
				if cidr := util.ConvertAddressToCidr(ne.GetFromCidr()); cidr != nil {
					iac.CidrRanges = append(iac.CidrRanges, cidr)
				}
			}
		}
		connectionManager.InternalAddressConfig = iac
	}
	return connectionManager
}
// appendMxFilter appends the appropriate metadata-exchange (mx) HTTP filter
// for the listener class, or returns filters unchanged when metadata exchange
// is disabled or the chain is HBONE.
func appendMxFilter(httpOpts *httpListenerOpts, filters []*hcm.HttpFilter) []*hcm.HttpFilter {
	if !features.MetadataExchange || httpOpts.hbone {
		return filters
	}
	switch {
	case httpOpts.class == istionetworking.ListenerClassSidecarInbound:
		return append(filters, xdsfilters.SidecarInboundMetadataFilter)
	case httpOpts.skipIstioMXHeaders:
		// Outbound, but configured to not emit Istio MX headers.
		return append(filters, xdsfilters.SidecarOutboundMetadataFilterSkipHeaders)
	default:
		return append(filters, xdsfilters.SidecarOutboundMetadataFilter)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"sort"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
envoytype "github.com/envoyproxy/go-control-plane/envoy/type/v3"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
extensions "istio.io/api/extensions/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
"istio.io/istio/pilot/pkg/networking/plugin/authz"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/security/authn"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/security"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// inboundChainConfig defines the configuration for a single inbound filter chain. This may be created
// as a result of a Service, a Sidecar CR, or for the built-in passthrough filter chains.
type inboundChainConfig struct {
	// clusterName defines the destination cluster for this chain
	clusterName string
	// port defines the port configuration for this chain. Note that there is a Port and TargetPort;
	// most usages should just use TargetPort. Port is mostly used for legacy compatibility and
	// telemetry.
	port model.ServiceInstancePort
	// bind determines where (IP) this filter chain should bind. Note: typically we just end up using
	// 'virtual' listener and do not literally bind to port; in these cases this just impacts naming
	// and telemetry.
	bind string
	// extraBind is a string slice; each element is similar to the bind address, supporting multiple
	// addresses for the 'virtual' listener (e.g. dual stack).
	extraBind []string
	// tlsSettings defines the *custom* TLS settings for the chain. mTLS settings are orthogonal; this
	// only configures TLS overrides.
	tlsSettings *networking.ServerTLSSettings
	// passthrough should be set to true for the 'passthrough' chains, which are the chains always
	// present to handle all unmatched traffic. These have a few naming quirks that require
	// different configuration.
	passthrough bool
	// bindToPort determines if this chain should form a real listener that actually binds to a real port,
	// or if it should just be a filter chain part of the 'virtual inbound' listener.
	bindToPort bool
	// hbone determines if this is coming from an HBONE request originally
	hbone bool
	// telemetryMetadata defines additional information about the chain for telemetry purposes.
	telemetryMetadata telemetry.FilterChainMetadata
}
// StatPrefix returns the stat prefix for the config
func (cc inboundChainConfig) StatPrefix() string {
	if !cc.passthrough {
		return "inbound_" + cc.Name(istionetworking.ListenerProtocolHTTP)
	}
	// A bit arbitrary, but for backwards compatibility just use the cluster name
	return cc.clusterName
}
// Name determines the name for this chain
func (cc inboundChainConfig) Name(protocol istionetworking.ListenerProtocol) string {
	if !cc.passthrough {
		// Everything other than passthrough is derived from bind/port.
		return getListenerName(cc.bind, int(cc.port.TargetPort), istionetworking.TransportProtocolTCP)
	}
	// A bit arbitrary, but for backwards compatibility passthrough chains have
	// fixed names based on protocol.
	if protocol == istionetworking.ListenerProtocolHTTP {
		return model.VirtualInboundCatchAllHTTPFilterChainName
	}
	return model.VirtualInboundListenerName
}
// Catch-all CIDR ranges used by the passthrough filter chain matches. Matching
// the full range of one IP version effectively acts as an IP *version* match.
var (
	IPv4PassthroughCIDR = []*core.CidrRange{util.ConvertAddressToCidr("0.0.0.0/0")}
	IPv6PassthroughCIDR = []*core.CidrRange{util.ConvertAddressToCidr("::/0")}
)
// ToFilterChainMatch builds the FilterChainMatch for the config
func (cc inboundChainConfig) ToFilterChainMatch(opt FilterChainMatchOptions) *listener.FilterChainMatch {
	match := &listener.FilterChainMatch{
		ApplicationProtocols: opt.ApplicationProtocols,
		TransportProtocol:    opt.TransportProtocol,
	}
	if cc.passthrough {
		// Passthrough listeners do an IP match - but matching all IPs. This is really an IP *version* match,
		// but Envoy doesn't explicitly have version check.
		if cc.clusterName == util.InboundPassthroughClusterIpv4 {
			match.PrefixRanges = IPv4PassthroughCIDR
		} else {
			match.PrefixRanges = IPv6PassthroughCIDR
		}
	}
	if cc.port.TargetPort > 0 {
		match.DestinationPort = &wrappers.UInt32Value{Value: cc.port.TargetPort}
	}
	return match
}
// buildInboundHBONEListeners builds the listeners serving inbound HBONE traffic:
// a CONNECT-terminating listener, plus the "main internal" listener that holds a
// filter chain for every inbound chain config, with protocol detection.
func (lb *ListenerBuilder) buildInboundHBONEListeners() []*listener.Listener {
	// A single route: match HTTP CONNECT and upgrade it, forwarding to the main internal listener.
	routes := []*route.Route{{
		Match: &route.RouteMatch{
			PathSpecifier: &route.RouteMatch_ConnectMatcher_{ConnectMatcher: &route.RouteMatch_ConnectMatcher{}},
		},
		Action: &route.Route_Route{Route: &route.RouteAction{
			UpgradeConfigs: []*route.RouteAction_UpgradeConfig{{
				UpgradeType:   ConnectUpgradeType,
				ConnectConfig: &route.RouteAction_UpgradeConfig_ConnectConfig{},
			}},
			ClusterSpecifier: &route.RouteAction_Cluster{Cluster: MainInternalName},
		}},
	}}
	terminate := lb.buildConnectTerminateListener(routes)
	// Now we have top level listener... but we must have an internal listener for each standard filter chain
	// 1 listener per port; that listener will do protocol detection.
	l := &listener.Listener{
		Name:                             MainInternalName,
		ListenerSpecifier:                &listener.Listener_InternalListener{InternalListener: &listener.Listener_InternalListenerConfig{}},
		TrafficDirection:                 core.TrafficDirection_INBOUND,
		ContinueOnListenerFiltersTimeout: true,
	}
	// Flush authz cache since we need filter state for the principal.
	// The original builder is restored after the chains are built below.
	oldBuilder := lb.authzBuilder
	lb.authzBuilder = authz.NewBuilder(authz.Local, lb.push, lb.node, true)
	inboundChainConfigs := lb.buildInboundChainConfigs()
	for _, cc := range inboundChainConfigs {
		cc.hbone = true
		lp := istionetworking.ModelProtocolToListenerProtocol(cc.port.Protocol)
		// Internal chain has no mTLS
		mtls := authn.MTLSSettings{Port: cc.port.TargetPort, Mode: model.MTLSDisable}
		opts := getFilterChainMatchOptions(mtls, lp)
		chains := lb.inboundChainForOpts(cc, mtls, opts)
		for _, c := range chains {
			fcm := c.GetFilterChainMatch()
			if fcm != nil {
				// Clear out settings that do not matter anymore
				fcm.TransportProtocol = ""
			}
		}
		l.FilterChains = append(l.FilterChains, chains...)
	}
	lb.authzBuilder = oldBuilder
	accessLogBuilder.setListenerAccessLog(lb.push, lb.node, l, istionetworking.ListenerClassSidecarInbound)
	l.ListenerFilters = append(l.ListenerFilters, xdsfilters.OriginalDestination)
	// TODO: Exclude inspectors from some inbound ports.
	l.ListenerFilters = append(l.ListenerFilters, populateListenerFilters(lb.node, l, true)...)
	return []*listener.Listener{terminate, l}
}
// buildInboundListeners creates inbound listeners.
// Typically, this is a single listener with many filter chains for each applicable Service; traffic is redirected with iptables.
// However, explicit listeners can be used in NONE mode or with Sidecar.Ingress configuration.
func (lb *ListenerBuilder) buildInboundListeners() []*listener.Listener {
	// All listeners we build
	var listeners []*listener.Listener
	// virtualInboundFilterChains builds up all of the filter chains for the virtual inbound listener
	var virtualInboundFilterChains []*listener.FilterChain
	// For each chain config we will build required filter chain(s)
	for _, cc := range lb.buildInboundChainConfigs() {
		// First, construct our set of filter chain matchers. For a given port, we will have multiple matches
		// to handle mTLS vs plaintext and HTTP vs TCP (depending on protocol and PeerAuthentication).
		var opts []FilterChainMatchOptions
		mtls := lb.authnBuilder.ForPort(cc.port.TargetPort)
		// Chain has explicit user TLS config. This can only apply when the TLS mode is DISABLE to avoid conflicts.
		if cc.tlsSettings != nil && mtls.Mode == model.MTLSDisable {
			// Since we are terminating TLS, we need to treat the protocol as if its terminated.
			// Example: user specifies protocol=HTTPS and user TLS, we will use HTTP
			cc.port.Protocol = cc.port.Protocol.AfterTLSTermination()
			lp := istionetworking.ModelProtocolToListenerProtocol(cc.port.Protocol)
			opts = getTLSFilterChainMatchOptions(lp)
			mtls.TCP = BuildListenerTLSContext(cc.tlsSettings, lb.node, lb.push.Mesh, istionetworking.TransportProtocolTCP, false)
			mtls.HTTP = mtls.TCP
		} else {
			lp := istionetworking.ModelProtocolToListenerProtocol(cc.port.Protocol)
			opts = getFilterChainMatchOptions(mtls, lp)
		}
		// Build the actual chain
		chains := lb.inboundChainForOpts(cc, mtls, opts)
		if cc.bindToPort {
			// If this config is for bindToPort, we want to actually create a real Listener.
			listeners = append(listeners, lb.inboundCustomListener(cc, chains))
		} else {
			// Otherwise, just append the filter chain to the virtual inbound chains.
			virtualInboundFilterChains = append(virtualInboundFilterChains, chains...)
		}
	}
	if lb.node.GetInterceptionMode() != model.InterceptionNone {
		// Prepend virtual inbound, as long as we are using redirection.
		listeners = append([]*listener.Listener{lb.inboundVirtualListener(virtualInboundFilterChains)}, listeners...)
	}
	return listeners
}
// inboundVirtualListener builds the virtual inbound listener.
// This listener captures all inbound redirected traffic and contains:
// * Passthrough filter chains, matching all unmatched traffic. There are a few of these to handle all cases.
// * Service filter chains. These will either be for each Port exposed by a Service OR Sidecar.Ingress configuration.
func (lb *ListenerBuilder) inboundVirtualListener(chains []*listener.FilterChain) *listener.Listener {
	wildcards, _ := getWildcardsAndLocalHost(lb.node.GetIPMode())
	chainSet := buildInboundPassthroughChains(lb)
	chainSet = append(chainSet, chains...)
	return lb.buildInboundListener(model.VirtualInboundListenerName, wildcards,
		uint32(lb.push.Mesh.ProxyInboundListenPort), false, chainSet)
}
// inboundCustomListener builds a custom listener that actually binds to a port,
// rather than relying on redirection.
func (lb *ListenerBuilder) inboundCustomListener(cc inboundChainConfig, chains []*listener.FilterChain) *listener.Listener {
	// Primary bind address first, followed by any extra bind addresses.
	addresses := append([]string{cc.bind}, cc.extraBind...)
	return lb.buildInboundListener(cc.Name(istionetworking.ListenerProtocolTCP), addresses, cc.port.TargetPort, true, chains)
}
// buildInboundListener assembles an inbound listener from the given name,
// bind addresses, target port, and filter chains. Returns nil when no
// addresses are supplied.
func (lb *ListenerBuilder) buildInboundListener(name string, addresses []string, tPort uint32,
	bindToPort bool, chains []*listener.FilterChain,
) *listener.Listener {
	if len(addresses) == 0 {
		return nil
	}
	l := &listener.Listener{
		Name:                             name,
		Address:                          util.BuildAddress(addresses[0], tPort),
		TrafficDirection:                 core.TrafficDirection_INBOUND,
		ContinueOnListenerFiltersTimeout: true,
	}
	if features.EnableDualStack && len(addresses) > 1 {
		// Expose the remaining bind addresses as additional listener addresses.
		l.AdditionalAddresses = util.BuildAdditionalAddresses(addresses[1:], tPort)
	}
	if lb.node.Metadata.InboundListenerExactBalance {
		l.ConnectionBalanceConfig = &listener.Listener_ConnectionBalanceConfig{
			BalanceType: &listener.Listener_ConnectionBalanceConfig_ExactBalance_{
				ExactBalance: &listener.Listener_ConnectionBalanceConfig_ExactBalance{},
			},
		}
	}
	if !bindToPort && lb.node.GetInterceptionMode() == model.InterceptionTproxy {
		l.Transparent = proto.BoolTrue
	}
	accessLogBuilder.setListenerAccessLog(lb.push, lb.node, l, istionetworking.ListenerClassSidecarInbound)
	// Filter chains must be set before populateListenerFilters, which inspects them.
	l.FilterChains = chains
	l.ListenerFilters = populateListenerFilters(lb.node, l, bindToPort)
	l.ListenerFiltersTimeout = lb.push.Mesh.GetProtocolDetectionTimeout()
	return l
}
// inboundChainForOpts builds one filter chain per match option. Auto protocol
// never reaches here: it is already split into separate TCP and HTTP options.
func (lb *ListenerBuilder) inboundChainForOpts(cc inboundChainConfig, mtls authn.MTLSSettings, opts []FilterChainMatchOptions) []*listener.FilterChain {
	chains := make([]*listener.FilterChain, 0, len(opts))
	for _, opt := range opts {
		// The network filter stack is the only protocol-dependent piece.
		var filters []*listener.Filter
		switch opt.Protocol {
		case istionetworking.ListenerProtocolHTTP:
			filters = lb.buildInboundNetworkFiltersForHTTP(cc)
		case istionetworking.ListenerProtocolTCP:
			filters = lb.buildInboundNetworkFilters(cc)
		default:
			continue
		}
		chains = append(chains, &listener.FilterChain{
			FilterChainMatch: cc.ToFilterChainMatch(opt),
			Filters:          filters,
			TransportSocket:  buildDownstreamTLSTransportSocket(opt.ToTransportSocket(mtls)),
			Name:             cc.Name(opt.Protocol),
		})
	}
	return chains
}
// getSidecarIngressPortList returns the set of port numbers declared by the
// proxy's Sidecar ingress listeners.
func getSidecarIngressPortList(node *model.Proxy) sets.Set[int] {
	ports := sets.New[int]()
	for _, ingress := range node.SidecarScope.Sidecar.Ingress {
		ports.Insert(int(ingress.Port.Number))
	}
	return ports
}
// getFilterChainsByServicePort builds an inbound chain config for every service
// target port on the proxy, keyed by target port. When
// enableSidecarServiceInboundListenerMerge is set and the Sidecar declares
// ingress listeners, target ports already claimed by the Sidecar ingress are
// skipped so the Sidecar configuration takes precedence. On a port conflict
// between services, the first (oldest) Service wins.
func (lb *ListenerBuilder) getFilterChainsByServicePort(enableSidecarServiceInboundListenerMerge bool) map[uint32]inboundChainConfig {
	chainsByPort := make(map[uint32]inboundChainConfig)
	ingressPortListSet := sets.New[int]()
	sidecarScope := lb.node.SidecarScope
	if sidecarScope.HasIngressListener() {
		ingressPortListSet = getSidecarIngressPortList(lb.node)
	}
	// These depend only on the proxy, not on the service target; compute them
	// once instead of on every loop iteration.
	bindToPort := getBindToPort(networking.CaptureMode_DEFAULT, lb.node)
	actualWildcards, _ := getWildcardsAndLocalHost(lb.node.GetIPMode())
	for _, i := range lb.node.ServiceTargets {
		// Skip ports we cannot bind to
		if !lb.node.CanBindToPort(bindToPort, i.Port.TargetPort) {
			log.Debugf("buildInboundListeners: skipping privileged service port %d for node %s as it is an unprivileged proxy",
				i.Port.TargetPort, lb.node.ID)
			continue
		}
		port := i.Port
		if enableSidecarServiceInboundListenerMerge && sidecarScope.HasIngressListener() &&
			// ingress listener port means the target port, may not equal to service port
			ingressPortListSet.Contains(int(port.TargetPort)) {
			// here if port is declared in service and sidecar ingress both, we continue to take the one on sidecar + other service ports
			// e.g. 1,2, 3 in service and 3,4 in sidecar ingress,
			// this will still generate listeners for 1,2,3,4 where 3 is picked from sidecar ingress
			// port present in sidecarIngress listener so let sidecar take precedence
			continue
		}
		cc := inboundChainConfig{
			telemetryMetadata: telemetry.FilterChainMetadata{InstanceHostname: i.Service.Hostname},
			port:              port,
			clusterName:       model.BuildInboundSubsetKey(int(port.TargetPort)),
			bind:              actualWildcards[0],
			bindToPort:        bindToPort,
			hbone:             lb.node.IsWaypointProxy(),
		}
		// for inbound only generate a standalone listener when bindToPort=true
		if bindToPort && conflictWithReservedListener(lb.node, nil, cc.bind, int(port.TargetPort), port.Protocol) {
			log.Debugf("buildInboundListeners: skipping service port %d for node %s as it conflicts with static listener",
				port.TargetPort, lb.node.ID)
			continue
		}
		// add extra binding addresses
		if len(actualWildcards) > 1 {
			cc.extraBind = actualWildcards[1:]
		}
		if i.Service.Attributes.ServiceRegistry == provider.Kubernetes {
			cc.telemetryMetadata.KubernetesServiceNamespace = i.Service.Attributes.Namespace
			cc.telemetryMetadata.KubernetesServiceName = i.Service.Attributes.Name
		}
		// First, make sure there is a distinct instance used per port.
		// The Service is *almost* not relevant, but some Telemetry is per-service.
		// If there is a conflict, we will use the oldest Service. This impacts the protocol used as well.
		if old, f := chainsByPort[port.TargetPort]; f {
			reportInboundConflict(lb, old, cc)
			continue
		}
		chainsByPort[port.TargetPort] = cc
	}
	return chainsByPort
}
// buildInboundChainConfigs builds all the application chain configs.
// The result combines chains derived from Service ports and chains derived from
// Sidecar.Ingress listeners (the latter taking precedence on conflicting ports),
// sorted by target port for stable output.
func (lb *ListenerBuilder) buildInboundChainConfigs() []inboundChainConfig {
	var chainsByPort map[uint32]inboundChainConfig
	// No user supplied sidecar scope or the user supplied one has no ingress listeners.
	if !lb.node.SidecarScope.HasIngressListener() {
		// We should not create inbound listeners in NONE mode based on the service instances
		// Doing so will prevent the workloads from starting as they would be listening on the same port
		// Users are required to provide the sidecar config to define the inbound listeners
		if lb.node.GetInterceptionMode() == model.InterceptionNone {
			return nil
		}
		chainsByPort = lb.getFilterChainsByServicePort(false)
	} else {
		// only allow to merge inbound listeners if sidecar has ingress listener pilot has env EnableSidecarServiceInboundListenerMerge set
		if features.EnableSidecarServiceInboundListenerMerge {
			chainsByPort = lb.getFilterChainsByServicePort(true)
		} else {
			chainsByPort = make(map[uint32]inboundChainConfig)
		}
		for _, i := range lb.node.SidecarScope.Sidecar.Ingress {
			port := model.ServiceInstancePort{
				ServicePort: &model.Port{
					Name:     i.Port.Name,
					Port:     int(i.Port.Number),
					Protocol: protocol.Parse(i.Port.Protocol),
				},
				TargetPort: i.Port.Number, // No targetPort support in the API
			}
			bindtoPort := getBindToPort(i.CaptureMode, lb.node)
			// Skip ports we cannot bind to
			if !lb.node.CanBindToPort(bindtoPort, port.TargetPort) {
				log.Warnf("buildInboundListeners: skipping privileged sidecar port %d for node %s as it is an unprivileged proxy",
					port.TargetPort, lb.node.ID)
				continue
			}
			cc := inboundChainConfig{
				// Sidecar config doesn't have a real hostname. In order to give some telemetry info, make a synthetic hostname.
				telemetryMetadata: telemetry.FilterChainMetadata{
					InstanceHostname: host.Name(lb.node.SidecarScope.Name + "." + lb.node.SidecarScope.Namespace),
				},
				port:        port,
				clusterName: model.BuildInboundSubsetKey(int(port.TargetPort)),
				bind:        i.Bind,
				bindToPort:  bindtoPort,
				hbone:       lb.node.IsWaypointProxy(),
			}
			if cc.bind == "" {
				// If user didn't provide, pick one based on IP
				actualWildcards := getSidecarInboundBindIPs(lb.node)
				cc.bind = actualWildcards[0]
				if len(actualWildcards) > 1 {
					cc.extraBind = actualWildcards[1:]
				}
			}
			// for inbound only generate a standalone listener when bindToPort=true
			if bindtoPort && conflictWithReservedListener(lb.node, nil, cc.bind, port.Port, port.Protocol) {
				log.Warnf("buildInboundListeners: skipping sidecar port %d for node %s as it conflicts with static listener",
					port.TargetPort, lb.node.ID)
				continue
			}
			// If there is a conflict, we will use the oldest Service. This impacts the protocol used as well.
			if old, f := chainsByPort[port.TargetPort]; f {
				reportInboundConflict(lb, old, cc)
				continue
			}
			if i.Tls != nil && features.EnableTLSOnSidecarIngress {
				// User provided custom TLS settings
				cc.tlsSettings = i.Tls.DeepCopy()
				cc.tlsSettings.CipherSuites = security.FilterCipherSuites(cc.tlsSettings.CipherSuites)
				// TLS is terminated here, so treat the protocol as its post-termination form.
				cc.port.Protocol = cc.port.Protocol.AfterTLSTermination()
			}
			chainsByPort[port.TargetPort] = cc
		}
	}
	chainConfigs := make([]inboundChainConfig, 0, len(chainsByPort))
	for _, cc := range chainsByPort {
		chainConfigs = append(chainConfigs, cc)
	}
	// Give a stable order to the chains
	sort.Slice(chainConfigs, func(i, j int) bool {
		return chainConfigs[i].port.TargetPort < chainConfigs[j].port.TargetPort
	})
	return chainConfigs
}
// getBindToPort determines whether we should bind to port based on the chain-specific
// capture mode and the proxy's configuration.
func getBindToPort(mode networking.CaptureMode, node *model.Proxy) bool {
	switch mode {
	case networking.CaptureMode_DEFAULT:
		// Chain doesn't specify explicit config, so use the proxy defaults.
		return node.GetInterceptionMode() == model.InterceptionNone
	case networking.CaptureMode_NONE:
		// Explicitly configured to skip traffic capture: bind directly.
		return true
	default:
		return false
	}
}
// populateListenerFilters determines the appropriate listener filters based on the listener.
// HTTP and TLS inspectors are automatically derived based on FilterChainMatch requirements:
// a chain matching the TLS transport protocol needs the TLS inspector, and a chain matching
// HTTP application protocols needs the HTTP inspector.
func populateListenerFilters(node *model.Proxy, vi *listener.Listener, bindToPort bool) []*listener.ListenerFilter {
	lf := make([]*listener.ListenerFilter, 0, 4)
	if !bindToPort {
		// Redirected traffic: recover the original destination address.
		lf = append(lf, xdsfilters.OriginalDestination)
	}
	if !bindToPort && node.GetInterceptionMode() == model.InterceptionTproxy {
		lf = append(lf, xdsfilters.OriginalSrc)
	}
	// inspectors builds up a map of port -> required inspectors (TLS/HTTP)
	inspectors := map[int]enabledInspector{}
	for _, fc := range vi.FilterChains {
		port := fc.GetFilterChainMatch().GetDestinationPort().GetValue()
		needsTLS := fc.GetFilterChainMatch().GetTransportProtocol() == xdsfilters.TLSTransportProtocol
		needHTTP := false
		for _, ap := range fc.GetFilterChainMatch().GetApplicationProtocols() {
			// Check for HTTP protocol - these require HTTP inspector
			if ap == "http/1.1" || ap == "h2c" {
				needHTTP = true
				break
			}
		}
		// Port may already have config; we OR them together. If any filter chain on that port is enabled
		// we will enable the inspector.
		i := inspectors[int(port)]
		i.HTTPInspector = i.HTTPInspector || needHTTP
		i.TLSInspector = i.TLSInspector || needsTLS
		inspectors[int(port)] = i
	}
	// Enable TLS inspector on any ports we need it
	if needsTLS(inspectors) {
		lf = append(lf, buildTLSInspector(inspectors))
	}
	// Note: the HTTP inspector should be after TLS inspector.
	// If TLS inspector sets transport protocol to tls, the http inspector
	// won't inspect the packet.
	if needsHTTP(inspectors) {
		lf = append(lf, buildHTTPInspector(inspectors))
	}
	return lf
}
// listenerPredicateExcludePorts returns a listener filter predicate that will
// match everything except the passed in ports. This is useful, for example, to
// enable protocol sniffing on every port except port X and Y, because X and Y
// are explicitly declared.
// Returns nil when ports is empty; a nil predicate means the filter is never
// disabled.
func listenerPredicateExcludePorts(ports []int) *listener.ListenerFilterChainMatchPredicate {
	// Guard the empty case: without it, the single-element return below would
	// index into an empty slice and panic.
	if len(ports) == 0 {
		return nil
	}
	ranges := make([]*listener.ListenerFilterChainMatchPredicate, 0, len(ports))
	for _, p := range ports {
		ranges = append(ranges, &listener.ListenerFilterChainMatchPredicate{Rule: &listener.ListenerFilterChainMatchPredicate_DestinationPortRange{
			// Range is [start, end)
			DestinationPortRange: &envoytype.Int32Range{
				Start: int32(p),
				End:   int32(p + 1),
			},
		}})
	}
	if len(ranges) > 1 {
		// Multiple ports: OR the individual port-range predicates together.
		return &listener.ListenerFilterChainMatchPredicate{Rule: &listener.ListenerFilterChainMatchPredicate_OrMatch{
			OrMatch: &listener.ListenerFilterChainMatchPredicate_MatchSet{
				Rules: ranges,
			},
		}}
	}
	return &listener.ListenerFilterChainMatchPredicate{Rule: ranges[0].GetRule()}
}
// listenerPredicateIncludePorts returns a predicate matching only the given
// ports, expressed as the negation of the exclusion predicate.
func listenerPredicateIncludePorts(ports []int) *listener.ListenerFilterChainMatchPredicate {
	excluded := listenerPredicateExcludePorts(ports)
	return &listener.ListenerFilterChainMatchPredicate{
		Rule: &listener.ListenerFilterChainMatchPredicate_NotMatch{NotMatch: excluded},
	}
}
// needsTLS reports whether any port requires the TLS inspector.
func needsTLS(inspectors map[int]enabledInspector) bool {
	for _, insp := range inspectors {
		if insp.TLSInspector {
			return true
		}
	}
	return false
}
// needsHTTP reports whether any port requires the HTTP inspector.
func needsHTTP(inspectors map[int]enabledInspector) bool {
	for _, insp := range inspectors {
		if insp.HTTPInspector {
			return true
		}
	}
	return false
}
// buildTLSInspector creates a tls inspector filter. Based on the configured ports, this may be enabled
// for only some ports.
func buildTLSInspector(inspectors map[int]enabledInspector) *listener.ListenerFilter {
	// TODO share logic with HTTP inspector
	// Port 0 (no explicit destination-port match) carries the default/passthrough setting.
	defaultEnabled := inspectors[0].TLSInspector
	// We have a split path here based on if the passthrough inspector is enabled
	// If it is, then we need to explicitly opt ports out of the inspector
	// If it isn't, then we need to explicitly opt ports into the inspector
	if defaultEnabled {
		ports := make([]int, 0, len(inspectors))
		// Collect all ports where the TLS inspector is disabled.
		for p, i := range inspectors {
			if p == 0 {
				continue
			}
			if !i.TLSInspector {
				ports = append(ports, p)
			}
		}
		// No need to filter, return the cached version enabled for all ports
		if len(ports) == 0 {
			return xdsfilters.TLSInspector
		}
		// Ensure consistent ordering as we are looping over a map
		sort.Ints(ports)
		filter := &listener.ListenerFilter{
			Name:       wellknown.TLSInspector,
			ConfigType: xdsfilters.TLSInspector.ConfigType,
			// Disable the inspector only on the collected (opted-out) ports.
			FilterDisabled: listenerPredicateExcludePorts(ports),
		}
		return filter
	}
	ports := make([]int, 0, len(inspectors))
	// Collect all ports where the TLS inspector is enabled.
	for p, i := range inspectors {
		if p == 0 {
			continue
		}
		if i.TLSInspector {
			ports = append(ports, p)
		}
	}
	// No ports opted in; return the cached version (defensive — callers guard with needsTLS)
	if len(ports) == 0 {
		return xdsfilters.TLSInspector
	}
	// Ensure consistent ordering as we are looping over a map
	sort.Ints(ports)
	filter := &listener.ListenerFilter{
		Name:       wellknown.TLSInspector,
		ConfigType: xdsfilters.TLSInspector.ConfigType,
		// Enable the inspector only on the collected ports (disabled everywhere else).
		FilterDisabled: listenerPredicateIncludePorts(ports),
	}
	return filter
}
// buildHTTPInspector creates an http inspector filter. Based on the configured ports, this may be enabled
// for only some ports.
func buildHTTPInspector(inspectors map[int]enabledInspector) *listener.ListenerFilter {
	disabled := make([]int, 0, len(inspectors))
	// Gather every port on which HTTP inspection is not required.
	for port, insp := range inspectors {
		if !insp.HTTPInspector {
			disabled = append(disabled, port)
		}
	}
	if len(disabled) == 0 {
		// Nothing to opt out; use the shared filter enabled on all ports.
		return xdsfilters.HTTPInspector
	}
	// Sort for deterministic output, since map iteration order is random.
	sort.Ints(disabled)
	return &listener.ListenerFilter{
		Name:       wellknown.HTTPInspector,
		ConfigType: xdsfilters.HTTPInspector.ConfigType,
		// Opt the disabled ports out of the inspector.
		FilterDisabled: listenerPredicateExcludePorts(disabled),
	}
}
// reportInboundConflict surfaces duplicate inbound chain configs: a protocol
// mismatch across services is reported as a metric; a pure duplicate is only logged.
func reportInboundConflict(lb *ListenerBuilder, old inboundChainConfig, cc inboundChainConfig) {
	oldHost := old.telemetryMetadata.InstanceHostname
	newHost := cc.telemetryMetadata.InstanceHostname
	if oldHost == newHost {
		// Same service selected the pod twice with the same port/protocol; nothing to do.
		return
	}
	// If the protocols and service do not match, we have a real conflict. For example, one Service may
	// define TCP and the other HTTP. Report this up to the user.
	if old.port.Protocol != cc.port.Protocol {
		lb.push.AddMetric(model.ProxyStatusConflictInboundListener, lb.node.ID, lb.node.ID,
			fmt.Sprintf("Conflicting inbound listener:%d. existing: %s, incoming: %s", cc.port.TargetPort,
				oldHost, newHost))
		return
	}
	// This can happen if two services select the same pod with same port and protocol - we should skip
	// building listener again, but no need to report to the user
	log.Debugf("skipping inbound listener:%d as we have already build it for existing host: %s, new host: %s",
		cc.port.TargetPort,
		oldHost, newHost)
}
// buildInboundPassthroughChains builds the passthrough chains. These match any unmatched traffic.
// This allows traffic to ports not exposed by any Service, for example.
func buildInboundPassthroughChains(lb *ListenerBuilder) []*listener.FilterChain {
	// ipv4 and ipv6 feature detect
	ipVersions := make([]string, 0, 2)
	if lb.node.SupportsIPv4() {
		ipVersions = append(ipVersions, util.InboundPassthroughClusterIpv4)
	}
	if lb.node.SupportsIPv6() {
		ipVersions = append(ipVersions, util.InboundPassthroughClusterIpv6)
	}
	// Setup enough slots for common max size (permissive mode is 5 filter chains). This is not
	// exact, just best effort optimization
	filterChains := make([]*listener.FilterChain, 0, 1+5*len(ipVersions))
	// The blackhole chain guards against traffic looping back to the listener's own port.
	filterChains = append(filterChains, buildInboundBlackhole(lb))

	// The passthrough mTLS options do not depend on the IP version; compute them
	// once instead of on every loop iteration.
	mtlsOptions := lb.authnBuilder.ForPassthrough()
	for _, clusterName := range ipVersions {
		for _, mtls := range mtlsOptions {
			cc := inboundChainConfig{
				port: model.ServiceInstancePort{
					ServicePort: &model.Port{
						Name: model.VirtualInboundListenerName,
						// Port as 0 doesn't completely make sense here, since we get weird tracing decorators like `:0/*`,
						// but this is backwards compatible and there aren't any perfect options.
						Port:     0,
						Protocol: protocol.Unsupported,
					},
					TargetPort: mtls.Port,
				},
				clusterName: clusterName,
				passthrough: true,
				hbone:       lb.node.IsWaypointProxy(),
			}
			opts := getFilterChainMatchOptions(mtls, istionetworking.ListenerProtocolAuto)
			filterChains = append(filterChains, lb.inboundChainForOpts(cc, mtls, opts)...)
		}
	}
	return filterChains
}
// buildInboundBlackhole builds a special filter chain for the virtual inbound matching traffic to the port the listener is actually on.
// This avoids a possible loop where traffic sent to this port would continually call itself indefinitely.
func buildInboundBlackhole(lb *ListenerBuilder) *listener.FilterChain {
	filters := make([]*listener.Filter, 0, 3)
	// Non-waypoint proxies exchange metadata before anything else.
	if !lb.node.IsWaypointProxy() {
		filters = append(filters, buildMetadataExchangeNetworkFilters()...)
	}
	filters = append(filters, buildMetricsNetworkFilters(lb.push, lb.node, istionetworking.ListenerClassSidecarInbound)...)
	// Terminate the connection into the blackhole cluster.
	blackhole := &tcp.TcpProxy{
		StatPrefix:       util.BlackHoleCluster,
		ClusterSpecifier: &tcp.TcpProxy_Cluster{Cluster: util.BlackHoleCluster},
	}
	filters = append(filters, &listener.Filter{
		Name:       wellknown.TCPProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(blackhole)},
	})
	return &listener.FilterChain{
		Name: model.VirtualInboundBlackholeFilterChainName,
		// Match traffic addressed to the proxy's own inbound listen port.
		FilterChainMatch: &listener.FilterChainMatch{
			DestinationPort: &wrappers.UInt32Value{Value: uint32(lb.push.Mesh.ProxyInboundListenPort)},
		},
		Filters: filters,
	}
}
// buildSidecarInboundHTTPOpts sets up HTTP options for a given chain.
func buildSidecarInboundHTTPOpts(lb *ListenerBuilder, cc inboundChainConfig) *httpListenerOpts {
	// Proxy header behavior (server name, XFCC handling, request ID) is configurable per proxy/mesh.
	ph := GetProxyHeaders(lb.node, lb.push, istionetworking.ListenerClassSidecarInbound)
	httpOpts := &httpListenerOpts{
		routeConfig:      buildSidecarInboundHTTPRouteConfig(lb, cc),
		rds:              "", // no RDS for inbound traffic
		useRemoteAddress: false,
		connectionManager: &hcm.HttpConnectionManager{
			// Append and forward client cert to backend, if configured
			ForwardClientCertDetails: ph.ForwardedClientCert,
			SetCurrentClientCertDetails: &hcm.HttpConnectionManager_SetCurrentClientCertDetails{
				Subject: proto.BoolTrue,
				Uri:     true,
				Dns:     true,
			},
			ServerName:                 ph.ServerName,
			ServerHeaderTransformation: ph.ServerHeaderTransformation,
			GenerateRequestId:          ph.GenerateRequestID,
		},
		suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
		protocol:                  cc.port.Protocol,
		class:                     istionetworking.ListenerClassSidecarInbound,
		port:                      int(cc.port.TargetPort),
		statPrefix:                cc.StatPrefix(),
		hbone:                     cc.hbone,
	}
	// See https://github.com/grpc/grpc-web/tree/master/net/grpc/gateway/examples/helloworld#configure-the-proxy
	if cc.port.Protocol.IsHTTP2() {
		httpOpts.connectionManager.Http2ProtocolOptions = &core.Http2ProtocolOptions{}
	}
	// Accept HTTP/1.0 when enabled globally (feature flag) or per-proxy metadata.
	if features.HTTP10 || enableHTTP10(lb.node.Metadata.HTTP10) {
		httpOpts.connectionManager.HttpProtocolOptions = &core.Http1ProtocolOptions{
			AcceptHttp_10: true,
		}
	}
	return httpOpts
}
// buildInboundNetworkFiltersForHTTP builds the network filters that should be inserted before an HCM.
// This should only be used with HTTP; see buildInboundNetworkFilters for TCP
func (lb *ListenerBuilder) buildInboundNetworkFiltersForHTTP(cc inboundChainConfig) []*listener.Filter {
	// Add network level WASM filters if any configured.
	httpOpts := buildSidecarInboundHTTPOpts(lb, cc)
	wasm := lb.push.WasmPluginsByListenerInfo(lb.node, model.WasmPluginListenerInfo{
		Port:  httpOpts.port,
		Class: httpOpts.class,
	}, model.WasmPluginTypeNetwork)
	var filters []*listener.Filter
	// Metadata exchange goes first, so RBAC failures, etc can access the state. See https://github.com/istio/istio/issues/41066
	// NOTE(review): skipped for hbone chains — presumably the tunnel carries identity; confirm.
	if !cc.hbone {
		filters = append(filters, buildMetadataExchangeNetworkFilters()...)
	}
	// WASM network filters are inserted in phase order: AUTHN, AUTHZ, STATS, then unspecified.
	// Authn
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_AUTHN)
	// Authz. Since this is HTTP, we only add WASM network filters -- not TCP RBAC, stats, etc.
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_AUTHZ)
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_STATS)
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
	// The HTTP connection manager is always the terminal network filter.
	h := lb.buildHTTPConnectionManager(httpOpts)
	filters = append(filters, &listener.Filter{
		Name:       wellknown.HTTPConnectionManager,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(h)},
	})
	return filters
}
// buildInboundNetworkFilters generates a TCP proxy network filter on the inbound path
func (lb *ListenerBuilder) buildInboundNetworkFilters(fcc inboundChainConfig) []*listener.Filter {
	prefix := fcc.clusterName
	// If stat name is configured, build the stat prefix from configured pattern.
	if len(lb.push.Mesh.InboundClusterStatName) != 0 {
		prefix = telemetry.BuildInboundStatPrefix(lb.push.Mesh.InboundClusterStatName, fcc.telemetryMetadata, "", uint32(fcc.port.Port), fcc.port.Name)
	}
	proxy := &tcp.TcpProxy{
		StatPrefix:       prefix,
		ClusterSpecifier: &tcp.TcpProxy_Cluster{Cluster: fcc.clusterName},
		IdleTimeout:      parseDuration(lb.node.Metadata.IdleTimeout),
	}
	// Attach access logging, then wrap the TCP proxy in the standard network filter stack.
	tcpFilter := setAccessLogAndBuildTCPFilter(lb.push, lb.node, proxy, istionetworking.ListenerClassSidecarInbound)
	stack := buildNetworkFiltersStack(fcc.port.Protocol, tcpFilter, prefix, fcc.clusterName)
	return lb.buildCompleteNetworkFilters(istionetworking.ListenerClassSidecarInbound, fcc.port.Port, stack, true)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"net/netip"
"strconv"
xds "github.com/cncf/xds/go/xds/core/v3"
matcher "github.com/cncf/xds/go/xds/type/matcher/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
any "google.golang.org/protobuf/types/known/anypb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
extensions "istio.io/api/extensions/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/match"
istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/route/retry"
"istio.io/istio/pilot/pkg/networking/plugin/authn"
"istio.io/istio/pilot/pkg/networking/util"
security "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto"
"istio.io/istio/pkg/wellknown"
)
// serviceForHostname resolves a Service by hostname via the proxy's push context.
func (lb *ListenerBuilder) serviceForHostname(name host.Name) *model.Service {
	push := lb.push
	return push.ServiceForHostname(lb.node, name)
}
// buildWaypointInbound assembles the full inbound listener set for a waypoint proxy.
func (lb *ListenerBuilder) buildWaypointInbound() []*listener.Listener {
	wls, wps := findWaypointResources(lb.node, lb.push)
	// We create 3 listeners:
	// 1. Decapsulation CONNECT listener.
	// 2. IP dispatch listener, handling both VIPs and direct pod IPs.
	// 3. Encapsulation CONNECT listener, originating the tunnel
	return []*listener.Listener{
		lb.buildWaypointInboundConnectTerminate(),
		lb.buildWaypointInternal(wls, wps.orderedServices),
		buildWaypointConnectOriginateListener(),
	}
}
// buildHCMConnectTerminateChain builds the HTTP connection manager filter that
// terminates inbound CONNECT tunnels and dispatches decapsulated streams with the
// provided routes.
func (lb *ListenerBuilder) buildHCMConnectTerminateChain(routes []*route.Route) []*listener.Filter {
	ph := GetProxyHeaders(lb.node, lb.push, istionetworking.ListenerClassSidecarInbound)
	h := &hcm.HttpConnectionManager{
		StatPrefix: ConnectTerminate,
		// Inline route config: a single wildcard vhost carrying the caller's routes.
		RouteSpecifier: &hcm.HttpConnectionManager_RouteConfig{
			RouteConfig: &route.RouteConfiguration{
				Name: "default",
				VirtualHosts: []*route.VirtualHost{{
					Name:    "default",
					Domains: []string{"*"},
					Routes:  routes,
				}},
			},
		},
		// Append and forward client cert to backend, if configured
		ForwardClientCertDetails: ph.ForwardedClientCert,
		SetCurrentClientCertDetails: &hcm.HttpConnectionManager_SetCurrentClientCertDetails{
			Subject: proto.BoolTrue,
			Uri:     true,
			Dns:     true,
		},
		ServerName:                 ph.ServerName,
		ServerHeaderTransformation: ph.ServerHeaderTransformation,
		GenerateRequestId:          ph.GenerateRequestID,
		UseRemoteAddress:           proto.BoolFalse,
	}

	// Protocol settings
	h.StreamIdleTimeout = istio_route.Notimeout
	// Accept the CONNECT upgrade used by the tunnel.
	h.UpgradeConfigs = []*hcm.HttpConnectionManager_UpgradeConfig{{
		UpgradeType: ConnectUpgradeType,
	}}
	h.Http2ProtocolOptions = &core.Http2ProtocolOptions{
		AllowConnect: true,
		// TODO(https://github.com/istio/istio/issues/43443)
		// All streams are bound to the same worker. Therefore, we need to limit for better fairness.
		MaxConcurrentStreams: &wrappers.UInt32Value{Value: 100},
	}

	// Filters needed to propagate the tunnel metadata to the inner streams.
	h.HttpFilters = []*hcm.HttpFilter{
		xdsfilters.WaypointDownstreamMetadataFilter,
		xdsfilters.ConnectAuthorityFilter,
		xdsfilters.BuildRouterFilter(xdsfilters.RouterFilterContext{
			StartChildSpan:       false,
			SuppressDebugHeaders: ph.SuppressDebugHeaders,
		}),
	}
	return []*listener.Filter{
		{
			Name:       wellknown.HTTPConnectionManager,
			ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(h)},
		},
	}
}
// buildConnectTerminateListener builds a listener on the HBone inbound port that
// terminates mutual TLS and CONNECT tunnels, dispatching via the provided routes.
func (lb *ListenerBuilder) buildConnectTerminateListener(routes []*route.Route) *listener.Listener {
	actualWildcard, _ := getActualWildcardAndLocalHost(lb.node)
	l := &listener.Listener{
		Name:    ConnectTerminate,
		Address: util.BuildAddress(actualWildcard, model.HBoneInboundListenPort),
		FilterChains: []*listener.FilterChain{
			{
				Name: "default",
				// Terminate mTLS; client certificates are required.
				TransportSocket: &core.TransportSocket{
					Name: "tls",
					ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(&tls.DownstreamTlsContext{
						CommonTlsContext:         buildCommonConnectTLSContext(lb.node, lb.push),
						RequireClientCertificate: &wrappers.BoolValue{Value: true},
					})},
				},
				Filters: lb.buildHCMConnectTerminateChain(routes),
			},
		},
	}
	return l
}
// buildWaypointInboundConnectTerminate builds the decapsulation listener: a single
// CONNECT-matching route forwarding the decapsulated stream to the main internal listener.
func (lb *ListenerBuilder) buildWaypointInboundConnectTerminate() *listener.Listener {
	routes := []*route.Route{{
		Match: &route.RouteMatch{
			PathSpecifier: &route.RouteMatch_ConnectMatcher_{ConnectMatcher: &route.RouteMatch_ConnectMatcher{}},
		},
		Action: &route.Route_Route{Route: &route.RouteAction{
			// Strip the CONNECT tunnel and route the inner stream.
			UpgradeConfigs: []*route.RouteAction_UpgradeConfig{{
				UpgradeType:   ConnectUpgradeType,
				ConnectConfig: &route.RouteAction_UpgradeConfig_ConnectConfig{},
			}},
			ClusterSpecifier: &route.RouteAction_Cluster{Cluster: MainInternalName},
		}},
	}}
	return lb.buildConnectTerminateListener(routes)
}
// buildWaypointInternal builds the central dispatch (internal) listener for a waypoint.
// Traffic is matched first by destination IP (service VIPs, then workload IPs), then by
// destination port, and finally — when the protocol must be sniffed — by application protocol.
func (lb *ListenerBuilder) buildWaypointInternal(wls []*model.WorkloadInfo, svcs []*model.Service) *listener.Listener {
	ipMatcher := &matcher.IPMatcher{}
	chains := []*listener.FilterChain{}
	// Shared waypoint HTTP filters placed before/after each chain's generated filters.
	pre, post := lb.buildWaypointHTTPFilters()
	for _, svc := range svcs {
		portMapper := match.NewDestinationPort()
		for _, port := range svc.Ports {
			// UDP ports are skipped entirely.
			if port.Protocol == protocol.UDP {
				continue
			}
			portString := fmt.Sprintf("%d", port.Port)
			cc := inboundChainConfig{
				clusterName: model.BuildSubsetKey(model.TrafficDirectionInboundVIP, "tcp", svc.Hostname, port.Port),
				port: model.ServiceInstancePort{
					ServicePort: port,
					TargetPort:  uint32(port.Port),
				},
				bind:  "0.0.0.0",
				hbone: true,
			}
			name := model.BuildSubsetKey(model.TrafficDirectionInboundVIP, "", svc.Hostname, port.Port)
			// Build both a TCP and an HTTP chain; which are installed depends on the
			// declared (or sniffed) protocol below.
			tcpName := name + "-tcp"
			tcpChain := &listener.FilterChain{
				Filters: lb.buildInboundNetworkFilters(cc),
				Name:    tcpName,
			}
			cc.clusterName = model.BuildSubsetKey(model.TrafficDirectionInboundVIP, "http", svc.Hostname, port.Port)
			httpName := name + "-http"
			httpChain := &listener.FilterChain{
				Filters: lb.buildWaypointInboundHTTPFilters(svc, cc, pre, post),
				Name:    httpName,
			}
			if port.Protocol.IsUnsupported() {
				// If we need to sniff, insert two chains and the protocol detector
				chains = append(chains, tcpChain, httpChain)
				portMapper.Map[portString] = match.ToMatcher(match.NewAppProtocol(match.ProtocolMatch{
					TCP:  match.ToChain(tcpName),
					HTTP: match.ToChain(httpName),
				}))
			} else if port.Protocol.IsHTTP() {
				// Otherwise, just insert HTTP/TCP
				chains = append(chains, httpChain)
				portMapper.Map[portString] = match.ToChain(httpChain.Name)
			} else {
				chains = append(chains, tcpChain)
				portMapper.Map[portString] = match.ToChain(tcpChain.Name)
			}
		}
		if len(portMapper.Map) > 0 {
			// Route this service's VIP to its per-port matcher.
			cidr := util.ConvertAddressToCidr(svc.GetAddressForProxy(lb.node))
			rangeMatcher := &matcher.IPMatcher_IPRangeMatcher{
				Ranges: []*xds.CidrRange{{
					AddressPrefix: cidr.AddressPrefix,
					PrefixLen:     cidr.PrefixLen,
				}},
				OnMatch: match.ToMatcher(portMapper.Matcher),
			}
			ipMatcher.RangeMatchers = append(ipMatcher.RangeMatchers, rangeMatcher)
		}
	}
	{
		// Direct pod access chain.
		cc := inboundChainConfig{
			clusterName: EncapClusterName,
			port: model.ServiceInstancePort{
				ServicePort: &model.Port{
					Name:     "unknown",
					Protocol: protocol.TCP,
				},
			},
			bind:  "0.0.0.0",
			hbone: true,
		}
		tcpChain := &listener.FilterChain{
			Filters: append([]*listener.Filter{
				xdsfilters.ConnectAuthorityNetworkFilter,
			},
				lb.buildInboundNetworkFilters(cc)...),
			Name: "direct-tcp",
		}
		// TODO: maintains undesirable persistent HTTP connections to "encap"
		httpChain := &listener.FilterChain{
			Filters: append([]*listener.Filter{
				xdsfilters.ConnectAuthorityNetworkFilter,
			},
				lb.buildWaypointInboundHTTPFilters(nil, cc, pre, post)...),
			Name: "direct-http",
		}
		chains = append(chains, tcpChain, httpChain)
		if len(wls) > 0 {
			// Workload IP filtering happens here.
			ipRange := []*xds.CidrRange{}
			for _, wl := range wls {
				for _, ip := range wl.Addresses {
					addr, _ := netip.AddrFromSlice(ip)
					cidr := util.ConvertAddressToCidr(addr.String())
					ipRange = append(ipRange, &xds.CidrRange{
						AddressPrefix: cidr.AddressPrefix,
						PrefixLen:     cidr.PrefixLen,
					})
				}
			}
			// Direct pod IPs always dispatch by sniffed application protocol.
			ipMatcher.RangeMatchers = append(ipMatcher.RangeMatchers,
				&matcher.IPMatcher_IPRangeMatcher{
					Ranges: ipRange,
					OnMatch: match.ToMatcher(match.NewAppProtocol(match.ProtocolMatch{
						TCP:  match.ToChain(tcpChain.Name),
						HTTP: match.ToChain(httpChain.Name),
					})),
				})
		}
	}
	l := &listener.Listener{
		Name:              MainInternalName,
		ListenerSpecifier: &listener.Listener_InternalListener{InternalListener: &listener.Listener_InternalListenerConfig{}},
		ListenerFilters: []*listener.ListenerFilter{
			xdsfilters.OriginalDestination,
			// TODO: This may affect the data path due to the server-first protocols triggering a time-out. Need exception filter.
			xdsfilters.HTTPInspector,
		},
		TrafficDirection: core.TrafficDirection_INBOUND,
		FilterChains:     chains,
		// Dispatch on destination IP via the custom IP matcher assembled above.
		FilterChainMatcher: &matcher.Matcher{
			MatcherType: &matcher.Matcher_MatcherTree_{
				MatcherTree: &matcher.Matcher_MatcherTree{
					Input: match.DestinationIP,
					TreeType: &matcher.Matcher_MatcherTree_CustomMatch{
						CustomMatch: &xds.TypedExtensionConfig{
							Name:        "ip",
							TypedConfig: protoconv.MessageToAny(ipMatcher),
						},
					},
				},
			},
		},
	}
	return l
}
// buildWaypointConnectOriginateListener returns the listener that originates the
// CONNECT tunnel for a waypoint; currently identical to the generic originate listener.
func buildWaypointConnectOriginateListener() *listener.Listener {
	return buildConnectOriginateListener()
}
// buildConnectOriginateListener builds an internal listener that encapsulates traffic
// into an HTTP CONNECT tunnel addressed to the original destination address.
func buildConnectOriginateListener() *listener.Listener {
	// No extra tunnel headers are added today; kept for the TunnelingConfig field below.
	var headers []*core.HeaderValueOption
	l := &listener.Listener{
		Name:              ConnectOriginate,
		UseOriginalDst:    wrappers.Bool(false),
		ListenerSpecifier: &listener.Listener_InternalListener{InternalListener: &listener.Listener_InternalListenerConfig{}},
		ListenerFilters: []*listener.ListenerFilter{
			xdsfilters.OriginalDestination,
		},
		FilterChains: []*listener.FilterChain{{
			Filters: []*listener.Filter{{
				Name: wellknown.TCPProxy,
				ConfigType: &listener.Filter_TypedConfig{
					TypedConfig: protoconv.MessageToAny(&tcp.TcpProxy{
						StatPrefix:       ConnectOriginate,
						ClusterSpecifier: &tcp.TcpProxy_Cluster{Cluster: ConnectOriginate},
						// Tunnel to the downstream's original destination address.
						TunnelingConfig: &tcp.TcpProxy_TunnelingConfig{
							Hostname:     "%DOWNSTREAM_LOCAL_ADDRESS%",
							HeadersToAdd: headers,
						},
					}),
				},
			}},
		}},
	}
	return l
}
// buildWaypointHTTPFilters augments the common chain of Waypoint-bound HTTP filters.
// Authn/authz filters are pre-pended. Telemetry filters are appended.
func (lb *ListenerBuilder) buildWaypointHTTPFilters() (pre []*hcm.HttpFilter, post []*hcm.HttpFilter) {
	// TODO: consider dedicated listener class for waypoint filters
	cls := istionetworking.ListenerClassSidecarInbound
	wasm := lb.push.WasmPluginsByListenerInfo(lb.node, model.WasmPluginListenerInfo{
		Class: cls,
	}, model.WasmPluginTypeHTTP)
	// TODO: how to deal with ext-authz? It will be in the ordering twice
	// Ordering matters: custom authz, wasm AUTHN, authn, wasm AUTHZ, authz.
	pre = append(pre, lb.authzCustomBuilder.BuildHTTP(cls)...)
	pre = extension.PopAppendHTTP(pre, wasm, extensions.PluginPhase_AUTHN)
	pre = append(pre, lb.authnBuilder.BuildHTTP(cls)...)
	pre = extension.PopAppendHTTP(pre, wasm, extensions.PluginPhase_AUTHZ)
	pre = append(pre, lb.authzBuilder.BuildHTTP(cls)...)
	// TODO: these feel like the wrong place to insert, but this retains backwards compatibility with the original implementation
	post = extension.PopAppendHTTP(post, wasm, extensions.PluginPhase_STATS)
	post = extension.PopAppendHTTP(post, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
	post = append(post, xdsfilters.WaypointUpstreamMetadataFilter)
	post = append(post, lb.push.Telemetry.HTTPFilters(lb.node, cls)...)
	return
}
// buildWaypointInboundHTTPFilters builds the network filters that should be inserted before an HCM.
// This should only be used with HTTP; see buildInboundNetworkFilters for TCP
func (lb *ListenerBuilder) buildWaypointInboundHTTPFilters(svc *model.Service, cc inboundChainConfig, pre, post []*hcm.HttpFilter) []*listener.Filter {
	ph := GetProxyHeaders(lb.node, lb.push, istionetworking.ListenerClassSidecarInbound)
	var filters []*listener.Filter
	httpOpts := &httpListenerOpts{
		routeConfig:      buildWaypointInboundHTTPRouteConfig(lb, svc, cc),
		rds:              "", // no RDS for inbound traffic
		useRemoteAddress: false,
		connectionManager: &hcm.HttpConnectionManager{
			ServerName:                 ph.ServerName,
			ServerHeaderTransformation: ph.ServerHeaderTransformation,
			GenerateRequestId:          ph.GenerateRequestID,
		},
		suppressEnvoyDebugHeaders: ph.SuppressDebugHeaders,
		protocol:                  cc.port.Protocol,
		class:                     istionetworking.ListenerClassSidecarInbound,
		statPrefix:                cc.StatPrefix(),
		isWaypoint:                true,
	}
	// See https://github.com/grpc/grpc-web/tree/master/net/grpc/gateway/examples/helloworld#configure-the-proxy
	if cc.port.Protocol.IsHTTP2() {
		httpOpts.connectionManager.Http2ProtocolOptions = &core.Http2ProtocolOptions{}
	}
	// Accept HTTP/1.0 when enabled globally or per-proxy metadata.
	if features.HTTP10 || enableHTTP10(lb.node.Metadata.HTTP10) {
		httpOpts.connectionManager.HttpProtocolOptions = &core.Http1ProtocolOptions{
			AcceptHttp_10: true,
		}
	}
	h := lb.buildHTTPConnectionManager(httpOpts)
	// Splice the shared waypoint filters around the generated ones:
	// [pre..., generated (minus router), post..., router].
	// Last filter must be router.
	router := h.HttpFilters[len(h.HttpFilters)-1]
	h.HttpFilters = append(pre, h.HttpFilters[:len(h.HttpFilters)-1]...)
	h.HttpFilters = append(h.HttpFilters, post...)
	h.HttpFilters = append(h.HttpFilters, router)
	filters = append(filters, &listener.Filter{
		Name:       wellknown.HTTPConnectionManager,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(h)},
	})
	return filters
}
// buildWaypointInboundHTTPRouteConfig builds the route config for a waypoint HTTP chain.
// A VirtualService bound to the service's hostname is honored when present; otherwise
// (or on any translation failure) the standard sidecar inbound route config is used.
func buildWaypointInboundHTTPRouteConfig(lb *ListenerBuilder, svc *model.Service, cc inboundChainConfig) *route.RouteConfiguration {
	// TODO: Policy binding via VIP+Host is inapplicable for direct pod access.
	if svc == nil {
		return buildSidecarInboundHTTPRouteConfig(lb, cc)
	}
	vss := getConfigsForHost(lb.node.ConfigNamespace, svc.Hostname, lb.node.SidecarScope.EgressListeners[0].VirtualServices())
	if len(vss) == 0 {
		return buildSidecarInboundHTTPRouteConfig(lb, cc)
	}
	if len(vss) > 1 {
		log.Warnf("multiple virtual services for one service: %v", svc.Hostname)
	}
	// Only the first matching VirtualService is used.
	vs := vss[0]
	// Typically we setup routes with the Host header match. However, for waypoint inbound we are actually using
	// hostname purely to match to the Service VIP. So we only need a single VHost, with routes compute based on the VS.
	// For destinations, we need to hit the inbound clusters if it is an internal destination, otherwise outbound.
	routes, err := lb.waypointInboundRoute(vs, cc.port.Port)
	if err != nil {
		// Translation produced no usable routes; fall back to the default inbound config.
		return buildSidecarInboundHTTPRouteConfig(lb, cc)
	}
	inboundVHost := &route.VirtualHost{
		Name:    inboundVirtualHostPrefix + strconv.Itoa(cc.port.Port), // Format: "inbound|http|%d"
		Domains: []string{"*"},
		Routes:  routes,
	}
	return &route.RouteConfiguration{
		Name:             cc.clusterName,
		VirtualHosts:     []*route.VirtualHost{inboundVHost},
		ValidateClusters: proto.BoolFalse,
	}
}
// waypointInboundRoute translates a VirtualService's HTTP routes into Envoy routes
// for the given listener port. Returns an error if the config is not a VirtualService
// or if no route applies.
func (lb *ListenerBuilder) waypointInboundRoute(virtualService config.Config, listenPort int) ([]*route.Route, error) {
	vs, ok := virtualService.Spec.(*networking.VirtualService)
	if !ok { // should never happen
		return nil, fmt.Errorf("in not a virtual service: %#v", virtualService)
	}
	out := make([]*route.Route, 0, len(vs.Http))
	catchall := false
	for _, http := range vs.Http {
		if len(http.Match) == 0 {
			// No match conditions: this route matches everything, so any later
			// route would be unreachable — stop after it.
			if r := lb.translateRoute(virtualService, http, nil, listenPort); r != nil {
				out = append(out, r)
			}
			catchall = true
		} else {
			for _, match := range http.Match {
				if r := lb.translateRoute(virtualService, http, match, listenPort); r != nil {
					out = append(out, r)
					// This is a catch all path. Routes are matched in order, so we will never go beyond this match
					// As an optimization, we can just stop sending any more routes here.
					//if isCatchAllMatch(match) {
					//	catchall = true
					//	break
					//}
				}
			}
		}
		if catchall {
			break
		}
	}
	if len(out) == 0 {
		return nil, fmt.Errorf("no routes matched")
	}
	return out, nil
}
// translateRoute converts a single HTTPRoute (plus an optional match condition) from a
// VirtualService into an Envoy route. Returns nil when the match excludes this port.
func (lb *ListenerBuilder) translateRoute(
	virtualService config.Config,
	in *networking.HTTPRoute,
	match *networking.HTTPMatchRequest,
	listenPort int,
) *route.Route {
	// When building routes, it's okay if the target cluster cannot be
	// resolved. Traffic to such clusters will blackhole.

	// Match by the destination port specified in the match condition
	if match != nil && match.Port != 0 && match.Port != uint32(listenPort) {
		return nil
	}
	// The route name is "<route>.<match>" when the match is named.
	routeName := in.Name
	if match != nil && match.Name != "" {
		routeName = routeName + "." + match.Name
	}
	out := &route.Route{
		Name:     routeName,
		Match:    istio_route.TranslateRouteMatch(virtualService, match, true),
		Metadata: util.BuildConfigInfoMetadata(virtualService.Meta),
	}
	// Header manipulation; a rewritten authority becomes a host rewrite below.
	authority := ""
	if in.Headers != nil {
		operations := istio_route.TranslateHeadersOperations(in.Headers)
		out.RequestHeadersToAdd = operations.RequestHeadersToAdd
		out.ResponseHeadersToAdd = operations.ResponseHeadersToAdd
		out.RequestHeadersToRemove = operations.RequestHeadersToRemove
		out.ResponseHeadersToRemove = operations.ResponseHeadersToRemove
		authority = operations.Authority
	}
	// Exactly one action applies: redirect, direct response, or a routed destination.
	if in.Redirect != nil {
		istio_route.ApplyRedirect(out, in.Redirect, listenPort, false, model.UseGatewaySemantics(virtualService))
	} else if in.DirectResponse != nil {
		istio_route.ApplyDirectResponse(out, in.DirectResponse)
	} else {
		lb.routeDestination(out, in, authority, listenPort)
	}
	out.Decorator = &route.Decorator{
		Operation: istio_route.GetRouteOperation(out, virtualService.Name, listenPort),
	}
	// Fault injection and CORS are attached as per-filter config on the route.
	if in.Fault != nil || in.CorsPolicy != nil {
		out.TypedPerFilterConfig = make(map[string]*any.Any)
	}
	if in.Fault != nil {
		out.TypedPerFilterConfig[wellknown.Fault] = protoconv.MessageToAny(istio_route.TranslateFault(in.Fault))
	}
	if in.CorsPolicy != nil {
		out.TypedPerFilterConfig[wellknown.CORS] = protoconv.MessageToAny(istio_route.TranslateCORSPolicy(in.CorsPolicy))
	}
	return out
}
// routeDestination populates out.Action with a RouteAction: retries, timeouts,
// rewrites, mirror policies, and the (possibly weighted) destination clusters.
func (lb *ListenerBuilder) routeDestination(out *route.Route, in *networking.HTTPRoute, authority string, listenerPort int) {
	policy := in.Retries
	if policy == nil {
		// No VS policy set, use mesh defaults
		policy = lb.push.Mesh.GetDefaultHttpRetryPolicy()
	}
	action := &route.RouteAction{
		RetryPolicy: retry.ConvertPolicy(policy),
	}
	// Configure timeouts specified by Virtual Service if they are provided, otherwise set it to defaults.
	action.Timeout = istio_route.Notimeout
	if in.Timeout != nil {
		action.Timeout = in.Timeout
	}
	// Use deprecated value for now as the replacement MaxStreamDuration has some regressions.
	// nolint: staticcheck
	action.MaxGrpcTimeout = action.Timeout
	out.Action = &route.Route_Route{Route: action}
	// An explicit Rewrite overrides any authority derived from header operations.
	if in.Rewrite != nil {
		action.PrefixRewrite = in.Rewrite.GetUri()
		if in.Rewrite.GetAuthority() != "" {
			authority = in.Rewrite.GetAuthority()
		}
	}
	if authority != "" {
		action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{
			HostRewriteLiteral: authority,
		}
	}
	// Mirror policies: the single legacy Mirror field plus the Mirrors list.
	if in.Mirror != nil {
		if mp := istio_route.MirrorPercent(in); mp != nil {
			action.RequestMirrorPolicies = append(action.RequestMirrorPolicies,
				istio_route.TranslateRequestMirrorPolicy(in.Mirror, lb.serviceForHostname(host.Name(in.Mirror.Host)), listenerPort, mp))
		}
	}
	for _, mirror := range in.Mirrors {
		if mp := istio_route.MirrorPercentByPolicy(mirror); mp != nil && mirror.Destination != nil {
			action.RequestMirrorPolicies = append(action.RequestMirrorPolicies,
				istio_route.TranslateRequestMirrorPolicy(mirror.Destination, lb.serviceForHostname(host.Name(mirror.Destination.Host)), listenerPort, mp))
		}
	}
	// TODO: eliminate this logic and use the total_weight option in envoy route
	weighted := make([]*route.WeightedCluster_ClusterWeight, 0)
	for _, dst := range in.Route {
		weight := &wrappers.UInt32Value{Value: uint32(dst.Weight)}
		if dst.Weight == 0 {
			// Ignore 0 weighted clusters if there are other clusters in the route.
			// But if this is the only cluster in the route, then add it as a cluster with weight 100
			if len(in.Route) == 1 {
				weight.Value = uint32(100)
			} else {
				continue
			}
		}
		hostname := host.Name(dst.GetDestination().GetHost())
		n := lb.GetDestinationCluster(dst.Destination, lb.serviceForHostname(hostname), listenerPort)
		clusterWeight := &route.WeightedCluster_ClusterWeight{
			Name:   n,
			Weight: weight,
		}
		// Per-destination header manipulation.
		if dst.Headers != nil {
			operations := istio_route.TranslateHeadersOperations(dst.Headers)
			clusterWeight.RequestHeadersToAdd = operations.RequestHeadersToAdd
			clusterWeight.RequestHeadersToRemove = operations.RequestHeadersToRemove
			clusterWeight.ResponseHeadersToAdd = operations.ResponseHeadersToAdd
			clusterWeight.ResponseHeadersToRemove = operations.ResponseHeadersToRemove
			if operations.Authority != "" {
				clusterWeight.HostRewriteSpecifier = &route.WeightedCluster_ClusterWeight_HostRewriteLiteral{
					HostRewriteLiteral: operations.Authority,
				}
			}
		}
		weighted = append(weighted, clusterWeight)
	}
	// rewrite to a single cluster if there is only weighted cluster
	if len(weighted) == 1 {
		action.ClusterSpecifier = &route.RouteAction_Cluster{Cluster: weighted[0].Name}
		// Promote the single cluster's header operations to the route level.
		out.RequestHeadersToAdd = append(out.RequestHeadersToAdd, weighted[0].RequestHeadersToAdd...)
		out.RequestHeadersToRemove = append(out.RequestHeadersToRemove, weighted[0].RequestHeadersToRemove...)
		out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, weighted[0].ResponseHeadersToAdd...)
		out.ResponseHeadersToRemove = append(out.ResponseHeadersToRemove, weighted[0].ResponseHeadersToRemove...)
		if weighted[0].HostRewriteSpecifier != nil && action.HostRewriteSpecifier == nil {
			// Ideally, if the weighted cluster overwrites authority, it has precedence. This mirrors behavior of headers,
			// because for headers we append the weighted last which allows it to Set and wipe out previous Adds.
			// However, Envoy behavior is different when we set at both cluster level and route level, and we want
			// behavior to be consistent with a single cluster and multiple clusters.
			// As a result, we only override if the top level rewrite is not set
			action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{
				HostRewriteLiteral: weighted[0].GetHostRewriteLiteral(),
			}
		}
	} else {
		action.ClusterSpecifier = &route.RouteAction_WeightedClusters{
			WeightedClusters: &route.WeightedCluster{
				Clusters: weighted,
			},
		}
	}
}
// GetDestinationCluster generates a cluster name for the route, or error if no cluster
// can be found. Called by translateRule to determine if
func (lb *ListenerBuilder) GetDestinationCluster(destination *networking.Destination, service *model.Service, listenerPort int) string {
	// Start from the inbound VIP direction with an "http" subset; both may be
	// overridden below when this waypoint is not responsible for the service.
	direction := model.TrafficDirectionInboundVIP
	subset := "http"
	if destination.Subset != "" {
		subset += "/" + destination.Subset
	}
	// Resolve the destination port: an explicit port on the destination wins;
	// a single-port service implies that port; otherwise keep the listener port.
	port := listenerPort
	switch {
	case destination.GetPort() != nil:
		port = int(destination.GetPort().GetNumber())
	case service != nil && len(service.Ports) == 1:
		// if service only has one port defined, use that as the port, otherwise use default listenerPort
		port = service.Ports[0].Port
		// Do not return blackhole cluster for service==nil case as there is a legitimate use case for
		// calling this function with nil service: to route to a pre-defined statically configured cluster
		// declared as part of the bootstrap.
		// If blackhole cluster is needed, do the check on the caller side. See gateway and tls.go for examples.
	}
	if service != nil {
		_, wps := findWaypointResources(lb.node, lb.push)
		if _, responsible := wps.services[service.Hostname]; !responsible || service.MeshExternal {
			// this waypoint proxy isn't responsible for this service so we use outbound; TODO quicker lookup
			direction, subset = model.TrafficDirectionOutbound, destination.Subset
		}
	}
	return model.BuildSubsetKey(
		direction,
		subset,
		host.Name(destination.Host),
		port,
	)
}
// NB: Un-typed SAN validation is ignored when typed is used, so only typed version must be used with this function.
func buildCommonConnectTLSContext(proxy *model.Proxy, push *model.PushContext) *tls.CommonTlsContext {
	ctx := &tls.CommonTlsContext{}
	security.ApplyToCommonTLSContext(ctx, proxy, nil, nil, true)
	// Add a typed URI SAN matcher for every trust-domain alias configured in the mesh.
	if aliases := authn.TrustDomainsForValidation(push.Mesh); len(aliases) > 0 {
		validationCtx := ctx.GetCombinedValidationContext().DefaultValidationContext
		for _, m := range util.StringToPrefixMatch(security.AppendURIPrefixToTrustDomain(aliases)) {
			san := &tls.SubjectAltNameMatcher{
				SanType: tls.SubjectAltNameMatcher_URI,
				Matcher: m,
			}
			validationCtx.MatchTypedSubjectAltNames = append(validationCtx.MatchTypedSubjectAltNames, san)
		}
	}
	ctx.AlpnProtocols = []string{"h2"}
	// Pin both ends of the negotiable range so TLS 1.3 is used everywhere.
	ctx.TlsParams = &tls.TlsParameters{
		TlsMaximumProtocolVersion: tls.TlsParameters_TLSv1_3,
		TlsMinimumProtocolVersion: tls.TlsParameters_TLSv1_3,
	}
	return ctx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// packages used for load balancer setting
package loadbalancer
import (
"math"
"sort"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/util/sets"
)
const (
	// FailoverPriorityLabelDefaultSeparator separates a failover-priority label
	// name from its optional fixed value, e.g. "topology.istio.io/network=n1".
	FailoverPriorityLabelDefaultSeparator = '='
)
// GetLocalityLbSetting returns the locality load balancer setting to apply,
// merging mesh-wide config with a DestinationRule override. It returns nil
// when locality LB is disabled, and otherwise prefers the DestinationRule
// setting over the mesh default.
func GetLocalityLbSetting(
	mesh *v1alpha3.LocalityLoadBalancerSetting,
	destrule *v1alpha3.LocalityLoadBalancerSetting,
) *v1alpha3.LocalityLoadBalancerSetting {
	// Enabled by default whenever mesh config defines locality LB and does not
	// explicitly turn it off.
	enabled := mesh != nil && (mesh.Enabled == nil || mesh.Enabled.Value)
	// A DestinationRule, when present, always has the final say on enablement:
	// it disables only when Enabled is explicitly false.
	if destrule != nil {
		enabled = destrule.Enabled == nil || destrule.Enabled.Value
	}
	if !enabled {
		return nil
	}
	// Destination Rule overrides mesh config; fall back to the mesh default.
	if destrule != nil {
		return destrule
	}
	return mesh
}
// ApplyLocalityLBSetting mutates loadAssignment in place according to the given
// locality load balancer setting. Exactly one strategy is applied: Distribute
// takes precedence; otherwise Failover/FailoverPriority is applied when
// enableFailover is set and locality LB is not explicitly disabled.
func ApplyLocalityLBSetting(
	loadAssignment *endpoint.ClusterLoadAssignment,
	wrappedLocalityLbEndpoints []*WrappedLocalityLbEndpoints,
	locality *core.Locality,
	proxyLabels map[string]string,
	localityLB *v1alpha3.LocalityLoadBalancerSetting,
	enableFailover bool,
) {
	if localityLB == nil || loadAssignment == nil {
		return
	}
	// one of Distribute or Failover settings can be applied.
	if localityLB.GetDistribute() != nil {
		applyLocalityWeight(locality, loadAssignment, localityLB.GetDistribute())
		// Failover needs outlier detection, otherwise Envoy will never drop down to a lower priority.
		// Do not apply default failover when locality LB is disabled.
	} else if enableFailover && (localityLB.Enabled == nil || localityLB.Enabled.Value) {
		if len(localityLB.FailoverPriority) > 0 {
			// Label-based failover priority is applied first; region-based failover
			// (when also configured) is layered on top of the resulting priorities.
			applyPriorityFailover(loadAssignment, wrappedLocalityLbEndpoints, proxyLabels, localityLB.FailoverPriority)
			if len(localityLB.Failover) != 0 {
				applyLocalityFailover(locality, loadAssignment, localityLB.Failover)
			}
			return
		}
		applyLocalityFailover(locality, loadAssignment, localityLB.Failover)
	}
}
// set locality loadbalancing weight: applies the first Distribute entry whose
// "from" matches the proxy locality, rewriting per-locality LoadBalancingWeight
// on loadAssignment in place and emptying endpoint groups that match no "to"
// locality.
func applyLocalityWeight(
	locality *core.Locality,
	loadAssignment *endpoint.ClusterLoadAssignment,
	distribute []*v1alpha3.LocalityLoadBalancerSetting_Distribute,
) {
	if distribute == nil {
		return
	}
	// Support Locality weighted load balancing
	// (https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/locality_weight#locality-weighted-load-balancing)
	// by providing weights in LocalityLbEndpoints via load_balancing_weight.
	// By setting weights across different localities, it can allow
	// Envoy to weight assignments across different zones and geographical locations.
	for _, localityWeightSetting := range distribute {
		if localityWeightSetting != nil &&
			util.LocalityMatch(locality, localityWeightSetting.From) {
			// misMatched tracks endpoint groups not yet claimed by any "to"
			// locality; whatever remains at the end gets its endpoints cleared.
			misMatched := sets.Set[int]{}
			for i := range loadAssignment.Endpoints {
				misMatched.Insert(i)
			}
			for locality, weight := range localityWeightSetting.To {
				// index -> original weight
				destLocMap := map[int]uint32{}
				totalWeight := uint32(0)
				for i, ep := range loadAssignment.Endpoints {
					// Each endpoint group is consumed by at most one "to" entry.
					if misMatched.Contains(i) {
						if util.LocalityMatch(ep.Locality, locality) {
							delete(misMatched, i)
							if ep.LoadBalancingWeight != nil {
								destLocMap[i] = ep.LoadBalancingWeight.Value
							} else {
								// A group without an explicit weight counts as 1.
								destLocMap[i] = 1
							}
							totalWeight += destLocMap[i]
						}
					}
				}
				// in case wildcard dest matching multi groups of endpoints
				// the load balancing weight for a locality is divided by the sum of the weights of all localities
				for index, originalWeight := range destLocMap {
					destWeight := float64(originalWeight*weight) / float64(totalWeight)
					if destWeight > 0 {
						loadAssignment.Endpoints[index].LoadBalancingWeight = &wrappers.UInt32Value{
							// Round up so small shares never collapse to zero.
							Value: uint32(math.Ceil(destWeight)),
						}
					}
				}
			}
			// remove groups of endpoints in a locality that miss matched
			for i := range misMatched {
				if loadAssignment.Endpoints[i] != nil {
					loadAssignment.Endpoints[i].LbEndpoints = nil
				}
			}
			// Only the first matching Distribute entry is applied.
			break
		}
	}
}
// set locality loadbalancing priority: ranks each LocalityLbEndpoints group by
// how closely it matches the proxy locality (and the configured region failover
// rules), then compacts the resulting priorities so they are consecutive from 0.
func applyLocalityFailover(
	locality *core.Locality,
	loadAssignment *endpoint.ClusterLoadAssignment,
	failover []*v1alpha3.LocalityLoadBalancerSetting_Failover,
) {
	// key is priority, value is the index of the LocalityLbEndpoints in ClusterLoadAssignment
	priorityMap := map[int][]int{}
	// 1. calculate the LocalityLbEndpoints.Priority compared with proxy locality
	for i, localityEndpoint := range loadAssignment.Endpoints {
		// if region/zone/subZone all match, the priority is 0.
		// if region/zone match, the priority is 1.
		// if region matches, the priority is 2.
		// if locality not match, the priority is 3.
		priority := util.LbPriority(locality, localityEndpoint.Locality)
		// region not match, apply failover settings when specified
		// update localityLbEndpoints' priority to 4 if failover not match
		if priority == 3 {
			for _, failoverSetting := range failover {
				if failoverSetting.From == locality.Region {
					// Demote groups outside the designated failover region.
					if localityEndpoint.Locality == nil || localityEndpoint.Locality.Region != failoverSetting.To {
						priority = 4
					}
					break
				}
			}
		}
		// priority is calculated using the already assigned priority using failoverPriority.
		// Since there are at most 5 priorities can be assigned using locality failover(0-4),
		// we multiply the priority by 5 for maintaining the priorities already assigned.
		// Afterwards the final priorities can be calculted from 0 (highest) to N (lowest) without skipping.
		priorityInt := int(loadAssignment.Endpoints[i].Priority*5) + priority
		loadAssignment.Endpoints[i].Priority = uint32(priorityInt)
		priorityMap[priorityInt] = append(priorityMap[priorityInt], i)
	}
	// since Priorities should range from 0 (highest) to N (lowest) without skipping.
	// 2. adjust the priorities in order
	// 2.1 sort all priorities in increasing order.
	priorities := []int{}
	for priority := range priorityMap {
		priorities = append(priorities, priority)
	}
	sort.Ints(priorities)
	// 2.2 adjust LocalityLbEndpoints priority
	// if the index and value of priorities array is not equal.
	for i, priority := range priorities {
		if i != priority {
			// the LocalityLbEndpoints index in ClusterLoadAssignment.Endpoints
			for _, index := range priorityMap[priority] {
				loadAssignment.Endpoints[index].Priority = uint32(i)
			}
		}
	}
}
// WrappedLocalityLbEndpoints contain an envoy LocalityLbEndpoints
// and the original IstioEndpoints used to generate it.
// It is used to do failover priority label match with proxy labels.
type WrappedLocalityLbEndpoints struct {
	// IstioEndpoints are the source endpoints, index-aligned with
	// LocalityLbEndpoints.LbEndpoints, retained for label matching.
	IstioEndpoints []*model.IstioEndpoint
	// LocalityLbEndpoints is the Envoy representation generated from IstioEndpoints.
	LocalityLbEndpoints *endpoint.LocalityLbEndpoints
}
// set loadbalancing priority by failover priority label: splits each locality's
// endpoints into priority groups by label match against the proxy, rebuilds the
// assignment's endpoint list from those groups, and compacts priorities to be
// consecutive from 0.
func applyPriorityFailover(
	loadAssignment *endpoint.ClusterLoadAssignment,
	wrappedLocalityLbEndpoints []*WrappedLocalityLbEndpoints,
	proxyLabels map[string]string,
	failoverPriorities []string,
) {
	// Without proxy labels (or endpoints) there is nothing to match against.
	if len(proxyLabels) == 0 || len(wrappedLocalityLbEndpoints) == 0 {
		return
	}
	priorityMap := make(map[int][]int, len(failoverPriorities))
	localityLbEndpoints := []*endpoint.LocalityLbEndpoints{}
	// Split every locality group into per-priority groups.
	for _, wrappedLbEndpoint := range wrappedLocalityLbEndpoints {
		localityLbEndpointsPerLocality := applyPriorityFailoverPerLocality(proxyLabels, wrappedLbEndpoint, failoverPriorities)
		localityLbEndpoints = append(localityLbEndpoints, localityLbEndpointsPerLocality...)
	}
	// Index the resulting groups by their assigned priority.
	for i, ep := range localityLbEndpoints {
		priorityMap[int(ep.Priority)] = append(priorityMap[int(ep.Priority)], i)
	}
	// since Priorities should range from 0 (highest) to N (lowest) without skipping.
	// adjust the priorities in order
	// 1. sort all priorities in increasing order.
	priorities := []int{}
	for priority := range priorityMap {
		priorities = append(priorities, priority)
	}
	sort.Ints(priorities)
	// 2. adjust LocalityLbEndpoints priority
	// if the index and value of priorities array is not equal.
	for i, priority := range priorities {
		if i != priority {
			// the LocalityLbEndpoints index in ClusterLoadAssignment.Endpoints
			for _, index := range priorityMap[priority] {
				localityLbEndpoints[index].Priority = uint32(i)
			}
		}
	}
	// Replace the original endpoint groups with the re-prioritized split.
	loadAssignment.Endpoints = localityLbEndpoints
}
// priorityLabelOverrides parses failover-priority entries of the form "label"
// or "label=value". It returns the label names (in input order, since map
// iteration is unordered) and a map of labels whose value was pinned explicitly.
// Entries with more than one separator are kept as labels but get no override.
func priorityLabelOverrides(labels []string) ([]string, map[string]string) {
	names := make([]string, 0, len(labels))
	overrides := make(map[string]string, len(labels))
	sep := string(FailoverPriorityLabelDefaultSeparator)
	for _, entry := range labels {
		parts := strings.Split(entry, sep)
		names = append(names, parts[0])
		if len(parts) == 2 {
			overrides[parts[0]] = parts[1]
		}
	}
	return names, overrides
}
// set loadbalancing priority by failover priority label.
// split one LocalityLbEndpoints to multiple LocalityLbEndpoints based on failover priorities.
func applyPriorityFailoverPerLocality(
	proxyLabels map[string]string,
	ep *WrappedLocalityLbEndpoints,
	failoverPriorities []string,
) []*endpoint.LocalityLbEndpoints {
	lowestPriority := len(failoverPriorities)
	// key is priority, value is the index of LocalityLbEndpoints.LbEndpoints
	priorityMap := map[int][]int{}
	// NOTE(review): this local shadows the package-level priorityLabelOverrides
	// function; consider renaming for clarity.
	priorityLabels, priorityLabelOverrides := priorityLabelOverrides(failoverPriorities)
	for i, istioEndpoint := range ep.IstioEndpoints {
		var priority int
		// failoverPriority labels match
		// The first mismatching label determines the priority: mismatching an
		// earlier (more important) label yields a larger (worse) priority number;
		// matching all labels yields priority 0 (best).
		for j, label := range priorityLabels {
			// An explicit "label=value" override replaces the proxy's own label value.
			valueForProxy, ok := priorityLabelOverrides[label]
			if !ok {
				valueForProxy = proxyLabels[label]
			}
			if valueForProxy != istioEndpoint.Labels[label] {
				priority = lowestPriority - j
				break
			}
		}
		priorityMap[priority] = append(priorityMap[priority], i)
	}
	// sort all priorities in increasing order.
	priorities := []int{}
	for priority := range priorityMap {
		priorities = append(priorities, priority)
	}
	sort.Ints(priorities)
	// Build one LocalityLbEndpoints clone per distinct priority, carrying only
	// the endpoints of that priority and the sum of their weights.
	out := make([]*endpoint.LocalityLbEndpoints, len(priorityMap))
	for i, priority := range priorities {
		out[i] = util.CloneLocalityLbEndpoint(ep.LocalityLbEndpoints)
		out[i].LbEndpoints = nil
		out[i].Priority = uint32(priority)
		var weight uint32
		for _, index := range priorityMap[priority] {
			out[i].LbEndpoints = append(out[i].LbEndpoints, ep.LocalityLbEndpoints.LbEndpoints[index])
			weight += ep.LocalityLbEndpoints.LbEndpoints[index].GetLoadBalancingWeight().GetValue()
		}
		// reset weight
		out[i].LoadBalancingWeight = &wrappers.UInt32Value{
			Value: weight,
		}
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package match
import (
xds "github.com/cncf/xds/go/xds/core/v3"
matcher "github.com/cncf/xds/go/xds/type/matcher/v3"
network "github.com/envoyproxy/go-control-plane/envoy/extensions/matching/common_inputs/network/v3"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/log"
)
// Pre-built typed matcher inputs for Envoy's unified matching API, each
// extracting one attribute of the incoming connection.
var (
	// DestinationPort matches on the connection's destination port.
	DestinationPort = &xds.TypedExtensionConfig{
		Name:        "port",
		TypedConfig: protoconv.MessageToAny(&network.DestinationPortInput{}),
	}
	// DestinationIP matches on the connection's destination IP address.
	DestinationIP = &xds.TypedExtensionConfig{
		Name:        "ip",
		TypedConfig: protoconv.MessageToAny(&network.DestinationIPInput{}),
	}
	// SourceIP matches on the connection's source IP address.
	SourceIP = &xds.TypedExtensionConfig{
		Name:        "source-ip",
		TypedConfig: protoconv.MessageToAny(&network.SourceIPInput{}),
	}
	// SNI matches on the TLS server name (SNI) of the connection.
	SNI = &xds.TypedExtensionConfig{
		Name:        "sni",
		TypedConfig: protoconv.MessageToAny(&network.ServerNameInput{}),
	}
	// ApplicationProtocolInput matches on the detected application protocol.
	ApplicationProtocolInput = &xds.TypedExtensionConfig{
		Name:        "application-protocol",
		TypedConfig: protoconv.MessageToAny(&network.ApplicationProtocolInput{}),
	}
	// TransportProtocolInput matches on the transport protocol (e.g. raw_buffer, tls).
	TransportProtocolInput = &xds.TypedExtensionConfig{
		Name:        "transport-protocol",
		TypedConfig: protoconv.MessageToAny(&network.TransportProtocolInput{}),
	}
)
// Mapper wraps an Envoy match-tree Matcher together with a direct handle to
// its mutable match map, so branches can be added after construction and are
// reflected in the embedded Matcher.
type Mapper struct {
	*matcher.Matcher
	// Map is the matcher tree's match map (exact or prefix), keyed by input value.
	Map map[string]*matcher.Matcher_OnMatch
}
// newMapper constructs a Mapper: an exact-match tree over the given matcher
// input, starting with an empty branch map and no fallback.
func newMapper(input *xds.TypedExtensionConfig) Mapper {
	branches := map[string]*matcher.Matcher_OnMatch{}
	tree := &matcher.Matcher_MatcherTree{
		Input: input,
		TreeType: &matcher.Matcher_MatcherTree_ExactMatchMap{
			ExactMatchMap: &matcher.Matcher_MatcherTree_MatchMap{Map: branches},
		},
	}
	return Mapper{
		Matcher: &matcher.Matcher{
			MatcherType: &matcher.Matcher_MatcherTree_{MatcherTree: tree},
			OnNoMatch:   nil,
		},
		Map: branches,
	}
}
// NewDestinationIP returns a Mapper keyed on the connection's destination IP.
func NewDestinationIP() Mapper {
	return newMapper(DestinationIP)
}
// NewSourceIP returns a Mapper keyed on the connection's source IP.
func NewSourceIP() Mapper {
	return newMapper(SourceIP)
}
// NewDestinationPort returns a Mapper keyed on the connection's destination port.
func NewDestinationPort() Mapper {
	return newMapper(DestinationPort)
}
// ProtocolMatch holds the actions to take when a connection is detected as
// plain TCP versus HTTP.
type ProtocolMatch struct {
	TCP, HTTP *matcher.Matcher_OnMatch
}
// NewAppProtocol builds a matcher on the detected application protocol:
// known HTTP protocols route to pm.HTTP, anything else falls back to pm.TCP.
func NewAppProtocol(pm ProtocolMatch) *matcher.Matcher {
	mapper := newMapper(ApplicationProtocolInput)
	httpValues := []string{"'h2c'", "'http/1.1'"}
	// HTTP/1.0 is only treated as HTTP when explicitly enabled.
	if features.HTTP10 {
		httpValues = append(httpValues, "'http/1.0'")
	}
	for _, v := range httpValues {
		mapper.Map[v] = pm.HTTP
	}
	mapper.OnNoMatch = pm.TCP
	return mapper.Matcher
}
// ToChain builds an OnMatch action that selects the named filter chain; the
// chain name is carried as a StringValue in the action's typed config.
func ToChain(name string) *matcher.Matcher_OnMatch {
	action := &xds.TypedExtensionConfig{
		Name:        name,
		TypedConfig: protoconv.MessageToAny(&wrappers.StringValue{Value: name}),
	}
	return &matcher.Matcher_OnMatch{
		OnMatch: &matcher.Matcher_OnMatch_Action{Action: action},
	}
}
// ToMatcher wraps a Matcher into an OnMatch so it can be nested as the result
// of another matcher's branch.
func ToMatcher(match *matcher.Matcher) *matcher.Matcher_OnMatch {
	return &matcher.Matcher_OnMatch{
		OnMatch: &matcher.Matcher_OnMatch_Matcher{
			Matcher: match,
		},
	}
}
// BuildMatcher cleans the entire match tree to avoid empty maps and returns a viable top-level matcher.
// Note: this mutates the internal mappers/matchers that make up the tree.
// Returns nil when the tree cannot be repaired (an empty root with no usable fallback).
func (m Mapper) BuildMatcher() *matcher.Matcher {
	root := m
	// Descend through empty roots by following their fallback matchers.
	for len(root.Map) == 0 {
		// the top level matcher is empty; if its fallback goes to a matcher, return that
		// TODO is there a way we can just say "always go to action"?
		if fallback := root.GetOnNoMatch(); fallback != nil {
			if replacement, ok := mapperFromMatch(fallback.GetMatcher()); ok {
				root = replacement
				continue
			}
		}
		// no fallback or fallback isn't a mapper
		log.Warnf("could not repair invalid matcher; empty map at root matcher does not have a map fallback")
		return nil
	}
	// Seed the work queue with the original mapper's fallback plus every branch
	// of the (possibly replaced) root.
	// NOTE(review): this uses m.OnNoMatch rather than root.OnNoMatch even after
	// root may have been replaced above — confirm this is intentional.
	q := []*matcher.Matcher_OnMatch{m.OnNoMatch}
	for _, onMatch := range root.Map {
		q = append(q, onMatch)
	}
	// fix the matchers, add child mappers OnMatch to the queue
	// (breadth-first repair over the whole tree)
	for len(q) > 0 {
		head := q[0]
		q = q[1:]
		q = append(q, fixEmptyOnMatchMap(head)...)
	}
	return root.Matcher
}
// if the onMatch sends to an empty mapper, make the onMatch send directly to the onNoMatch of that empty mapper
// returns mapper if it doesn't need to be fixed, or can't be fixed
// The returned OnMatch slice is the set of children still needing inspection.
func fixEmptyOnMatchMap(onMatch *matcher.Matcher_OnMatch) []*matcher.Matcher_OnMatch {
	if onMatch == nil {
		return nil
	}
	innerMatcher := onMatch.GetMatcher()
	if innerMatcher == nil {
		// this already just performs an Action
		return nil
	}
	innerMapper, ok := mapperFromMatch(innerMatcher)
	if !ok {
		// this isn't a mapper or action, not supported by this func
		return nil
	}
	if len(innerMapper.Map) > 0 {
		// Non-empty mapper: nothing to collapse here, but its children may
		// still need fixing.
		return innerMapper.allOnMatches()
	}
	if fallback := innerMapper.GetOnNoMatch(); fallback != nil {
		// change from: onMatch -> map (empty with fallback) to onMatch -> fallback
		// that fallback may be an empty map, so we re-queue onMatch in case it still needs fixing
		onMatch.OnMatch = fallback.OnMatch
		return []*matcher.Matcher_OnMatch{onMatch} // the inner mapper is gone
	}
	// envoy will nack this eventually
	log.Warnf("empty mapper %v with no fallback", innerMapper.Matcher)
	return innerMapper.allOnMatches()
}
// allOnMatches returns this mapper's fallback OnNoMatch followed by every
// branch of its map (iteration order unspecified). The fallback entry may be nil.
func (m Mapper) allOnMatches() []*matcher.Matcher_OnMatch {
	out := make([]*matcher.Matcher_OnMatch, 0, 1+len(m.Map))
	out = append(out, m.OnNoMatch)
	// Ranging over a nil map is a no-op, so no explicit nil check is needed.
	for _, onMatch := range m.Map {
		out = append(out, onMatch)
	}
	return out
}
// mapperFromMatch attempts to view an arbitrary Matcher as a Mapper. It
// succeeds only for match-tree matchers using an exact or prefix match map;
// the boolean reports whether the conversion applied.
func mapperFromMatch(mmatcher *matcher.Matcher) (Mapper, bool) {
	if mmatcher == nil {
		return Mapper{}, false
	}
	tree, isTree := mmatcher.MatcherType.(*matcher.Matcher_MatcherTree_)
	if !isTree {
		// Matcher lists (and anything else) are not mappers.
		return Mapper{}, false
	}
	var mmap *matcher.Matcher_MatcherTree_MatchMap
	switch t := tree.MatcherTree.TreeType.(type) {
	case *matcher.Matcher_MatcherTree_PrefixMatchMap:
		mmap = t.PrefixMatchMap
	case *matcher.Matcher_MatcherTree_ExactMatchMap:
		mmap = t.ExactMatchMap
	default:
		return Mapper{}, false
	}
	return Mapper{Matcher: mmatcher, Map: mmap.Map}, true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
dnsProto "istio.io/istio/pkg/dns/proto"
dnsServer "istio.io/istio/pkg/dns/server"
)
// BuildNameTable produces a table of hostnames and their associated IPs that can then
// be used by the agent to resolve DNS. This logic is always active. However, local DNS resolution
// will only be effective if DNS capture is enabled in the proxy
func (configgen *ConfigGeneratorImpl) BuildNameTable(node *model.Proxy, push *model.PushContext) *dnsProto.NameTable {
	cfg := dnsServer.Config{
		Node:                        node,
		Push:                        push,
		MulticlusterHeadlessEnabled: features.MulticlusterHeadlessEnabled,
	}
	return dnsServer.BuildNameTable(cfg)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"time"
mysql "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/filters/network/mysql_proxy/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
mongo "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/mongo_proxy/v3"
redis "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/redis_proxy/v3"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
hashpolicy "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"google.golang.org/protobuf/types/known/durationpb"
extensions "istio.io/api/extensions/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/extension"
istioroute "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/tunnelingconfig"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/wellknown"
)
// redisOpTimeout is the default operation timeout for the Redis proxy filter.
var redisOpTimeout = 5 * time.Second
// buildMetadataExchangeNetworkFilters returns the TCP metadata-exchange
// listener filter when the feature is enabled, otherwise an empty slice.
func buildMetadataExchangeNetworkFilters() []*listener.Filter {
	// We add metadata exchange on inbound only; outbound is handled in cluster filter
	if !features.MetadataExchange {
		return make([]*listener.Filter, 0)
	}
	return []*listener.Filter{xdsfilters.TCPListenerMx}
}
// buildMetricsNetworkFilters returns the TCP telemetry filters configured for
// the given proxy and listener class.
func buildMetricsNetworkFilters(push *model.PushContext, proxy *model.Proxy, class istionetworking.ListenerClass) []*listener.Filter {
	return push.Telemetry.TCPFilters(proxy, class)
}
// setAccessLogAndBuildTCPFilter sets the AccessLog configuration in the given
// TcpProxy instance and builds a TCP filter out of it.
func setAccessLogAndBuildTCPFilter(push *model.PushContext, node *model.Proxy, config *tcp.TcpProxy, class istionetworking.ListenerClass) *listener.Filter {
	// Mutates config in place before it is serialized into the filter.
	accessLogBuilder.setTCPAccessLog(push, node, config, class)
	return &listener.Filter{
		Name:       wellknown.TCPProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(config)},
	}
}
// buildOutboundNetworkFiltersWithSingleDestination takes a single cluster name
// and builds a stack of network filters.
func (lb *ListenerBuilder) buildOutboundNetworkFiltersWithSingleDestination(
	statPrefix, clusterName, subsetName string, port *model.Port, destinationRule *networking.DestinationRule, applyTunnelingConfig tunnelingconfig.ApplyFunc,
	includeMx bool,
) []*listener.Filter {
	// Prefer the DestinationRule's TCP idle timeout; otherwise fall back to the
	// proxy metadata value (which may itself be nil).
	idleTimeout := destinationRule.GetTrafficPolicy().GetConnectionPool().GetTcp().GetIdleTimeout()
	if idleTimeout == nil {
		idleTimeout = parseDuration(lb.node.Metadata.IdleTimeout)
	}
	tcpProxy := &tcp.TcpProxy{
		StatPrefix:                      statPrefix,
		ClusterSpecifier:                &tcp.TcpProxy_Cluster{Cluster: clusterName},
		IdleTimeout:                     idleTimeout,
		MaxDownstreamConnectionDuration: destinationRule.GetTrafficPolicy().GetConnectionPool().GetTcp().GetMaxConnectionDuration(),
	}
	// Hash policy and tunneling config may both be overridden by the matching subset.
	maybeSetHashPolicy(destinationRule, tcpProxy, subsetName)
	applyTunnelingConfig(tcpProxy, destinationRule, subsetName)
	class := model.OutboundListenerClass(lb.node.Type)
	tcpFilter := setAccessLogAndBuildTCPFilter(lb.push, lb.node, tcpProxy, class)
	// Protocol-specific filters (Mongo/Redis/MySQL) may be layered around the TCP proxy.
	networkFilterStack := buildNetworkFiltersStack(port.Protocol, tcpFilter, statPrefix, clusterName)
	return lb.buildCompleteNetworkFilters(class, port.Port, networkFilterStack, includeMx)
}
// buildCompleteNetworkFilters assembles the full ordered network filter chain:
// metadata exchange, custom/Wasm authn+authz, stats, and finally the supplied
// terminal filter stack. Ordering is load-bearing; do not reorder casually.
func (lb *ListenerBuilder) buildCompleteNetworkFilters(
	class istionetworking.ListenerClass,
	port int,
	networkFilterStack []*listener.Filter,
	includeMx bool,
) []*listener.Filter {
	var filters []*listener.Filter
	wasm := lb.push.WasmPluginsByListenerInfo(lb.node, model.WasmPluginListenerInfo{
		Port:  port,
		Class: class,
	}, model.WasmPluginTypeNetwork)
	// Metadata exchange goes first, so RBAC failures, etc can access the state. See https://github.com/istio/istio/issues/41066
	if features.MetadataExchange && includeMx {
		filters = append(filters, xdsfilters.TCPListenerMx)
	}
	// TODO: not sure why it goes here
	filters = append(filters, lb.authzCustomBuilder.BuildTCP()...)
	// Authn
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_AUTHN)
	// Authz
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_AUTHZ)
	filters = append(filters, lb.authzBuilder.BuildTCP()...)
	// Stats
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_STATS)
	filters = extension.PopAppendNetwork(filters, wasm, extensions.PluginPhase_UNSPECIFIED_PHASE)
	filters = append(filters, buildMetricsNetworkFilters(lb.push, lb.node, class)...)
	// Terminal filters
	filters = append(filters, networkFilterStack...)
	return filters
}
// buildOutboundNetworkFiltersWithWeightedClusters takes a set of weighted
// destination routes and builds a stack of network filters.
func (lb *ListenerBuilder) buildOutboundNetworkFiltersWithWeightedClusters(routes []*networking.RouteDestination,
	port *model.Port, configMeta config.Meta, destinationRule *networking.DestinationRule,
	includeMx bool,
) []*listener.Filter {
	statPrefix := configMeta.Name + "." + configMeta.Namespace
	clusterSpecifier := &tcp.TcpProxy_WeightedClusters{
		WeightedClusters: &tcp.TcpProxy_WeightedCluster{},
	}
	// Prefer the DestinationRule's TCP idle timeout; otherwise fall back to the
	// proxy metadata value (which may itself be nil).
	idleTimeout := destinationRule.GetTrafficPolicy().GetConnectionPool().GetTcp().GetIdleTimeout()
	if idleTimeout == nil {
		idleTimeout = parseDuration(lb.node.Metadata.IdleTimeout)
	}
	tcpProxy := &tcp.TcpProxy{
		StatPrefix:                      statPrefix,
		ClusterSpecifier:                clusterSpecifier,
		IdleTimeout:                     idleTimeout,
		MaxDownstreamConnectionDuration: destinationRule.GetTrafficPolicy().GetConnectionPool().GetTcp().GetMaxConnectionDuration(),
	}
	// Zero-weight routes are dropped from the weighted cluster set.
	for _, route := range routes {
		service := lb.push.ServiceForHostname(lb.node, host.Name(route.Destination.Host))
		if route.Weight > 0 {
			clusterName := istioroute.GetDestinationCluster(route.Destination, service, port.Port)
			clusterSpecifier.WeightedClusters.Clusters = append(clusterSpecifier.WeightedClusters.Clusters, &tcp.TcpProxy_WeightedCluster_ClusterWeight{
				Name:   clusterName,
				Weight: uint32(route.Weight),
			})
		}
	}
	// For weighted clusters set hash policy if any of the upstream destinations have sourceIP.
	maybeSetHashPolicy(destinationRule, tcpProxy, "")
	// In case of weighted clusters, tunneling config for a subset is ignored,
	// because it is set on listener, not on a cluster.
	tunnelingconfig.Apply(tcpProxy, destinationRule, "")
	// TODO: Need to handle multiple cluster names for Redis
	// NOTE(review): this indexes Clusters[0] and assumes at least one route has
	// a positive weight — presumably guaranteed by VirtualService validation
	// upstream; confirm, otherwise an all-zero-weight route set would panic here.
	clusterName := clusterSpecifier.WeightedClusters.Clusters[0].Name
	class := model.OutboundListenerClass(lb.node.Type)
	tcpFilter := setAccessLogAndBuildTCPFilter(lb.push, lb.node, tcpProxy, class)
	networkFilterStack := buildNetworkFiltersStack(port.Protocol, tcpFilter, statPrefix, clusterName)
	return lb.buildCompleteNetworkFilters(class, port.Port, networkFilterStack, includeMx)
}
// maybeSetHashPolicy sets a source-IP hash policy on the TCP proxy when the
// DestinationRule (or the matching named subset, which takes precedence)
// configures consistent-hash load balancing by source IP.
func maybeSetHashPolicy(destinationRule *networking.DestinationRule, tcpProxy *tcp.TcpProxy, subsetName string) {
	if destinationRule == nil {
		return
	}
	// Start from the top-level traffic policy.
	useSourceIP := destinationRule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash().GetUseSourceIp()
	for _, subset := range destinationRule.Subsets {
		if subset.Name != subsetName {
			continue
		}
		// A load balancer on the matching subset overrides the top-level policy.
		if subset.TrafficPolicy != nil && subset.TrafficPolicy.LoadBalancer != nil {
			if ch := subset.TrafficPolicy.LoadBalancer.GetConsistentHash(); ch != nil {
				useSourceIP = ch.GetUseSourceIp()
			} else {
				// The subset configured a non-consistent-hash load balancer.
				useSourceIP = false
			}
		}
		break
	}
	// If destinationrule has consistent hash source ip set, use it for tcp proxy.
	if useSourceIP {
		tcpProxy.HashPolicy = []*hashpolicy.HashPolicy{{PolicySpecifier: &hashpolicy.HashPolicy_SourceIp_{
			SourceIp: &hashpolicy.HashPolicy_SourceIp{},
		}}}
	}
}
// buildNetworkFiltersStack builds a slice of network filters based on
// the protocol in use and the given TCP filter instance.
func buildNetworkFiltersStack(p protocol.Instance, tcpFilter *listener.Filter, statPrefix string, clusterName string) []*listener.Filter {
	switch p {
	case protocol.Mongo:
		if features.EnableMongoFilter {
			// Mongo proxy sits in front of the terminating TCP proxy.
			return []*listener.Filter{buildMongoFilter(statPrefix), tcpFilter}
		}
	case protocol.Redis:
		if features.EnableRedisFilter {
			// redis filter has route config, it is a terminating filter, no need append tcp filter.
			return []*listener.Filter{buildRedisFilter(statPrefix, clusterName)}
		}
	case protocol.MySQL:
		if features.EnableMysqlFilter {
			// MySQL proxy sits in front of the terminating TCP proxy.
			return []*listener.Filter{buildMySQLFilter(statPrefix), tcpFilter}
		}
	}
	// Default: plain TCP proxy only (also used when a protocol filter is disabled).
	return []*listener.Filter{tcpFilter}
}
// buildOutboundNetworkFilters generates a TCP proxy network filter for outbound
// connections. In addition, it generates protocol specific filters (e.g., Mongo
// filter).
func (lb *ListenerBuilder) buildOutboundNetworkFilters(
	routes []*networking.RouteDestination,
	port *model.Port, configMeta config.Meta, includeMx bool,
) []*listener.Filter {
	push, node := lb.push, lb.node
	// The DestinationRule is resolved from the first route's host; for a single
	// route this is the only destination.
	service := push.ServiceForHostname(node, host.Name(routes[0].Destination.Host))
	var destinationRule *networking.DestinationRule
	if service != nil {
		destinationRule = CastDestinationRule(node.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, node, service.Hostname).GetRule())
	}
	if len(routes) == 1 {
		clusterName := istioroute.GetDestinationCluster(routes[0].Destination, service, port.Port)
		statPrefix := clusterName
		// If stat name is configured, build the stat prefix from configured pattern.
		if len(push.Mesh.OutboundClusterStatName) != 0 && service != nil {
			statPrefix = telemetry.BuildStatPrefix(push.Mesh.OutboundClusterStatName, routes[0].Destination.Host,
				routes[0].Destination.Subset, port, 0, &service.Attributes)
		}
		return lb.buildOutboundNetworkFiltersWithSingleDestination(
			statPrefix, clusterName, routes[0].Destination.Subset, port, destinationRule, tunnelingconfig.Apply, includeMx)
	}
	// Multiple routes: use Envoy weighted clusters on the TCP proxy.
	return lb.buildOutboundNetworkFiltersWithWeightedClusters(routes, port, configMeta, destinationRule, includeMx)
}
// buildMongoFilter builds an outbound Envoy MongoProxy filter.
// TODO: add a watcher for /var/lib/istio/mongo/certs
// if certs are found use, TLS or mTLS clusters for talking to MongoDB.
// User is responsible for mounting those certs in the pod.
func buildMongoFilter(statPrefix string) *listener.Filter {
	// Envoy emits mongo stats under mongo.<statPrefix>.
	// TODO enable faults in mongo
	proxy := &mongo.MongoProxy{StatPrefix: statPrefix}
	return &listener.Filter{
		Name:       wellknown.MongoProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(proxy)},
	}
}
// buildRedisFilter builds an outbound Envoy RedisProxy filter.
// Currently, if multiple clusters are defined, one of them will be picked for
// configuring the Redis proxy.
func buildRedisFilter(statPrefix, clusterName string) *listener.Filter {
	proxy := &redis.RedisProxy{
		LatencyInMicros: true,       // redis latency stats are typically captured in microseconds
		StatPrefix:      statPrefix, // Envoy emits redis stats under redis.<statPrefix>
		Settings: &redis.RedisProxy_ConnPoolSettings{
			OpTimeout: durationpb.New(redisOpTimeout),
		},
		// Route everything to the single given cluster via a catch-all prefix route.
		PrefixRoutes: &redis.RedisProxy_PrefixRoutes{
			CatchAllRoute: &redis.RedisProxy_PrefixRoutes_Route{
				Cluster: clusterName,
			},
		},
	}
	return &listener.Filter{
		Name:       wellknown.RedisProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(proxy)},
	}
}
// buildMySQLFilter builds an outbound Envoy MySQLProxy filter.
func buildMySQLFilter(statPrefix string) *listener.Filter {
	// Envoy emits MySQL stats under mysql.<statPrefix>.
	proxy := &mysql.MySQLProxy{StatPrefix: statPrefix}
	return &listener.Filter{
		Name:       wellknown.MySQLProxy,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(proxy)},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package retry
import (
"net/http"
"strconv"
"strings"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
previouspriorities "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/priority/previous_priorities/v3"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
)
// defaultRetryPriorityTypedConfig is the pre-marshaled PreviousPrioritiesConfig
// shared by every retry policy that enables RetryRemoteLocalities.
var defaultRetryPriorityTypedConfig = protoconv.MessageToAny(buildPreviousPrioritiesConfig())
// DefaultPolicy gets a copy of the default retry policy.
func DefaultPolicy() *route.RetryPolicy {
	return &route.RetryPolicy{
		NumRetries:           &wrappers.UInt32Value{Value: 2},
		RetryOn:              "connect-failure,refused-stream,unavailable,cancelled,retriable-status-codes",
		RetriableStatusCodes: []uint32{http.StatusServiceUnavailable},
		RetryHostPredicate: []*route.RetryPolicy_RetryHostPredicate{
			// Prefer hosts that haven't been attempted already, via the builtin
			// `envoy.retry_host_predicates.previous_hosts` predicate.
			xdsfilters.RetryPreviousHosts,
		},
		// TODO: allow this to be configured via API.
		HostSelectionRetryMaxAttempts: 5,
	}
}
// ConvertPolicy converts the given Istio retry policy to an Envoy policy.
//
// If in is nil, DefaultPolicy is returned.
//
// If in.Attempts <= 0, returns nil (retries are explicitly disabled).
//
// Otherwise the result is DefaultPolicy overridden with the user-supplied
// NumRetries, RetryOn/RetriableStatusCodes, PerTryTimeout, and — when
// RetryRemoteLocalities is set — a previous-priorities retry priority.
func ConvertPolicy(in *networking.HTTPRetry) *route.RetryPolicy {
	if in == nil {
		// No policy was set, use a default.
		return DefaultPolicy()
	}
	if in.Attempts <= 0 {
		// Configuration is explicitly disabling the retry policy.
		return nil
	}
	// Start from the default and layer the user-provided fields on top.
	out := DefaultPolicy()
	out.NumRetries = &wrappers.UInt32Value{Value: uint32(in.Attempts)}
	if in.RetryOn != "" {
		// The incoming string may mix Envoy retry-on conditions with bare HTTP
		// status codes; split them apart. Any integer part that is a valid HTTP
		// status code becomes a retriable status code.
		retryOn, codes := parseRetryOn(in.RetryOn)
		out.RetryOn, out.RetriableStatusCodes = retryOn, codes
		// Status codes only take effect when "retriable-status-codes" is among
		// the conditions, so add it if the user supplied codes but omitted it.
		if len(codes) > 0 && !strings.Contains(out.RetryOn, "retriable-status-codes") {
			out.RetryOn += ",retriable-status-codes"
		}
	}
	if in.PerTryTimeout != nil {
		out.PerTryTimeout = in.PerTryTimeout
	}
	if in.RetryRemoteLocalities != nil && in.RetryRemoteLocalities.GetValue() {
		// Deprioritize previously attempted localities so retries spread out.
		out.RetryPriority = &route.RetryPolicy_RetryPriority{
			Name: "envoy.retry_priorities.previous_priorities",
			ConfigType: &route.RetryPolicy_RetryPriority_TypedConfig{
				TypedConfig: defaultRetryPriorityTypedConfig,
			},
		}
	}
	return out
}
// parseRetryOn splits a comma-separated retryOn string into the Envoy retry-on
// condition string and the list of retriable HTTP status codes it contained.
// Parts that parse as known HTTP status codes become retriable codes; all other
// non-empty parts are re-joined (comma-separated) as retry-on conditions.
func parseRetryOn(retryOn string) (string, []uint32) {
	codes := make([]uint32, 0)
	conditions := make([]string, 0)
	for _, piece := range strings.Split(retryOn, ",") {
		piece = strings.TrimSpace(piece)
		if piece == "" {
			continue
		}
		// An integer that maps to a known HTTP status text is a status code;
		// anything else is treated as an Envoy retry-on condition.
		if code, err := strconv.Atoi(piece); err == nil && http.StatusText(code) != "" {
			codes = append(codes, uint32(code))
			continue
		}
		conditions = append(conditions, piece)
	}
	return strings.Join(conditions, ","), codes
}
// buildPreviousPrioritiesConfig builds a PreviousPrioritiesConfig whose
// UpdateFrequency (default 2) indicates how often to update the priority.
func buildPreviousPrioritiesConfig() *previouspriorities.PreviousPrioritiesConfig {
	cfg := &previouspriorities.PreviousPrioritiesConfig{}
	cfg.UpdateFrequency = 2
	return cfg
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package route
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
xdsfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/fault/v3"
cors "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
xdshttpfault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
statefulsession "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/stateful_session/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
xdstype "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/golang/protobuf/ptypes/duration"
anypb "google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"k8s.io/apimachinery/pkg/types"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/route/retry"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
authz "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/grpc"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// Headers with special meaning in Envoy
const (
	// HeaderMethod is the HTTP/2 pseudo-header carrying the request method.
	HeaderMethod = ":method"
	// HeaderAuthority is the HTTP/2 pseudo-header carrying the request authority (host).
	HeaderAuthority = ":authority"
	// HeaderScheme is the HTTP/2 pseudo-header carrying the request scheme.
	HeaderScheme = ":scheme"
)
// DefaultRouteName is the name assigned to a route generated by default in absence of a virtual service.
const DefaultRouteName = "default"
// Notimeout is a zero duration, used to disable Envoy's default route timeout.
var Notimeout = durationpb.New(0)
// DefaultMaxDirectResponseBodySizeBytes is 1mb, the same limit the control plane validates via webhook. Set this to increase from envoy default of 4k
var DefaultMaxDirectResponseBodySizeBytes = wrappers.UInt32(1024 * 1024)
// DestinationHashMap maps each HTTP route destination to the consistent-hash
// load balancer settings (from its DestinationRule) that apply to it.
type DestinationHashMap map[*networking.HTTPRouteDestination]*networking.LoadBalancerSettings_ConsistentHashLB
// VirtualHostWrapper is a context-dependent virtual host entry with guarded routes.
// Note: Currently we are not fully utilizing this structure. We could invoke this logic
// once for all sidecars in the cluster to compute all RDS for inside the mesh and arrange
// it by listener port. However to properly use such an optimization, we need to have an
// eventing subsystem to invalidate the computed routes if any service changes/virtual Services change.
type VirtualHostWrapper struct {
	// Port is the listener port for outbound sidecar (e.g. service port)
	Port int
	// Services are the Services from the registry. Each service
	// in this list should have a virtual host entry
	Services []*model.Service
	// VirtualServiceHosts is a list of hosts defined in the virtual service.
	// If a virtual service hostname is the same as a service registry host, then
	// the host would appear in Services instead, as we need to generate all variants of the
	// service's hostname within a platform (e.g., foo, foo.default, foo.default.svc, etc.)
	VirtualServiceHosts []string
	// Routes in the virtual host
	Routes []*route.Route
}
// BuildSidecarVirtualHostWrapper creates virtual hosts from the given set of virtual Services
// and a list of Services from the service registry. Services are indexed by FQDN hostnames.
// The list of Services is also passed to allow maintaining consistent ordering.
// Note: entries are removed from serviceRegistry for services already covered by a virtual service.
func BuildSidecarVirtualHostWrapper(routeCache *Cache, node *model.Proxy, push *model.PushContext, serviceRegistry map[host.Name]*model.Service,
	virtualServices []config.Config, listenPort int, mostSpecificWildcardIndex map[host.Name]types.NamespacedName,
) []VirtualHostWrapper {
	out := make([]VirtualHostWrapper, 0)
	// Track every destinationrule referenced by the virtualservices that has a
	// consistent hash policy, so the route cache can be invalidated on changes.
	dependentDRs := []*model.ConsolidatedDestRule{}
	// Virtual hosts backed by virtual services come first.
	for _, vs := range virtualServices {
		hashByDestination, drs := hashForVirtualService(push, node, vs)
		dependentDRs = append(dependentDRs, drs...)
		out = append(out, buildSidecarVirtualHostsForVirtualService(
			node, vs, serviceRegistry, hashByDestination, listenPort, push.Mesh, mostSpecificWildcardIndex,
		)...)
	}
	// Exclude services already covered by a virtual service above.
	for _, wrapper := range out {
		for _, svc := range wrapper.Services {
			delete(serviceRegistry, svc.Hostname)
		}
	}
	// Default virtual hosts for the remaining services (no virtual service).
	for _, svc := range serviceRegistry {
		for _, port := range svc.Ports {
			if !port.Protocol.IsHTTPOrSniffed() {
				continue
			}
			hash, dr := hashForService(push, node, svc, port)
			if hash != nil {
				dependentDRs = append(dependentDRs, dr)
			}
			out = append(out, buildSidecarVirtualHostForService(svc, port, hash, push.Mesh))
		}
	}
	if routeCache != nil {
		routeCache.DestinationRules = dependentDRs
	}
	return out
}
// separateVSHostsAndServices splits the virtual service hosts into Services (if they are found in the registry) and
// plain non-registry hostnames. Non-wildcard hosts are resolved by direct map lookup; wildcard
// hosts require scanning the whole registry via mostSpecificWildcardIndex.
func separateVSHostsAndServices(virtualService config.Config,
	serviceRegistry map[host.Name]*model.Service,
	mostSpecificWildcardIndex map[host.Name]types.NamespacedName,
) ([]string, []*model.Service) {
	// TODO: A further optimization would be to completely rely on the index and not do the loop below
	// However, that requires assuming that serviceRegistry never got filtered after the
	// egressListener was created.
	rule := virtualService.Spec.(*networking.VirtualService)
	hosts := make([]string, 0)
	servicesInVirtualService := make([]*model.Service, 0)
	wchosts := make([]host.Name, 0)
	// As a performance optimization, process non wildcard hosts first, so that they can be
	// looked up directly in the service registry map.
	for _, hostname := range rule.Hosts {
		vshost := host.Name(hostname)
		if !vshost.IsWildCarded() {
			if svc, exists := serviceRegistry[vshost]; exists {
				servicesInVirtualService = append(servicesInVirtualService, svc)
			} else {
				// Not a registry service; keep the raw hostname.
				hosts = append(hosts, hostname)
			}
		} else {
			// Add it to the wildcard hosts so that they can be processed later.
			wchosts = append(wchosts, vshost)
		}
	}
	// Now process wild card hosts as they need to follow the slow path of looping through all Services in the registry.
	for _, hostname := range wchosts {
		if model.UseGatewaySemantics(virtualService) {
			// Gateway-API routes keep wildcard hosts verbatim; no registry matching.
			hosts = append(hosts, string(hostname))
			continue
		}
		// Say this VS's host is *.global and there's another VS with host *.foo.global
		foundSvcMatch := false
		// Say we have Services *.foo.global, *.bar.global
		for svcHost, svc := range serviceRegistry {
			vs, ok := mostSpecificWildcardIndex[svcHost]
			if !ok {
				// This service doesn't have a virtualService that matches it.
				continue
			}
			// NOTE(review): foundSvcMatch is set for any service with an index entry,
			// even when that entry points at a different virtual service — confirm
			// this is the intended "some VS covers this service" semantics.
			foundSvcMatch = true // we did find a match
			if vs != virtualService.NamespacedName() {
				// This virtual service is not the most specific wildcard match for this service.
				// So we don't add it to the list of services in this virtual service so as
				// to avoid duplicates
				continue
			}
			servicesInVirtualService = append(servicesInVirtualService, svc)
		}
		if !foundSvcMatch {
			// No registry service corresponds to this wildcard; keep the raw hostname.
			hosts = append(hosts, string(hostname))
		}
	}
	return hosts, servicesInVirtualService
}
// buildSidecarVirtualHostsForVirtualService creates virtual hosts corresponding to a virtual service.
// Called for each port to determine the list of vhosts on the given port.
// It may return an empty list if no VirtualService rule has a matching service.
func buildSidecarVirtualHostsForVirtualService(
	node *model.Proxy,
	virtualService config.Config,
	serviceRegistry map[host.Name]*model.Service,
	hashByDestination DestinationHashMap,
	listenPort int,
	mesh *meshconfig.MeshConfig,
	mostSpecificWildcardIndex map[host.Name]types.NamespacedName,
) []VirtualHostWrapper {
	meshGateway := sets.New(constants.IstioMeshGateway)
	opts := RouteOptions{
		// Sidecar is never terminating TLS
		IsTLS: false,
		// Sidecar is never doing H3 (yet)
		IsHTTP3AltSvcHeaderNeeded: false,
		Mesh:                      mesh,
	}
	routes, err := BuildHTTPRoutesForVirtualService(node, virtualService, serviceRegistry, hashByDestination,
		listenPort, meshGateway, opts)
	if err != nil || len(routes) == 0 {
		return nil
	}
	hosts, servicesInVirtualService := separateVSHostsAndServices(virtualService, serviceRegistry, mostSpecificWildcardIndex)
	// Gateway allows only routes from the namespace of the proxy, or namespace of the destination.
	if model.UseGatewaySemantics(virtualService) {
		res := make([]*model.Service, 0, len(servicesInVirtualService))
		for _, s := range servicesInVirtualService {
			if s.Attributes.Namespace != virtualService.Namespace && node.ConfigNamespace != virtualService.Namespace {
				continue
			}
			res = append(res, s)
		}
		if len(res) == 0 {
			return nil
		}
		// BUGFIX: the filtered list must replace the original; previously `res`
		// was computed and then discarded (only its emptiness was checked), so
		// services outside both namespaces still received virtual hosts.
		servicesInVirtualService = res
	}
	// Now group these Services by port so that we can infer the destination.port if the user
	// doesn't specify any port for a multiport service. We need to know the destination port in
	// order to build the cluster name (outbound|<port>|<subset>|<serviceFQDN>)
	// If the destination service is being accessed on port X, we set that as the default
	// destination port
	serviceByPort := make(map[int][]*model.Service)
	for _, svc := range servicesInVirtualService {
		for _, port := range svc.Ports {
			if port.Protocol.IsHTTPOrSniffed() {
				serviceByPort[port.Port] = append(serviceByPort[port.Port], svc)
			}
		}
	}
	if len(serviceByPort) == 0 {
		if listenPort == 80 {
			// TODO: This is a gross HACK. Fix me. Its a much bigger surgery though, due to the way
			// the current code is written.
			serviceByPort[80] = nil
		}
	}
	out := make([]VirtualHostWrapper, 0, len(serviceByPort))
	for port, services := range serviceByPort {
		out = append(out, VirtualHostWrapper{
			Port:                port,
			Services:            services,
			VirtualServiceHosts: hosts,
			Routes:              routes,
		})
	}
	return out
}
// buildSidecarVirtualHostForService builds the default virtual host for a
// registry service that has no virtual service, on the given port.
func buildSidecarVirtualHostForService(svc *model.Service,
	port *model.Port,
	hash *networking.LoadBalancerSettings_ConsistentHashLB,
	mesh *meshconfig.MeshConfig,
) VirtualHostWrapper {
	clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", svc.Hostname, port.Port)
	trace := telemetry.TraceOperation(string(svc.Hostname), port.Port)
	defaultRoute := BuildDefaultHTTPOutboundRoute(clusterName, trace, mesh)
	// Without a virtualservice, the consistentHash on the destinationRule would
	// otherwise be useless, so apply it to the default route here.
	if hp := consistentHashToHashPolicy(hash); hp != nil {
		defaultRoute.GetRoute().HashPolicy = []*route.RouteAction_HashPolicy{hp}
	}
	return VirtualHostWrapper{
		Port:     port.Port,
		Services: []*model.Service{svc},
		Routes:   []*route.Route{defaultRoute},
	}
}
// GetDestinationCluster generates the outbound cluster name for the given route
// destination, in the form outbound|<port>|<subset>|<serviceFQDN>.
// Called by translateRule to determine if a cluster can be found.
func GetDestinationCluster(destination *networking.Destination, service *model.Service, listenerPort int) string {
	if len(destination.GetHost()) == 0 {
		// only happens when the gateway-api BackendRef is invalid
		return "UnknownService"
	}
	hostname := host.Name(destination.Host)
	// If this is an Alias, point to the concrete service.
	// TODO: this will not work if we have Alias -> Alias -> Concrete service.
	if service != nil && service.Attributes.K8sAttributes.ExternalName != "" {
		hostname = host.Name(service.Attributes.K8sAttributes.ExternalName)
	}
	port := listenerPort
	switch {
	case destination.GetPort() != nil:
		port = int(destination.GetPort().GetNumber())
	case service != nil && len(service.Ports) == 1:
		// A single-port service unambiguously determines the port;
		// otherwise the default listenerPort is kept.
		port = service.Ports[0].Port
	}
	// Do not return blackhole cluster for service==nil case as there is a legitimate use case for
	// calling this function with nil service: to route to a pre-defined statically configured cluster
	// declared as part of the bootstrap.
	// If blackhole cluster is needed, do the check on the caller side. See gateway and tls.go for examples.
	return model.BuildSubsetKey(model.TrafficDirectionOutbound, destination.Subset, hostname, port)
}
// RouteOptions carries listener-level context that influences how HTTP routes
// are translated (TLS termination, HTTP/3 advertisement, mesh config).
type RouteOptions struct {
	// IsTLS indicates if the route is intended for a TLS listener
	IsTLS bool
	// IsHTTP3AltSvcHeaderNeeded indicates if HTTP3 alt-svc header needs to be inserted
	IsHTTP3AltSvcHeaderNeeded bool
	// Mesh is the mesh-wide configuration used for route defaults (e.g. retries, timeouts).
	Mesh *meshconfig.MeshConfig
}
// BuildHTTPRoutesForVirtualService creates data plane HTTP routes from the virtual service spec.
// The rule should be adapted to destination names (outbound clusters).
// Each rule is guarded by source labels.
//
// This is called for each port to compute virtual hosts.
// Each VirtualService is tried, with a list of Services that listen on the port.
// Error indicates the given virtualService can't be used on the port.
// This function is used by both the gateway and the sidecar
func BuildHTTPRoutesForVirtualService(
	node *model.Proxy,
	virtualService config.Config,
	serviceRegistry map[host.Name]*model.Service,
	hashByDestination DestinationHashMap,
	listenPort int,
	gatewayNames sets.String,
	opts RouteOptions,
) ([]*route.Route, error) {
	vs, ok := virtualService.Spec.(*networking.VirtualService)
	if !ok { // should never happen
		return nil, fmt.Errorf("in not a virtual service: %#v", virtualService)
	}
	out := make([]*route.Route, 0, len(vs.Http))
	catchall := false
	for _, http := range vs.Http {
		if len(http.Match) == 0 {
			// A rule with no match conditions matches everything and therefore
			// terminates the route list.
			if r := translateRoute(node, http, nil, listenPort, virtualService, serviceRegistry,
				hashByDestination, gatewayNames, opts); r != nil {
				out = append(out, r)
			}
			catchall = true
		} else {
			for _, match := range http.Match {
				if r := translateRoute(node, http, match, listenPort, virtualService, serviceRegistry,
					hashByDestination, gatewayNames, opts); r != nil {
					out = append(out, r)
					// This is a catch all path. Routes are matched in order, so we will never go beyond this match
					// As an optimization, we can just stop sending any more routes here.
					if isCatchAllRoute(r) {
						catchall = true
						break
					}
				}
			}
		}
		// Stop after the first catch-all rule: later rules are unreachable.
		if catchall {
			break
		}
	}
	if len(out) == 0 {
		return nil, fmt.Errorf("no routes matched")
	}
	return out, nil
}
// sourceMatchHTTP checks if the sourceLabels or the gateways in a match condition match with the
// labels for the proxy or the gateway name for which we are generating a route.
func sourceMatchHTTP(match *networking.HTTPMatchRequest, proxyLabels labels.Instance, gatewayNames sets.String, proxyNamespace string) bool {
	if match == nil {
		// No condition: everything matches.
		return true
	}
	// A gateway list, when present, takes precedence over source labels.
	if len(match.Gateways) > 0 {
		for _, gw := range match.Gateways {
			if gatewayNames.Contains(gw) {
				return true
			}
		}
		return false
	}
	if !labels.Instance(match.GetSourceLabels()).SubsetOf(proxyLabels) {
		return false
	}
	// Labels matched; the namespace constraint (if any) must also hold.
	return match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
}
// translateRoute translates a single HTTPRoute (optionally constrained by one
// match clause) into an Envoy route. Returns nil when the route does not apply
// to this listener port or to this proxy's labels/gateways.
func translateRoute(
	node *model.Proxy,
	in *networking.HTTPRoute,
	match *networking.HTTPMatchRequest,
	listenPort int,
	virtualService config.Config,
	serviceRegistry map[host.Name]*model.Service,
	hashByDestination DestinationHashMap,
	gatewayNames sets.String,
	opts RouteOptions,
) *route.Route {
	// When building routes, it's okay if the target cluster cannot be
	// resolved. Traffic to such clusters will blackhole.
	// Match by the destination port specified in the match condition
	if match != nil && match.Port != 0 && match.Port != uint32(listenPort) {
		return nil
	}
	// Match by source labels/gateway names inside the match condition
	if !sourceMatchHTTP(match, node.Labels, gatewayNames, node.Metadata.Namespace) {
		return nil
	}
	routeName := in.Name
	if match != nil && match.Name != "" {
		// Disambiguate per-match routes by suffixing the match clause name.
		routeName = routeName + "." + match.Name
	}
	out := &route.Route{
		Name:     routeName,
		Match:    TranslateRouteMatch(virtualService, match, node.SupportsEnvoyExtendedJwt()),
		Metadata: util.BuildConfigInfoMetadata(virtualService.Meta),
	}
	if match != nil && match.StatPrefix != "" {
		out.StatPrefix = match.StatPrefix
	}
	authority := ""
	if in.Headers != nil {
		// Route-level header manipulations; the authority rewrite is deferred so
		// destination handling below can decide how to apply it.
		operations := TranslateHeadersOperations(in.Headers)
		out.RequestHeadersToAdd = operations.RequestHeadersToAdd
		out.ResponseHeadersToAdd = operations.ResponseHeadersToAdd
		out.RequestHeadersToRemove = operations.RequestHeadersToRemove
		out.ResponseHeadersToRemove = operations.ResponseHeadersToRemove
		authority = operations.Authority
	}
	var hostnames []host.Name
	// Redirect, direct response, and destination routing are mutually exclusive.
	if in.Redirect != nil {
		ApplyRedirect(out, in.Redirect, listenPort, opts.IsTLS, model.UseGatewaySemantics(virtualService))
	} else if in.DirectResponse != nil {
		ApplyDirectResponse(out, in.DirectResponse)
	} else {
		hostnames = applyHTTPRouteDestination(out, node, virtualService, in, opts.Mesh, authority, serviceRegistry, listenPort, hashByDestination)
	}
	out.Decorator = &route.Decorator{
		Operation: GetRouteOperation(out, virtualService.Name, listenPort),
	}
	if in.Fault != nil || in.CorsPolicy != nil {
		out.TypedPerFilterConfig = make(map[string]*anypb.Any)
	}
	if in.Fault != nil {
		out.TypedPerFilterConfig[wellknown.Fault] = protoconv.MessageToAny(TranslateFault(in.Fault))
	}
	if in.CorsPolicy != nil {
		out.TypedPerFilterConfig[wellknown.CORS] = protoconv.MessageToAny(TranslateCORSPolicy(in.CorsPolicy))
	}
	var statefulConfig *statefulsession.StatefulSession
	for _, hostname := range hostnames {
		perSvcStatefulConfig := util.MaybeBuildStatefulSessionFilterConfig(serviceRegistry[hostname])
		// This means we have more than one stateful config for the same route because of weighed destinations.
		// We should just pick the first and give a warning.
		if perSvcStatefulConfig != nil && statefulConfig != nil {
			log.Warnf("More than one stateful config for the same route %s. Picking the first one.", routeName)
			break
		}
		// NOTE(review): a later destination with no stateful config resets this to
		// nil, so it is the last non-conflicting value that wins — confirm intended.
		statefulConfig = perSvcStatefulConfig
	}
	// Build stateful set config if the svc has appropriate labels attached.
	if statefulConfig != nil {
		if out.TypedPerFilterConfig == nil {
			out.TypedPerFilterConfig = make(map[string]*anypb.Any)
		}
		perRouteStatefulSession := &statefulsession.StatefulSessionPerRoute{
			Override: &statefulsession.StatefulSessionPerRoute_StatefulSession{
				StatefulSession: statefulConfig,
			},
		}
		out.TypedPerFilterConfig[util.StatefulSessionFilter] = protoconv.MessageToAny(perRouteStatefulSession)
	}
	if opts.IsHTTP3AltSvcHeaderNeeded {
		// Advertise HTTP/3-over-QUIC support via an alt-svc response header.
		http3AltSvcHeader := buildHTTP3AltSvcHeader(listenPort, util.ALPNHttp3OverQUIC)
		if out.ResponseHeadersToAdd == nil {
			out.ResponseHeadersToAdd = make([]*core.HeaderValueOption, 0)
		}
		out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, http3AltSvcHeader)
	}
	return out
}
// applyHTTPRouteDestination configures the RouteAction for a destination-routing
// HTTP route: retry policy, timeout, URI/authority rewrites, request mirroring,
// and the target cluster(s) — a single cluster or weighted clusters.
// It returns the hostnames of all destinations so the caller can look up
// per-service configuration (e.g. stateful sessions).
func applyHTTPRouteDestination(
	out *route.Route,
	node *model.Proxy,
	vs config.Config,
	in *networking.HTTPRoute,
	mesh *meshconfig.MeshConfig,
	authority string,
	serviceRegistry map[host.Name]*model.Service,
	listenerPort int,
	hashByDestination DestinationHashMap,
) []host.Name {
	policy := in.Retries
	if policy == nil {
		// No VS policy set, use mesh defaults
		policy = mesh.GetDefaultHttpRetryPolicy()
	}
	action := &route.RouteAction{
		RetryPolicy: retry.ConvertPolicy(policy),
	}
	setTimeout(action, in.Timeout, node)
	if model.UseGatewaySemantics(vs) {
		// return 500 for invalid backends
		// https://github.com/kubernetes-sigs/gateway-api/blob/cea484e38e078a2c1997d8c7a62f410a1540f519/apis/v1beta1/httproute_types.go#L204
		action.ClusterNotFoundResponseCode = route.RouteAction_INTERNAL_SERVER_ERROR
	}
	out.Action = &route.Route_Route{Route: action}
	if in.Rewrite != nil {
		// BUGFIX: removed a dead assignment that set the cluster specifier to the
		// route name here; the cluster specifier is always set from the route
		// destinations below, so the assignment was both misleading and inert.
		if regexRewrite := in.Rewrite.GetUriRegexRewrite(); regexRewrite != nil {
			action.RegexRewrite = &matcher.RegexMatchAndSubstitute{
				Pattern: &matcher.RegexMatcher{
					Regex: regexRewrite.Match,
				},
				Substitution: regexRewrite.Rewrite,
			}
		} else if uri := in.Rewrite.GetUri(); uri != "" {
			if model.UseGatewaySemantics(vs) && uri == "/" {
				// remove the prefix
				action.RegexRewrite = &matcher.RegexMatchAndSubstitute{
					Pattern: &matcher.RegexMatcher{
						Regex: fmt.Sprintf(`^%s(/?)(.*)`, regexp.QuoteMeta(out.Match.GetPathSeparatedPrefix())),
					},
					// hold `/` in case the entire path is removed
					Substitution: `/\2`,
				}
			} else {
				action.PrefixRewrite = uri
			}
		}
		if in.Rewrite.GetAuthority() != "" {
			// The rewrite's authority overrides any header-operation authority.
			authority = in.Rewrite.GetAuthority()
		}
	}
	if authority != "" {
		action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{
			HostRewriteLiteral: authority,
		}
	}
	if in.Mirror != nil {
		if mp := MirrorPercent(in); mp != nil {
			action.RequestMirrorPolicies = append(action.RequestMirrorPolicies,
				TranslateRequestMirrorPolicy(in.Mirror, serviceRegistry[host.Name(in.Mirror.Host)], listenerPort, mp))
		}
	}
	for _, mirror := range in.Mirrors {
		if mp := MirrorPercentByPolicy(mirror); mp != nil && mirror.Destination != nil {
			action.RequestMirrorPolicies = append(action.RequestMirrorPolicies,
				TranslateRequestMirrorPolicy(mirror.Destination, serviceRegistry[host.Name(mirror.Destination.Host)], listenerPort, mp))
		}
	}
	var hostnames []host.Name
	if len(in.Route) == 1 {
		// Single destination: plain cluster routing.
		hostnames = append(hostnames, processDestination(in.Route[0], serviceRegistry, listenerPort, hashByDestination, out, action))
	} else {
		// Multiple destinations: weighted cluster routing.
		weighted := make([]*route.WeightedCluster_ClusterWeight, 0)
		for _, dst := range in.Route {
			if dst.Weight == 0 {
				// Ignore 0 weighted clusters if there are other clusters in the route.
				continue
			}
			destinationweight, hostname := processWeightedDestination(dst, serviceRegistry, listenerPort, hashByDestination, action)
			weighted = append(weighted, destinationweight)
			hostnames = append(hostnames, hostname)
		}
		action.ClusterSpecifier = &route.RouteAction_WeightedClusters{
			WeightedClusters: &route.WeightedCluster{
				Clusters: weighted,
			},
		}
	}
	return hostnames
}
// processDestination processes a single destination in a route: it sets the
// target cluster, applies per-destination header operations, and appends the
// destination's consistent-hash policy if one exists.
// Returns the hostname of the destination.
func processDestination(dst *networking.HTTPRouteDestination, serviceRegistry map[host.Name]*model.Service,
	listenerPort int,
	hashByDestination DestinationHashMap,
	out *route.Route,
	action *route.RouteAction,
) host.Name {
	hostname := host.Name(dst.GetDestination().GetHost())
	action.ClusterSpecifier = &route.RouteAction_Cluster{
		Cluster: GetDestinationCluster(dst.Destination, serviceRegistry[hostname], listenerPort),
	}
	if dst.Headers != nil {
		ops := TranslateHeadersOperations(dst.Headers)
		out.RequestHeadersToAdd = append(out.RequestHeadersToAdd, ops.RequestHeadersToAdd...)
		out.RequestHeadersToRemove = append(out.RequestHeadersToRemove, ops.RequestHeadersToRemove...)
		out.ResponseHeadersToAdd = append(out.ResponseHeadersToAdd, ops.ResponseHeadersToAdd...)
		out.ResponseHeadersToRemove = append(out.ResponseHeadersToRemove, ops.ResponseHeadersToRemove...)
		if ops.Authority != "" && action.HostRewriteSpecifier == nil {
			// Ideally, if the weighted cluster overwrites authority, it has precedence. This mirrors behavior of headers,
			// because for headers we append the weighted last which allows it to Set and wipe out previous Adds.
			// However, Envoy behavior is different when we set at both cluster level and route level, and we want
			// behavior to be consistent with a single cluster and multiple clusters.
			// As a result, we only override if the top level rewrite is not set
			action.HostRewriteSpecifier = &route.RouteAction_HostRewriteLiteral{
				HostRewriteLiteral: ops.Authority,
			}
		}
	}
	if hp := consistentHashToHashPolicy(hashByDestination[dst]); hp != nil {
		action.HashPolicy = append(action.HashPolicy, hp)
	}
	return hostname
}
// processWeightedDestination processes one weighted destination of a route:
// it builds the weighted cluster entry (cluster name + weight), applies the
// destination's header operations to that entry, and appends the destination's
// consistent-hash policy to the route action if one exists.
// Returns the weighted cluster entry along with the destination's hostname.
func processWeightedDestination(dst *networking.HTTPRouteDestination, serviceRegistry map[host.Name]*model.Service,
	listenerPort int,
	hashByDestination DestinationHashMap,
	action *route.RouteAction,
) (*route.WeightedCluster_ClusterWeight, host.Name) {
	hostname := host.Name(dst.GetDestination().GetHost())
	entry := &route.WeightedCluster_ClusterWeight{
		Name:   GetDestinationCluster(dst.Destination, serviceRegistry[hostname], listenerPort),
		Weight: &wrappers.UInt32Value{Value: uint32(dst.Weight)},
	}
	if dst.Headers != nil {
		// Per-destination header operations attach to the cluster weight itself.
		ops := TranslateHeadersOperations(dst.Headers)
		entry.RequestHeadersToAdd = ops.RequestHeadersToAdd
		entry.RequestHeadersToRemove = ops.RequestHeadersToRemove
		entry.ResponseHeadersToAdd = ops.ResponseHeadersToAdd
		entry.ResponseHeadersToRemove = ops.ResponseHeadersToRemove
		if ops.Authority != "" {
			entry.HostRewriteSpecifier = &route.WeightedCluster_ClusterWeight_HostRewriteLiteral{
				HostRewriteLiteral: ops.Authority,
			}
		}
	}
	if hp := consistentHashToHashPolicy(hashByDestination[dst]); hp != nil {
		action.HashPolicy = append(action.HashPolicy, hp)
	}
	return entry, hostname
}
// ApplyRedirect sets out's action to an Envoy RedirectAction built from the
// VirtualService HTTPRedirect.
//
// port is the listener port the route is generated for; it is used when the
// redirect derives its port from the request. isTLS selects the default scheme
// ("https" vs "http") when none is set, which controls stripping of redundant
// :80/:443 ports. useGatewaySemantics enables handling of the "%PREFIX()%"
// placeholder (prefix-replace rewrites) in the redirect URI.
// Unsupported redirect codes result in a nil action being assigned.
func ApplyRedirect(out *route.Route, redirect *networking.HTTPRedirect, port int, isTLS bool, useGatewaySemantics bool) {
	action := &route.Route_Redirect{
		Redirect: &route.RedirectAction{
			HostRedirect: redirect.Authority,
			PathRewriteSpecifier: &route.RedirectAction_PathRedirect{
				PathRedirect: redirect.Uri,
			},
		},
	}

	if useGatewaySemantics {
		// A URI of the form "%PREFIX()%<rest>" encodes a prefix rewrite rather
		// than a full-path redirect — translate accordingly.
		if uri, isPrefixReplace := cutPrefix(redirect.Uri, "%PREFIX()%"); isPrefixReplace {
			action.Redirect.PathRewriteSpecifier = &route.RedirectAction_PrefixRewrite{
				PrefixRewrite: uri,
			}
		}
	}

	if redirect.Scheme != "" {
		action.Redirect.SchemeRewriteSpecifier = &route.RedirectAction_SchemeRedirect{SchemeRedirect: redirect.Scheme}
	}

	if redirect.RedirectPort != nil {
		switch rp := redirect.RedirectPort.(type) {
		case *networking.HTTPRedirect_DerivePort:
			if rp.DerivePort == networking.HTTPRedirect_FROM_REQUEST_PORT {
				// Envoy doesn't actually support deriving the port from the request dynamically. However,
				// we always generate routes in the context of a specific request port. As a result, we can just
				// use that port
				action.Redirect.PortRedirect = uint32(port)
			}
			// Otherwise, no port needed; HTTPRedirect_FROM_PROTOCOL_DEFAULT is Envoy's default behavior
		case *networking.HTTPRedirect_Port:
			action.Redirect.PortRedirect = rp.Port
		}

		// Determine the effective target scheme so we can drop redundant default
		// ports below; fall back to the listener's TLS-ness when not explicit.
		scheme := redirect.Scheme
		if scheme == "" {
			if isTLS {
				scheme = "https"
			} else {
				scheme = "http"
			}
		}
		// Do not put explicit :80 or :443 when its http/https
		if action.Redirect.PortRedirect == 80 && scheme == "http" {
			action.Redirect.PortRedirect = 0
		}
		if action.Redirect.PortRedirect == 443 && scheme == "https" {
			action.Redirect.PortRedirect = 0
		}
	}

	switch redirect.RedirectCode {
	case 0, 301:
		// Unset (0) defaults to 301 Moved Permanently.
		action.Redirect.ResponseCode = route.RedirectAction_MOVED_PERMANENTLY
	case 302:
		action.Redirect.ResponseCode = route.RedirectAction_FOUND
	case 303:
		action.Redirect.ResponseCode = route.RedirectAction_SEE_OTHER
	case 307:
		action.Redirect.ResponseCode = route.RedirectAction_TEMPORARY_REDIRECT
	case 308:
		action.Redirect.ResponseCode = route.RedirectAction_PERMANENT_REDIRECT
	default:
		// Unsupported code: drop the redirect rather than emit invalid config.
		log.Warnf("Redirect Code %d is not yet supported", redirect.RedirectCode)
		action = nil
	}

	out.Action = action
}
// ApplyDirectResponse sets out's action to a DirectResponseAction carrying the
// configured status code and optional inline body (string or bytes form).
func ApplyDirectResponse(out *route.Route, directResponse *networking.HTTPDirectResponse) {
	dr := &route.DirectResponseAction{Status: directResponse.Status}
	if directResponse.Body != nil {
		switch body := directResponse.Body.Specifier.(type) {
		case *networking.HTTPBody_String_:
			dr.Body = &core.DataSource{
				Specifier: &core.DataSource_InlineString{InlineString: body.String_},
			}
		case *networking.HTTPBody_Bytes:
			dr.Body = &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{InlineBytes: body.Bytes},
			}
		}
	}
	out.Action = &route.Route_DirectResponse{DirectResponse: dr}
}
// buildHTTP3AltSvcHeader builds an alt-svc header option advertising HTTP/3
// support on the given port for each supplied h3 ALPN token.
func buildHTTP3AltSvcHeader(port int, h3Alpns []string) *core.HeaderValueOption {
	// For example, www.cloudflare.com returns the following
	// alt-svc: h3-27=":443"; ma=86400, h3-28=":443"; ma=86400, h3-29=":443"; ma=86400, h3=":443"; ma=86400
	entries := make([]string, 0, len(h3Alpns))
	for _, alpn := range h3Alpns {
		// Max-age is hardcoded to 1 day for now.
		entries = append(entries, fmt.Sprintf(`%s=":%d"; ma=86400`, alpn, port))
	}
	return &core.HeaderValueOption{
		AppendAction: core.HeaderValueOption_APPEND_IF_EXISTS_OR_ADD,
		Header: &core.HeaderValue{
			Key:   util.AltSvcHeader,
			Value: strings.Join(entries, ", "),
		},
	}
}
// SortHeaderValueOption type and the functions below (Len, Less and Swap) are for sort.Stable for type HeaderValueOption.
// Sorting by header key gives deterministic output when the options originate from a (randomly ordered) map.
type SortHeaderValueOption []*core.HeaderValueOption
// MirrorPercent computes the mirror percent to be used based on "Mirror" data in route.
// The fractional MirrorPercentage field wins over the deprecated integer MirrorPercent;
// an explicit zero in either disables mirroring (nil result); absence of both means 100%.
func MirrorPercent(in *networking.HTTPRoute) *core.RuntimeFractionalPercent {
	if in.MirrorPercentage != nil {
		if in.MirrorPercentage.GetValue() > 0 {
			return &core.RuntimeFractionalPercent{
				DefaultValue: translatePercentToFractionalPercent(in.MirrorPercentage),
			}
		}
		// If zero percent is provided explicitly, we should not mirror.
		return nil
	}
	// nolint: staticcheck
	if in.MirrorPercent != nil {
		// nolint: staticcheck
		if v := in.MirrorPercent.GetValue(); v > 0 {
			return &core.RuntimeFractionalPercent{
				DefaultValue: translateIntegerToFractionalPercent(int32(v)),
			}
		}
		// If zero percent is provided explicitly, we should not mirror.
		return nil
	}
	// Default to 100 percent if percent is not given.
	return &core.RuntimeFractionalPercent{
		DefaultValue: translateIntegerToFractionalPercent(100),
	}
}
// MirrorPercentByPolicy computes the mirror percent to be used based on HTTPMirrorPolicy.
// An explicit zero percentage disables mirroring (nil result); a missing percentage means 100%.
func MirrorPercentByPolicy(mirror *networking.HTTPMirrorPolicy) *core.RuntimeFractionalPercent {
	if mirror.Percentage == nil {
		// Default to 100 percent if percent is not given.
		return &core.RuntimeFractionalPercent{
			DefaultValue: translateIntegerToFractionalPercent(100),
		}
	}
	if mirror.Percentage.GetValue() <= 0 {
		// If zero percent is provided explicitly, we should not mirror.
		return nil
	}
	return &core.RuntimeFractionalPercent{
		DefaultValue: translatePercentToFractionalPercent(mirror.Percentage),
	}
}
// Len is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Len() int {
	return len(b)
}
// Less is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Less(i, j int) bool {
	// Entries with a missing header always sort after entries that have one.
	switch {
	case b[i] == nil || b[i].Header == nil:
		return false
	case b[j] == nil || b[j].Header == nil:
		return true
	default:
		return b[i].Header.Key < b[j].Header.Key
	}
}
// Swap is in the sort.Interface for SortHeaderValueOption
func (b SortHeaderValueOption) Swap(i, j int) {
	b[i], b[j] = b[j], b[i]
}
// translateAppendHeaders converts a set/add header map into Envoy HeaderValueOptions,
// returning the options (stably sorted by key for deterministic output) and the
// authority override value, if any. appendFlag selects APPEND_IF_EXISTS_OR_ADD
// ("add" semantics) versus OVERWRITE_IF_EXISTS_OR_ADD ("set" semantics).
func translateAppendHeaders(headers map[string]string, appendFlag bool) ([]*core.HeaderValueOption, string) {
	if len(headers) == 0 {
		return nil, ""
	}
	appendAction := core.HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD
	if appendFlag {
		appendAction = core.HeaderValueOption_APPEND_IF_EXISTS_OR_ADD
	}
	authority := ""
	opts := make([]*core.HeaderValueOption, 0, len(headers))
	for key, value := range headers {
		if isAuthorityHeader(key) {
			// If there are multiple, last one wins; validation will reject
			authority = value
		}
		if isInternalHeader(key) {
			// Internal (":"-prefixed / host) headers cannot be modified directly by Envoy.
			continue
		}
		opts = append(opts, &core.HeaderValueOption{
			Header: &core.HeaderValue{
				Key:   key,
				Value: value,
			},
			AppendAction: appendAction,
		})
	}
	sort.Stable(SortHeaderValueOption(opts))
	return opts, authority
}
// HeadersOperations is the set of header manipulations derived from a
// networking.Headers spec: options to add (set/append) and names to remove,
// for both request and response, plus any ":authority"/Host override found.
type HeadersOperations struct {
	RequestHeadersToAdd     []*core.HeaderValueOption
	ResponseHeadersToAdd    []*core.HeaderValueOption
	RequestHeadersToRemove  []string
	ResponseHeadersToRemove []string
	// Authority holds the authority rewrite value ("" when none was specified).
	Authority string
}
// isInternalHeader returns true if a header refers to an internal value that cannot be modified by Envoy
func isInternalHeader(headerKey string) bool {
	if strings.HasPrefix(headerKey, ":") {
		return true
	}
	return strings.EqualFold(headerKey, "host")
}
// isAuthorityHeader returns true if a header refers to the authority header
func isAuthorityHeader(headerKey string) bool {
	return strings.EqualFold(headerKey, "host") || strings.EqualFold(headerKey, ":authority")
}
// dropInternal filters out internal (":"-prefixed / host) header names, which
// Envoy cannot remove, returning the remaining keys in order.
func dropInternal(keys []string) []string {
	filtered := make([]string, 0, len(keys))
	for _, key := range keys {
		if !isInternalHeader(key) {
			filtered = append(filtered, key)
		}
	}
	return filtered
}
// TranslateHeadersOperations translates headers operations from a VirtualService
// Headers spec into the Envoy representation. "set" options precede "add" options
// so an append can stack on top of an overwrite; internal headers are dropped
// from the remove lists.
func TranslateHeadersOperations(headers *networking.Headers) HeadersOperations {
	req := headers.GetRequest()
	resp := headers.GetResponse()

	reqSet, setAuthority := translateAppendHeaders(req.GetSet(), false)
	reqAppend, addAuthority := translateAppendHeaders(req.GetAdd(), true)
	respSet, _ := translateAppendHeaders(resp.GetSet(), false)
	respAppend, _ := translateAppendHeaders(resp.GetAdd(), true)

	// If authority is set in 'add' and 'set', pick the one from 'set'
	authority := setAuthority
	if authority == "" {
		authority = addAuthority
	}

	return HeadersOperations{
		RequestHeadersToAdd:     append(reqSet, reqAppend...),
		ResponseHeadersToAdd:    append(respSet, respAppend...),
		RequestHeadersToRemove:  dropInternal(req.GetRemove()),
		ResponseHeadersToRemove: dropInternal(resp.GetRemove()),
		Authority:               authority,
	}
}
// TranslateRouteMatch translates match condition from a VirtualService
// HTTPMatchRequest into an Envoy RouteMatch. A nil match yields a catch-all
// prefix "/" match. vs provides the owning config so Ingress/Gateway semantics
// can alter prefix matching; useExtendedJwt is forwarded to JWT-claim metadata
// matchers.
func TranslateRouteMatch(vs config.Config, in *networking.HTTPMatchRequest, useExtendedJwt bool) *route.RouteMatch {
	out := &route.RouteMatch{PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"}}
	if in == nil {
		return out
	}

	for name, stringMatch := range in.Headers {
		// The metadata matcher takes precedence over the header matcher.
		if metadataMatcher := translateMetadataMatch(name, stringMatch, useExtendedJwt); metadataMatcher != nil {
			out.DynamicMetadata = append(out.DynamicMetadata, metadataMatcher)
		} else {
			matcher := translateHeaderMatch(name, stringMatch)
			out.Headers = append(out.Headers, matcher)
		}
	}

	// WithoutHeaders entries are the same matchers with inverted semantics.
	for name, stringMatch := range in.WithoutHeaders {
		if metadataMatcher := translateMetadataMatch(name, stringMatch, useExtendedJwt); metadataMatcher != nil {
			metadataMatcher.Invert = true
			out.DynamicMetadata = append(out.DynamicMetadata, metadataMatcher)
		} else {
			matcher := translateHeaderMatch(name, stringMatch)
			matcher.InvertMatch = true
			out.Headers = append(out.Headers, matcher)
		}
	}

	// guarantee ordering of headers
	sort.Slice(out.Headers, func(i, j int) bool {
		return out.Headers[i].Name < out.Headers[j].Name
	})

	if in.Uri != nil {
		switch m := in.Uri.MatchType.(type) {
		case *networking.StringMatch_Exact:
			out.PathSpecifier = &route.RouteMatch_Path{Path: m.Exact}
		case *networking.StringMatch_Prefix:
			// Under Ingress/Gateway semantics, a non-root prefix matches on path
			// segment boundaries (PathSeparatedPrefix) instead of a raw string prefix.
			if (model.UseIngressSemantics(vs) || model.UseGatewaySemantics(vs)) && m.Prefix != "/" {
				path := strings.TrimSuffix(m.Prefix, "/")
				out.PathSpecifier = &route.RouteMatch_PathSeparatedPrefix{PathSeparatedPrefix: path}
			} else {
				out.PathSpecifier = &route.RouteMatch_Prefix{Prefix: m.Prefix}
			}
		case *networking.StringMatch_Regex:
			out.PathSpecifier = &route.RouteMatch_SafeRegex{
				SafeRegex: &matcher.RegexMatcher{
					Regex: m.Regex,
				},
			}
		}
	}

	out.CaseSensitive = &wrappers.BoolValue{Value: !in.IgnoreUriCase}

	// Method/authority/scheme are expressed as pseudo-header matchers; these are
	// appended after the sort above, matching the original emission order.
	if in.Method != nil {
		matcher := translateHeaderMatch(HeaderMethod, in.Method)
		out.Headers = append(out.Headers, matcher)
	}

	if in.Authority != nil {
		matcher := translateHeaderMatch(HeaderAuthority, in.Authority)
		out.Headers = append(out.Headers, matcher)
	}

	if in.Scheme != nil {
		matcher := translateHeaderMatch(HeaderScheme, in.Scheme)
		out.Headers = append(out.Headers, matcher)
	}

	for name, stringMatch := range in.QueryParams {
		matcher := translateQueryParamMatch(name, stringMatch)
		out.QueryParameters = append(out.QueryParameters, matcher)
	}

	return out
}
// translateQueryParamMatch translates a StringMatch to a QueryParameterMatcher.
// A catch-all match only requires the parameter to be present.
func translateQueryParamMatch(name string, in *networking.StringMatch) *route.QueryParameterMatcher {
	out := &route.QueryParameterMatcher{Name: name}
	if isCatchAllStringMatch(in) {
		out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_PresentMatch{
			PresentMatch: true,
		}
	} else if em := util.ConvertToEnvoyMatch(in); em != nil {
		out.QueryParameterMatchSpecifier = &route.QueryParameterMatcher_StringMatch{
			StringMatch: em,
		}
	}
	return out
}
// isCatchAllStringMatch determines if the given matcher is matched with all strings or not.
// A nil match (or nil match type) is catch-all; currently, a regex of "*" also counts.
func isCatchAllStringMatch(in *networking.StringMatch) bool {
	if in == nil || in.MatchType == nil {
		return true
	}
	if m, ok := in.MatchType.(*networking.StringMatch_Regex); ok {
		return m.Regex == "*"
	}
	return false
}
// translateMetadataMatch translates a header match to dynamic metadata matcher. Returns nil if the header is not supported
// or the header format is invalid for generating metadata matcher.
//
// The currently only supported header is @request.auth.claims for JWT claims matching. Claims of type string or list of string
// are supported and nested claims are also supported using `.` or `[]` as a separator for claim names, `[]` is recommended.
//
// Examples using `.` as a separator:
// - `@request.auth.claims.admin` matches the claim "admin".
// - `@request.auth.claims.group.id` matches the nested claims "group" and "id".
//
// Examples using `[]` as a separator:
// - `@request.auth.claims[admin]` matches the claim "admin".
// - `@request.auth.claims[group][id]` matches the nested claims "group" and "id".
func translateMetadataMatch(name string, in *networking.StringMatch, useExtendedJwt bool) *matcher.MetadataMatcher {
	claim := jwt.ToRoutingClaim(name)
	if !claim.Match {
		// Not a supported routing-claim header.
		return nil
	}
	return authz.MetadataMatcherForJWTClaims(claim.Claims, util.ConvertToEnvoyMatch(in), useExtendedJwt)
}
// translateHeaderMatch translates to HeaderMatcher.
// A catch-all match only requires the header to be present.
func translateHeaderMatch(name string, in *networking.StringMatch) *route.HeaderMatcher {
	out := &route.HeaderMatcher{Name: name}
	if isCatchAllStringMatch(in) {
		out.HeaderMatchSpecifier = &route.HeaderMatcher_PresentMatch{PresentMatch: true}
	} else if em := util.ConvertToEnvoyMatch(in); em != nil {
		out.HeaderMatchSpecifier = &route.HeaderMatcher_StringMatch{
			StringMatch: em,
		}
	}
	return out
}
// TranslateCORSPolicy translates CORS policy from the VirtualService API into
// Envoy's cors filter configuration. Returns nil when no policy is configured.
func TranslateCORSPolicy(in *networking.CorsPolicy) *cors.CorsPolicy {
	if in == nil {
		return nil
	}

	// CORS filter is enabled by default
	out := cors.CorsPolicy{
		FilterEnabled: &core.RuntimeFractionalPercent{
			DefaultValue: &xdstype.FractionalPercent{
				Numerator:   100,
				Denominator: xdstype.FractionalPercent_HUNDRED,
			},
		},
		AllowCredentials: in.AllowCredentials,
		AllowHeaders:     strings.Join(in.AllowHeaders, ","),
		AllowMethods:     strings.Join(in.AllowMethods, ","),
		ExposeHeaders:    strings.Join(in.ExposeHeaders, ","),
	}
	// nolint: staticcheck
	if in.AllowOrigins != nil {
		out.AllowOriginStringMatch = util.ConvertToEnvoyMatches(in.AllowOrigins)
	} else if in.AllowOrigin != nil {
		out.AllowOriginStringMatch = util.StringToExactMatch(in.AllowOrigin)
	}
	if in.MaxAge != nil {
		out.MaxAge = strconv.FormatInt(in.MaxAge.GetSeconds(), 10)
	}
	return &out
}
// GetRouteOperation returns readable route description for trace.
func GetRouteOperation(in *route.Route, vsName string, port int) string {
	path := "/*"
	m := in.GetMatch()
	switch ps := m.GetPathSpecifier().(type) {
	case *route.RouteMatch_Prefix:
		path = ps.Prefix + "*"
	case *route.RouteMatch_Path:
		path = ps.Path
	case *route.RouteMatch_SafeRegex:
		path = ps.SafeRegex.GetRegex()
	}

	// If there is only one destination cluster in route, return host:port/uri as description of route.
	// Otherwise there are multiple destination clusters and destination host is not clear. For that case
	// return virtual service name:port/uri as substitute.
	if cluster := in.GetRoute().GetCluster(); model.IsValidSubsetKey(cluster) {
		// Parse host and port from cluster name.
		_, _, h, p := model.ParseSubsetKey(cluster)
		return string(h) + ":" + strconv.Itoa(p) + path
	}
	return vsName + ":" + strconv.Itoa(port) + path
}
// BuildDefaultHTTPInboundRoute builds a default inbound route.
func BuildDefaultHTTPInboundRoute(clusterName string, operation string) *route.Route {
	r := buildDefaultHTTPRoute(clusterName, operation)
	// For inbound, configure with notimeout.
	ra := r.GetRoute()
	ra.Timeout = Notimeout
	ra.MaxStreamDuration = &route.RouteAction_MaxStreamDuration{
		MaxStreamDuration: Notimeout,
		// If not configured at all, the grpc-timeout header is not used and
		// gRPC requests time out like any other requests using timeout or its default.
		GrpcTimeoutHeaderMax: Notimeout,
	}
	return r
}
// buildDefaultHTTPRoute returns a basic route to the given cluster with a
// catch-all match and a tracing decorator set to operation.
func buildDefaultHTTPRoute(clusterName string, operation string) *route.Route {
	return &route.Route{
		Name:  DefaultRouteName,
		Match: TranslateRouteMatch(config.Config{}, nil, true),
		Decorator: &route.Decorator{
			Operation: operation,
		},
		Action: &route.Route_Route{
			Route: &route.RouteAction{
				ClusterSpecifier: &route.RouteAction_Cluster{Cluster: clusterName},
			},
		},
	}
}
// setTimeout sets timeout for a route.
// vsTimeout, when non-nil, overrides the default (Notimeout). For proxyless-gRPC
// nodes the timeout is mirrored into MaxStreamDuration (grpc's xDS impl will not
// read the deprecated MaxGrpcTimeout); for all other nodes the deprecated
// MaxGrpcTimeout field is still populated to control the grpc-timeout header.
func setTimeout(action *route.RouteAction, vsTimeout *duration.Duration, node *model.Proxy) {
	// Configure timeouts specified by Virtual Service if they are provided, otherwise set it to defaults.
	action.Timeout = Notimeout
	if vsTimeout != nil {
		action.Timeout = vsTimeout
	}
	if node != nil && node.IsProxylessGrpc() {
		// TODO(stevenctl) merge these paths; grpc's xDS impl will not read the deprecated value
		action.MaxStreamDuration = &route.RouteAction_MaxStreamDuration{
			MaxStreamDuration: action.Timeout,
		}
	} else {
		// If not configured at all, the grpc-timeout header is not used and
		// gRPC requests time out like any other requests using timeout or its default.
		// Use deprecated value for now as the replacement MaxStreamDuration has some regressions.
		// nolint: staticcheck
		if action.Timeout.AsDuration().Nanoseconds() == 0 {
			action.MaxGrpcTimeout = Notimeout
		} else {
			action.MaxGrpcTimeout = action.Timeout
		}
	}
}
// BuildDefaultHTTPOutboundRoute builds a default outbound route, including a retry policy.
func BuildDefaultHTTPOutboundRoute(clusterName string, operation string, mesh *meshconfig.MeshConfig) *route.Route {
	r := buildDefaultHTTPRoute(clusterName, operation)
	ra := r.GetRoute()
	// Add a default retry policy for outbound routes.
	ra.RetryPolicy = retry.ConvertPolicy(mesh.GetDefaultHttpRetryPolicy())
	// Apply default timeouts; nil node means no proxyless-gRPC special-casing.
	setTimeout(ra, nil, nil)
	return r
}
// translatePercentToFractionalPercent translates an v1alpha3 Percent instance
// to an envoy.type.FractionalPercent instance.
//
// The 0-100 percent value is scaled to a numerator out of one million,
// preserving up to four fractional digits of the percent.
func translatePercentToFractionalPercent(p *networking.Percent) *xdstype.FractionalPercent {
	return &xdstype.FractionalPercent{
		// Round to nearest instead of truncating: a bare uint32(v) conversion
		// drops the fractional part, so float artifacts such as
		// 0.29*10000 == 2899.9999999999995 would produce 2899 (28.99%) rather
		// than 2900 (29%). Percent values are non-negative, so adding 0.5
		// before converting implements round-half-up without math.Round.
		Numerator:   uint32(p.Value*10000 + 0.5),
		Denominator: xdstype.FractionalPercent_MILLION,
	}
}
// translateIntegerToFractionalPercent translates an int32 instance to an
// envoy.type.FractionalPercent instance (whole percent out of one hundred).
func translateIntegerToFractionalPercent(p int32) *xdstype.FractionalPercent {
	fp := &xdstype.FractionalPercent{
		Numerator:   uint32(p),
		Denominator: xdstype.FractionalPercent_HUNDRED,
	}
	return fp
}
// TranslateFault translates networking.HTTPFaultInjection into Envoy's HTTPFault.
// Returns nil when in is nil or when neither a usable delay nor abort could be
// produced (unsupported delay/abort types are dropped with a warning).
func TranslateFault(in *networking.HTTPFaultInjection) *xdshttpfault.HTTPFault {
	if in == nil {
		return nil
	}

	out := xdshttpfault.HTTPFault{}
	if in.Delay != nil {
		out.Delay = &xdsfault.FaultDelay{}
		// Prefer the fractional Percentage field; fall back to the deprecated integer Percent.
		if in.Delay.Percentage != nil {
			out.Delay.Percentage = translatePercentToFractionalPercent(in.Delay.Percentage)
		} else {
			out.Delay.Percentage = translateIntegerToFractionalPercent(in.Delay.Percent) // nolint: staticcheck
		}
		switch d := in.Delay.HttpDelayType.(type) {
		case *networking.HTTPFaultInjection_Delay_FixedDelay:
			out.Delay.FaultDelaySecifier = &xdsfault.FaultDelay_FixedDelay{
				FixedDelay: d.FixedDelay,
			}
		default:
			// Only fixed delays are supported; discard the delay entirely otherwise.
			log.Warnf("Exponential faults are not yet supported")
			out.Delay = nil
		}
	}

	if in.Abort != nil {
		out.Abort = &xdshttpfault.FaultAbort{}
		// Note: unlike delay, no integer-percent fallback is applied here.
		if in.Abort.Percentage != nil {
			out.Abort.Percentage = translatePercentToFractionalPercent(in.Abort.Percentage)
		}
		switch a := in.Abort.ErrorType.(type) {
		case *networking.HTTPFaultInjection_Abort_HttpStatus:
			out.Abort.ErrorType = &xdshttpfault.FaultAbort_HttpStatus{
				HttpStatus: uint32(a.HttpStatus),
			}
		case *networking.HTTPFaultInjection_Abort_GrpcStatus:
			// We wouldn't have an unknown gRPC code here. This is because
			// the validation webhook would have already caught the invalid
			// code and we wouldn't reach here.
			out.Abort.ErrorType = &xdshttpfault.FaultAbort_GrpcStatus{
				GrpcStatus: uint32(grpc.SupportedGRPCStatus[a.GrpcStatus]),
			}
		default:
			// Unsupported abort type; discard the abort entirely.
			log.Warnf("Only HTTP and gRPC type abort faults are supported")
			out.Abort = nil
		}
	}

	if out.Delay == nil && out.Abort == nil {
		return nil
	}

	return &out
}
// TranslateRequestMirrorPolicy builds an Envoy mirror policy that shadows
// traffic to the destination's cluster at the given runtime fraction.
// Mirrored requests are never trace-sampled.
func TranslateRequestMirrorPolicy(dst *networking.Destination, service *model.Service,
	listenerPort int, mp *core.RuntimeFractionalPercent,
) *route.RouteAction_RequestMirrorPolicy {
	policy := &route.RouteAction_RequestMirrorPolicy{
		Cluster:         GetDestinationCluster(dst, service, listenerPort),
		RuntimeFraction: mp,
		TraceSampled:    &wrappers.BoolValue{Value: false},
	}
	return policy
}
// portLevelSettingsConsistentHash returns the consistent-hash LB setting from
// the port-level traffic policy matching the destination's port, or nil when
// the destination has no port or no policy matches.
func portLevelSettingsConsistentHash(dst *networking.Destination,
	pls []*networking.TrafficPolicy_PortTrafficPolicy,
) *networking.LoadBalancerSettings_ConsistentHashLB {
	if dst.Port == nil {
		return nil
	}
	want := dst.GetPort().GetNumber()
	for _, setting := range pls {
		if setting.GetPort().GetNumber() == want {
			return setting.GetLoadBalancer().GetConsistentHash()
		}
	}
	return nil
}
// consistentHashToHashPolicy converts a DestinationRule consistent-hash LB
// setting into the equivalent Envoy route-level hash policy.
// Returns nil when consistentHash is nil or carries no hash key.
func consistentHashToHashPolicy(consistentHash *networking.LoadBalancerSettings_ConsistentHashLB) *route.RouteAction_HashPolicy {
	switch consistentHash.GetHashKey().(type) {
	case *networking.LoadBalancerSettings_ConsistentHashLB_HttpHeaderName:
		// Hash on the value of a request header.
		return &route.RouteAction_HashPolicy{
			PolicySpecifier: &route.RouteAction_HashPolicy_Header_{
				Header: &route.RouteAction_HashPolicy_Header{
					HeaderName: consistentHash.GetHttpHeaderName(),
				},
			},
		}
	case *networking.LoadBalancerSettings_ConsistentHashLB_HttpCookie:
		cookie := consistentHash.GetHttpCookie()
		return &route.RouteAction_HashPolicy{
			PolicySpecifier: &route.RouteAction_HashPolicy_Cookie_{
				Cookie: &route.RouteAction_HashPolicy_Cookie{
					Name: cookie.GetName(),
					// GetTtl is nil-safe and returns nil when unset, so the former
					// explicit `if cookie.GetTtl() != nil` guard was redundant.
					Ttl:  cookie.GetTtl(),
					Path: cookie.GetPath(),
				},
			},
		}
	case *networking.LoadBalancerSettings_ConsistentHashLB_UseSourceIp:
		// Hash on the downstream connection's source IP.
		return &route.RouteAction_HashPolicy{
			PolicySpecifier: &route.RouteAction_HashPolicy_ConnectionProperties_{
				ConnectionProperties: &route.RouteAction_HashPolicy_ConnectionProperties{
					SourceIp: consistentHash.GetUseSourceIp(),
				},
			},
		}
	case *networking.LoadBalancerSettings_ConsistentHashLB_HttpQueryParameterName:
		// Hash on the value of a query parameter.
		return &route.RouteAction_HashPolicy{
			PolicySpecifier: &route.RouteAction_HashPolicy_QueryParameter_{
				QueryParameter: &route.RouteAction_HashPolicy_QueryParameter{
					Name: consistentHash.GetHttpQueryParameterName(),
				},
			},
		}
	}
	return nil
}
// hashForService returns the consistent-hash LB setting that applies to the
// given service and port (port-level settings win over the traffic-policy
// default), along with the merged DestinationRule it came from.
// Returns nil, nil when there is no push context or no destination rule.
func hashForService(push *model.PushContext,
	node *model.Proxy,
	svc *model.Service,
	port *model.Port,
) (*networking.LoadBalancerSettings_ConsistentHashLB, *model.ConsolidatedDestRule) {
	if push == nil {
		return nil, nil
	}
	mergedDR := node.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, node, svc.Hostname)
	dr := mergedDR.GetRule()
	if dr == nil {
		return nil, nil
	}
	spec := dr.Spec.(*networking.DestinationRule)
	hash := spec.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
	for _, pls := range spec.GetTrafficPolicy().GetPortLevelSettings() {
		if int(pls.GetPort().GetNumber()) == port.Port {
			// A port-level consistent hash overrides the service-level one.
			if plsHash := pls.GetLoadBalancer().GetConsistentHash(); plsHash != nil {
				hash = plsHash
			}
			break
		}
	}
	return hash, mergedDR
}
// hashForVirtualService collects the consistent-hash settings for every HTTP
// route destination in the virtual service, along with the destination rules
// the settings were derived from.
func hashForVirtualService(push *model.PushContext,
	node *model.Proxy,
	virtualService config.Config,
) (DestinationHashMap, []*model.ConsolidatedDestRule) {
	hashes := DestinationHashMap{}
	rules := make([]*model.ConsolidatedDestRule, 0)
	vs := virtualService.Spec.(*networking.VirtualService)
	for _, httpRoute := range vs.Http {
		for _, dest := range httpRoute.Route {
			if hash, dr := hashForHTTPDestination(push, node, dest); hash != nil {
				hashes[dest] = hash
				rules = append(rules, dr)
			}
		}
	}
	return hashes, rules
}
// GetConsistentHashForVirtualService returns only the per-destination hash map
// for the virtual service, discarding the associated destination rules.
func GetConsistentHashForVirtualService(push *model.PushContext, node *model.Proxy, virtualService config.Config) DestinationHashMap {
	hashes, _ := hashForVirtualService(push, node, virtualService)
	return hashes
}
// hashForHTTPDestination return the ConsistentHashLB and the DestinationRule associated with HTTP route destination.
// Precedence, most to least specific: subset port-level hash, subset hash,
// service port-level hash, then the service-wide traffic-policy hash.
// Returns nil, nil when there is no push context or no destination rule.
func hashForHTTPDestination(push *model.PushContext, node *model.Proxy,
	dst *networking.HTTPRouteDestination,
) (*networking.LoadBalancerSettings_ConsistentHashLB, *model.ConsolidatedDestRule) {
	if push == nil {
		return nil, nil
	}

	destination := dst.GetDestination()
	mergedDR := node.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, node, host.Name(destination.Host))
	destinationRule := mergedDR.GetRule()
	if destinationRule == nil {
		return nil, nil
	}

	rule := destinationRule.Spec.(*networking.DestinationRule)

	// Service-wide and service port-level hash settings.
	consistentHash := rule.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
	portLevelSettings := rule.GetTrafficPolicy().GetPortLevelSettings()
	plsHash := portLevelSettingsConsistentHash(destination, portLevelSettings)

	// Subset-level (and subset port-level) hash settings for the matching subset, if any.
	var subsetHash, subsetPLSHash *networking.LoadBalancerSettings_ConsistentHashLB
	for _, subset := range rule.GetSubsets() {
		if subset.GetName() == destination.GetSubset() {
			subsetPortLevelSettings := subset.GetTrafficPolicy().GetPortLevelSettings()
			subsetHash = subset.GetTrafficPolicy().GetLoadBalancer().GetConsistentHash()
			subsetPLSHash = portLevelSettingsConsistentHash(destination, subsetPortLevelSettings)
			break
		}
	}

	// Pick the most specific configured hash.
	switch {
	case subsetPLSHash != nil:
		consistentHash = subsetPLSHash
	case subsetHash != nil:
		consistentHash = subsetHash
	case plsHash != nil:
		consistentHash = plsHash
	}
	return consistentHash, mergedDR
}
// isCatchAllMatch returns true if HTTPMatchRequest is a catchall match otherwise
// false. Note - this may not be exactly "catch all" as we don't know the full
// class of possible inputs As such, this is used only for optimization.
func isCatchAllMatch(m *networking.HTTPMatchRequest) bool {
	uriCatchAll := false
	if m.Uri != nil {
		switch uri := m.Uri.MatchType.(type) {
		case *networking.StringMatch_Prefix:
			uriCatchAll = uri.Prefix == "/"
		case *networking.StringMatch_Regex:
			uriCatchAll = uri.Regex == "*"
		}
	}
	if !uriCatchAll {
		return false
	}
	// A Match is catch all if and only if, besides a "/" prefix or "*" regex URI,
	// it carries no other condition at all.
	return len(m.Headers) == 0 &&
		len(m.QueryParams) == 0 &&
		len(m.SourceLabels) == 0 &&
		len(m.WithoutHeaders) == 0 &&
		len(m.Gateways) == 0 &&
		m.Method == nil &&
		m.Scheme == nil &&
		m.Port == 0 &&
		m.Authority == nil &&
		m.SourceNamespace == ""
}
// SortVHostRoutes moves the catch all routes alone to the end, while retaining
// the relative order of other routes in the slice.
func SortVHostRoutes(routes []*route.Route) []*route.Route {
	regular := make([]*route.Route, 0, len(routes))
	catchAll := make([]*route.Route, 0)
	for _, r := range routes {
		if isCatchAllRoute(r) {
			catchAll = append(catchAll, r)
		} else {
			regular = append(regular, r)
		}
	}
	return append(regular, catchAll...)
}
// isCatchAllRoute returns true if an Envoy route is a catchall route otherwise false.
func isCatchAllRoute(r *route.Route) bool {
	var catchall bool
	switch ps := r.Match.PathSpecifier.(type) {
	case *route.RouteMatch_Prefix:
		catchall = ps.Prefix == "/"
	case *route.RouteMatch_PathSeparatedPrefix:
		catchall = ps.PathSeparatedPrefix == "/"
	case *route.RouteMatch_SafeRegex:
		catchall = ps.SafeRegex.GetRegex() == "*"
	}
	if !catchall {
		return false
	}
	// A Match is catch all if and only if it has no header/query param match
	// and URI has a prefix / or regex *.
	return len(r.Match.Headers) == 0 &&
		len(r.Match.QueryParameters) == 0 &&
		len(r.Match.DynamicMetadata) == 0
}
// cutPrefix returns s without the provided leading prefix and reports whether
// the prefix was present. If s does not start with prefix, s is returned
// unchanged and found is false.
// This is exactly the contract of strings.CutPrefix (Go 1.20+), so delegate to
// the standard library instead of hand-rolling it.
func cutPrefix(s, prefix string) (after string, found bool) {
	return strings.CutPrefix(s, prefix)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package route
import (
"fmt"
"math/big"
"strconv"
"strings"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/hash"
)
var (
	// Separator delimits the top-level fields hashed into a route cache key.
	Separator = []byte{'~'}
	// Slash delimits sub-fields (e.g. hostname/namespace pairs) within a key field.
	Slash = []byte{'/'}
)
// Cache includes the variables that can influence a Route Configuration.
// Implements XdsCacheEntry interface.
type Cache struct {
	// RouteName identifies the route configuration; it is folded into Key.
	RouteName string
	// ProxyVersion is folded into Key so different proxy versions cache separately.
	ProxyVersion string
	// proxy cluster ID
	ClusterID string
	// proxy dns domain
	DNSDomain string
	// DNSCapture indicates whether the workload has enabled dns capture
	DNSCapture bool
	// DNSAutoAllocate indicates whether the workload should have auto allocated addresses for ServiceEntry
	// This allows resolving ServiceEntries, which is especially useful for distinguishing TCP traffic
	// This depends on DNSCapture.
	DNSAutoAllocate bool
	// AllowAny indicates if the proxy should allow all outbound traffic or only known registries
	AllowAny bool

	// ListenerPort is the port of the listener this route configuration is built for.
	// A zero value marks the entry as non-cacheable (see Cacheable).
	ListenerPort int
	// The config resources below are what the cached route was computed from.
	// They feed DependentConfigs so the entry is invalidated when any of them change.
	Services                []*model.Service
	VirtualServices         []config.Config
	DelegateVirtualServices []model.ConfigHash
	DestinationRules        []*model.ConsolidatedDestRule
	// EnvoyFilterKeys are "namespace/name" keys of the EnvoyFilters that apply.
	EnvoyFilterKeys []string
}
// Type returns the xDS type this cache entry holds (RDS).
func (r *Cache) Type() string {
	return model.RDSType
}
// Cacheable reports whether this route configuration may be cached. Entries
// without a listener port, and virtual services matching on source labels or
// source namespace (which make output proxy-specific), are not cacheable.
func (r *Cache) Cacheable() bool {
	if r == nil || r.ListenerPort == 0 {
		return false
	}

	for _, cfg := range r.VirtualServices {
		vs := cfg.Spec.(*networking.VirtualService)
		for _, httpRoute := range vs.Http {
			for _, m := range httpRoute.Match {
				// if vs has source match, not cacheable
				if len(m.SourceLabels) > 0 || m.SourceNamespace != "" {
					return false
				}
			}
		}
	}

	return true
}
// extractNamespaceForKubernetesService parses the namespace out of a Kubernetes
// service FQDN of the form <name>.<namespace>.svc.<domain>. It returns an error
// when the hostname lacks a ".svc." marker or a namespace segment.
// NOTE(review): error message wording (including "is a not a") is kept
// byte-for-byte for compatibility with existing consumers/tests.
func extractNamespaceForKubernetesService(hostname string) (string, error) {
	svcIdx := strings.Index(hostname, ".svc.")
	if svcIdx < 0 {
		return "", fmt.Errorf("hostname is a not a Kubernetes name, missing .svc: %v", hostname)
	}
	firstDot := strings.Index(hostname, ".")
	if firstDot+1 >= len(hostname) || firstDot+1 > svcIdx {
		// Invalid domain
		return "", fmt.Errorf("hostname is a not a Kubernetes name, missing namespace: %v", hostname)
	}
	ns := hostname[firstDot+1 : svcIdx]
	if ns == "" {
		return "", fmt.Errorf("namespace not found")
	}
	return ns, nil
}
// DependentConfigs implements XdsCacheEntry, returning the hash of every
// config this route configuration was built from: services (including their
// aliases), virtual services, delegate virtual services, destination rules,
// and envoy filters. Updates to any of these must invalidate the entry.
func (r *Cache) DependentConfigs() []model.ConfigHash {
	// Pre-size the result to avoid repeated growth.
	capacity := len(r.Services) + len(r.VirtualServices) + len(r.DelegateVirtualServices) + len(r.EnvoyFilterKeys)
	for _, dr := range r.DestinationRules {
		capacity += len(dr.GetFrom())
	}
	deps := make([]model.ConfigHash, 0, capacity)
	for _, svc := range r.Services {
		deps = append(deps, model.ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      string(svc.Hostname),
			Namespace: svc.Attributes.Namespace,
		}.HashCode())
		for _, alias := range svc.Attributes.Aliases {
			deps = append(deps, model.ConfigKey{Kind: kind.ServiceEntry, Name: alias.Hostname.String(), Namespace: alias.Namespace}.HashCode())
		}
	}
	for _, vs := range r.VirtualServices {
		for _, dep := range model.VirtualServiceDependencies(vs) {
			deps = append(deps, dep.HashCode())
		}
	}
	// Delegate virtual services are already hashes; add them directly so the
	// rds cache is cleared when a delegate virtual service is updated.
	deps = append(deps, r.DelegateVirtualServices...)
	for _, mergedDR := range r.DestinationRules {
		for _, dr := range mergedDR.GetFrom() {
			deps = append(deps, model.ConfigKey{Kind: kind.DestinationRule, Name: dr.Name, Namespace: dr.Namespace}.HashCode())
		}
	}
	for _, efKey := range r.EnvoyFilterKeys {
		// Keys are stored as "<namespace>/<name>".
		parts := strings.Split(efKey, "/")
		deps = append(deps, model.ConfigKey{Kind: kind.EnvoyFilter, Name: parts[1], Namespace: parts[0]}.HashCode())
	}
	return deps
}
// Key implements XdsCacheEntry, hashing every field that can influence the
// generated route configuration into a single cache key. The exact write
// sequence (including the Separator and Slash delimiter bytes) defines the
// key, so the statements below must not be reordered.
func (r *Cache) Key() any {
	// nolint: gosec
	// Not security sensitive code
	h := hash.New()
	h.WriteString(r.RouteName)
	h.Write(Separator)
	h.WriteString(r.ProxyVersion)
	h.Write(Separator)
	h.WriteString(r.ClusterID)
	h.Write(Separator)
	h.WriteString(r.DNSDomain)
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(r.DNSCapture))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(r.DNSAutoAllocate))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(r.AllowAny))
	h.Write(Separator)
	// Each list below is terminated by an extra Separator so adjacent lists
	// cannot collide.
	for _, svc := range r.Services {
		h.WriteString(string(svc.Hostname))
		h.Write(Slash)
		h.WriteString(svc.Attributes.Namespace)
		h.Write(Separator)
	}
	h.Write(Separator)
	for _, vs := range r.VirtualServices {
		for _, cfg := range model.VirtualServiceDependencies(vs) {
			h.WriteString(cfg.Kind.String())
			h.Write(Slash)
			h.WriteString(cfg.Name)
			h.Write(Slash)
			h.WriteString(cfg.Namespace)
			h.Write(Separator)
		}
	}
	h.Write(Separator)
	for _, vs := range r.DelegateVirtualServices {
		h.Write(hashToBytes(vs))
		h.Write(Separator)
	}
	h.Write(Separator)
	for _, mergedDR := range r.DestinationRules {
		for _, dr := range mergedDR.GetFrom() {
			h.WriteString(dr.Name)
			h.Write(Slash)
			h.WriteString(dr.Namespace)
			h.Write(Separator)
		}
	}
	h.Write(Separator)
	for _, efk := range r.EnvoyFilterKeys {
		h.WriteString(efk)
		h.Write(Separator)
	}
	h.Write(Separator)
	return h.Sum64()
}
// hashToBytes converts a ConfigHash to the big-endian byte representation
// produced by big.Int.Bytes (leading zero bytes stripped; a zero hash yields
// an empty slice), for feeding into the route cache key hash.
func hashToBytes(number model.ConfigHash) []byte {
	// Use a name that does not shadow the imported math/big package.
	n := new(big.Int)
	n.SetUint64(uint64(number))
	return n.Bytes()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"sort"
"strings"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/tunnelingconfig"
"istio.io/istio/pilot/pkg/networking/telemetry"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// matchTLS reports whether the given TLS match attributes select this proxy.
// All predicates here are static and evaluated at config-generation time: the
// match's gateways (if any) must include one of the proxy's gateways, its
// source labels must be a subset of the proxy's labels, its port (if set)
// must equal the listener port, and its source namespace (if set) must equal
// the proxy's namespace. A nil match selects everything.
func matchTLS(match *v1alpha3.TLSMatchAttributes, proxyLabels labels.Instance, gateways sets.String, port int, proxyNamespace string) bool {
	if match == nil {
		return true
	}
	gatewayMatch := len(match.Gateways) == 0
	for _, gw := range match.Gateways {
		if gateways.Contains(gw) {
			gatewayMatch = true
			break
		}
	}
	if !gatewayMatch {
		return false
	}
	if !labels.Instance(match.SourceLabels).SubsetOf(proxyLabels) {
		return false
	}
	if match.Port != 0 && match.Port != uint32(port) {
		return false
	}
	return match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
}
// matchTCP reports whether the given L4 match attributes select this proxy.
// All predicates here are static and evaluated at config-generation time: the
// match's gateways (if any) must include one of the proxy's gateways, its
// source labels must be a subset of the proxy's labels, its port (if set)
// must equal the listener port, and its source namespace (if set) must equal
// the proxy's namespace. A nil match selects everything.
func matchTCP(match *v1alpha3.L4MatchAttributes, proxyLabels labels.Instance, gateways sets.String, port int, proxyNamespace string) bool {
	if match == nil {
		return true
	}
	gatewayMatch := len(match.Gateways) == 0
	for _, gw := range match.Gateways {
		if gateways.Contains(gw) {
			gatewayMatch = true
			break
		}
	}
	if !gatewayMatch {
		return false
	}
	if !labels.Instance(match.SourceLabels).SubsetOf(proxyLabels) {
		return false
	}
	if match.Port != 0 && match.Port != uint32(port) {
		return false
	}
	return match.SourceNamespace == "" || match.SourceNamespace == proxyNamespace
}
// getConfigsForHost selects the virtual service configs whose hosts match the
// given hostname. When filterNamespace is non-empty, only configs from that
// namespace are considered.
func getConfigsForHost(filterNamespace string, hostname host.Name, configs []config.Config) []config.Config {
	svcConfigs := make([]config.Config, 0)
	for _, cfg := range configs {
		// The namespace filter does not depend on the host; evaluate it once
		// per config rather than once per host (it was loop-invariant inside
		// the inner loop).
		if filterNamespace != "" && filterNamespace != cfg.Namespace {
			continue
		}
		virtualService := cfg.Spec.(*v1alpha3.VirtualService)
		for _, vsHost := range virtualService.Hosts {
			if host.Name(vsHost).Matches(hostname) {
				svcConfigs = append(svcConfigs, cfg)
				break
			}
		}
	}
	return svcConfigs
}
// hashRuntimeTLSMatchPredicates builds a key identifying the runtime
// predicates of a TLS match (SNI hosts and destination subnets), used to
// deduplicate match blocks that differ only in static predicates.
func hashRuntimeTLSMatchPredicates(match *v1alpha3.TLSMatchAttributes) string {
	var b strings.Builder
	b.WriteString(strings.Join(match.SniHosts, ","))
	b.WriteByte('|')
	b.WriteString(strings.Join(match.DestinationSubnets, ","))
	return b.String()
}
// buildSidecarOutboundTLSFilterChainOpts builds the filter chain options for
// an outbound TLS/HTTPS listener port: one chain per distinct set of runtime
// TLS predicates found across the matching virtual service TLS blocks, plus a
// fallback chain routing directly to the service when no virtual service TLS
// match applies to this proxy. Returns nil for non-TLS ports.
func buildSidecarOutboundTLSFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
	service *model.Service, bind string, listenPort *model.Port,
	gateways sets.String, configs []config.Config,
) []*filterChainOpts {
	if !listenPort.Protocol.IsTLS() {
		return nil
	}
	actualWildcard, _ := getActualWildcardAndLocalHost(node)
	// TLS matches are composed of runtime and static predicates.
	// Static predicates can be evaluated during the generation of the config. Examples: gateway, source labels, etc.
	// Runtime predicates cannot be evaluated during config generation. Instead the proxy must be configured to
	// evaluate them. Examples: SNI hosts, source/destination subnets, etc.
	//
	// A list of matches may contain duplicate runtime matches, but different static matches. For example:
	//
	// {sni_hosts: A, sourceLabels: X} => destination M
	// {sni_hosts: A, sourceLabels: *} => destination N
	//
	// For a proxy with labels X, we can evaluate the static predicates to get:
	// {sni_hosts: A} => destination M
	// {sni_hosts: A} => destination N
	//
	// The matches have the same runtime predicates. Since the second match can never be reached, we only
	// want to generate config for the first match.
	//
	// To achieve this in this function we keep track of which runtime matches we have already generated config for
	// and only add config if the we have not already generated config for that set of runtime predicates.
	matchHasBeenHandled := sets.New[string]() // Runtime predicate set -> have we generated config for this set?
	// Is there a virtual service with a TLS block that matches us?
	hasTLSMatch := false
	lb := &ListenerBuilder{node: node, push: push}
	out := make([]*filterChainOpts, 0)
	for _, cfg := range configs {
		virtualService := cfg.Spec.(*v1alpha3.VirtualService)
		for _, tls := range virtualService.Tls {
			for _, match := range tls.Match {
				// Static predicates are checked here; only matching blocks get chains.
				if matchTLS(match, node.Labels, gateways, listenPort.Port, node.Metadata.Namespace) {
					// Use the service's CIDRs.
					// But if a virtual service overrides it with its own destination subnet match
					// give preference to the user provided one
					// destinationCIDR will be empty for services with VIPs
					var destinationCIDRs []string
					if destinationCIDR != "" {
						destinationCIDRs = []string{destinationCIDR}
					}
					// Only set CIDR match if the listener is bound to an IP.
					// If its bound to a unix domain socket, then ignore the CIDR matches
					// Unix domain socket bound ports have Port value set to 0
					if len(match.DestinationSubnets) > 0 && listenPort.Port > 0 {
						destinationCIDRs = match.DestinationSubnets
					}
					matchHash := hashRuntimeTLSMatchPredicates(match)
					if !matchHasBeenHandled.Contains(matchHash) {
						out = append(out, &filterChainOpts{
							metadata:         util.BuildConfigInfoMetadata(cfg.Meta),
							sniHosts:         match.SniHosts,
							destinationCIDRs: destinationCIDRs,
							networkFilters:   lb.buildOutboundNetworkFilters(tls.Route, listenPort, cfg.Meta, false),
						})
						hasTLSMatch = true
					}
					matchHasBeenHandled.Insert(matchHash)
				}
			}
		}
	}
	// HTTPS or TLS ports without associated virtual service
	if !hasTLSMatch {
		var sniHosts []string
		// In case of a sidecar config with user defined port, if the user specified port is not the same as the
		// service's port, then pick the service port if and only if the service has only one port. If service
		// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
		// traffic will most likely blackhole.
		port := listenPort.Port
		if len(service.Ports) == 1 {
			port = service.Ports[0].Port
		}
		clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
		statPrefix := clusterName
		// If stat name is configured, use it to build the stat prefix.
		if len(push.Mesh.OutboundClusterStatName) != 0 {
			statPrefix = telemetry.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, 0, &service.Attributes)
		}
		// Use the hostname as the SNI value if and only:
		// 1) if the destination is a CIDR;
		// 2) or if we have an empty destination VIP (i.e. which we should never get in case some platform adapter improper handlings);
		// 3) or if the destination is a wildcard destination VIP with the listener bound to the wildcard as well.
		// In the above cited cases, the listener will be bound to 0.0.0.0. So SNI match is the only way to distinguish different
		// target services. If we have a VIP, then we know the destination. Or if we do not have an VIP, but have
		// `PILOT_ENABLE_HEADLESS_SERVICE_POD_LISTENERS` enabled (by default) and applicable to all that's needed, pilot will generate
		// an outbound listener for each pod in a headless service. There is thus no need to do a SNI match. It saves us from having to
		// generate expensive permutations of the host name just like RDS does..
		// NOTE that we cannot have two services with the same VIP as our listener build logic will treat it as a collision and
		// ignore one of the services.
		svcListenAddress := service.GetAddressForProxy(node)
		if strings.Contains(svcListenAddress, "/") {
			// Address is a CIDR, already captured by destinationCIDR parameter.
			svcListenAddress = ""
		}
		if len(destinationCIDR) > 0 || len(svcListenAddress) == 0 || (svcListenAddress == actualWildcard && bind == actualWildcard) {
			sniHosts = []string{string(service.Hostname)}
			for _, a := range service.Attributes.Aliases {
				alt := GenerateAltVirtualHosts(a.Hostname.String(), 0, node.DNSDomain)
				sniHosts = append(sniHosts, a.Hostname.String())
				sniHosts = append(sniHosts, alt...)
			}
		}
		destinationRule := CastDestinationRule(node.SidecarScope.DestinationRule(
			model.TrafficDirectionOutbound, node, service.Hostname).GetRule())
		var destinationCIDRs []string
		if destinationCIDR != "" {
			destinationCIDRs = []string{destinationCIDR}
		}
		out = append(out, &filterChainOpts{
			sniHosts:         sniHosts,
			destinationCIDRs: destinationCIDRs,
			networkFilters: lb.buildOutboundNetworkFiltersWithSingleDestination(statPrefix, clusterName, "",
				listenPort, destinationRule, tunnelingconfig.Apply, false),
		})
	}
	return out
}
// buildSidecarOutboundTCPFilterChainOpts builds the filter chain options for
// an outbound plain-TCP listener port from the matching virtual service TCP
// routes. Generation stops as soon as a terminal (catch-all) chain is added —
// an implicit match, a match block with no destination subnets, or any match
// on a unix-domain-socket listener (Port == 0). If no terminal chain was
// produced, a default chain routing directly to the service is appended.
// Returns nil for TLS ports (handled by buildSidecarOutboundTLSFilterChainOpts).
func buildSidecarOutboundTCPFilterChainOpts(node *model.Proxy, push *model.PushContext, destinationCIDR string,
	service *model.Service, listenPort *model.Port,
	gateways sets.String, configs []config.Config,
) []*filterChainOpts {
	if listenPort.Protocol.IsTLS() {
		return nil
	}
	out := make([]*filterChainOpts, 0)
	lb := &ListenerBuilder{node: node, push: push}
	// very basic TCP
	// break as soon as we add one network filter with no destination addresses to match
	// This is the terminating condition in the filter chain match list
	defaultRouteAdded := false
TcpLoop:
	for _, cfg := range configs {
		virtualService := cfg.Spec.(*v1alpha3.VirtualService)
		for _, tcp := range virtualService.Tcp {
			var destinationCIDRs []string
			if destinationCIDR != "" {
				destinationCIDRs = []string{destinationCIDR}
			}
			if len(tcp.Match) == 0 {
				// implicit match
				out = append(out, &filterChainOpts{
					metadata:         util.BuildConfigInfoMetadata(cfg.Meta),
					destinationCIDRs: destinationCIDRs,
					networkFilters:   lb.buildOutboundNetworkFilters(tcp.Route, listenPort, cfg.Meta, false),
				})
				defaultRouteAdded = true
				break TcpLoop
			}
			// Use the service's virtual address first.
			// But if a virtual service overrides it with its own destination subnet match
			// give preference to the user provided one
			virtualServiceDestinationSubnets := make([]string, 0)
			for _, match := range tcp.Match {
				if matchTCP(match, node.Labels, gateways, listenPort.Port, node.Metadata.Namespace) {
					// Scan all the match blocks
					// if we find any match block without a runtime destination subnet match
					// i.e. match any destination address, then we treat it as the terminal match/catch all match
					// and break out of the loop. We also treat it as a terminal match if the listener is bound
					// to a unix domain socket.
					// But if we find only runtime destination subnet matches in all match blocks, collect them
					// (this is similar to virtual hosts in http) and create filter chain match accordingly.
					if len(match.DestinationSubnets) == 0 || listenPort.Port == 0 {
						out = append(out, &filterChainOpts{
							metadata:         util.BuildConfigInfoMetadata(cfg.Meta),
							destinationCIDRs: destinationCIDRs,
							networkFilters:   lb.buildOutboundNetworkFilters(tcp.Route, listenPort, cfg.Meta, false),
						})
						defaultRouteAdded = true
						break TcpLoop
					}
					virtualServiceDestinationSubnets = append(virtualServiceDestinationSubnets, match.DestinationSubnets...)
				}
			}
			if len(virtualServiceDestinationSubnets) > 0 {
				out = append(out, &filterChainOpts{
					destinationCIDRs: virtualServiceDestinationSubnets,
					networkFilters:   lb.buildOutboundNetworkFilters(tcp.Route, listenPort, cfg.Meta, false),
				})
				// If at this point there is a filter chain generated with the same CIDR match as the
				// one that may be generated for the service as the default route, do not generate it.
				// Otherwise, Envoy will complain about having filter chains with identical matches
				// and will reject the config.
				// NOTE(review): this sort also reorders the slice stored in the filterChainOpts
				// appended just above (shared backing array) — presumably the order of CIDR
				// matches within a chain is not significant; confirm.
				sort.Strings(virtualServiceDestinationSubnets)
				sort.Strings(destinationCIDRs)
				if slices.Equal(virtualServiceDestinationSubnets, destinationCIDRs) {
					log.Warnf("Existing filter chain with same matching CIDR: %v.", destinationCIDRs)
					defaultRouteAdded = true
				}
			}
		}
	}
	if !defaultRouteAdded {
		// In case of a sidecar config with user defined port, if the user specified port is not the same as the
		// service's port, then pick the service port if and only if the service has only one port. If service
		// has multiple ports, then route to a cluster with the listener port (i.e. sidecar defined port) - the
		// traffic will most likely blackhole.
		port := listenPort.Port
		if len(service.Ports) == 1 {
			port = service.Ports[0].Port
		}
		clusterName := model.BuildSubsetKey(model.TrafficDirectionOutbound, "", service.Hostname, port)
		statPrefix := clusterName
		destinationRule := CastDestinationRule(node.SidecarScope.DestinationRule(
			model.TrafficDirectionOutbound, node, service.Hostname).GetRule())
		// If stat name is configured, use it to build the stat prefix.
		if len(push.Mesh.OutboundClusterStatName) != 0 {
			statPrefix = telemetry.BuildStatPrefix(push.Mesh.OutboundClusterStatName, string(service.Hostname), "", &model.Port{Port: port}, 0, &service.Attributes)
		}
		var destinationCIDRs []string
		if destinationCIDR != "" {
			destinationCIDRs = []string{destinationCIDR}
		}
		out = append(out, &filterChainOpts{
			destinationCIDRs: destinationCIDRs,
			networkFilters: lb.buildOutboundNetworkFiltersWithSingleDestination(statPrefix, clusterName, "",
				listenPort, destinationRule, tunnelingconfig.Apply, false),
		})
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"fmt"
"net/url"
"sort"
"strconv"
opb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
tracingcfg "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
envoy_type_metadata_v3 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
tracing "github.com/envoyproxy/go-control-plane/envoy/type/tracing/v3"
xdstype "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
telemetrypb "istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
authz_model "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/requestidextension"
"istio.io/istio/pkg/bootstrap/platform"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/wellknown"
)
// Well-known Envoy tracer extension names, used as the provider name in the
// HttpConnectionManager tracing config.
const (
	envoyDatadog       = "envoy.tracers.datadog"
	envoyOpenCensus    = "envoy.tracers.opencensus"
	envoyOpenTelemetry = "envoy.tracers.opentelemetry"
	envoySkywalking    = "envoy.tracers.skywalking"
	envoyZipkin        = "envoy.tracers.zipkin"
)

// this is used for testing. it should not be changed in regular code.
var clusterLookupFn = model.LookupCluster

// typedConfigGenFn lazily produces a tracer's typed provider configuration.
type typedConfigGenFn func() (*anypb.Any, error)
// configureTracing resolves the Telemetry API tracing configuration for the
// proxy and applies it to the given HttpConnectionManager. It returns whether
// the configured tracer starts child spans and the request-ID extension
// context (nil when tracing is not configured).
func configureTracing(
	push *model.PushContext,
	proxy *model.Proxy,
	httpConnMgr *hcm.HttpConnectionManager,
	class networking.ListenerClass,
) (bool, *requestidextension.UUIDRequestIDExtensionContext) {
	return configureTracingFromTelemetry(push.Telemetry.Tracing(proxy), push, proxy, httpConnMgr, class)
}
// configureTracingFromTelemetry applies the given Telemetry tracing spec to
// the HttpConnectionManager. When the spec is nil it falls back to the legacy
// MeshConfig tracing settings. It returns whether the configured tracer
// starts child spans, and the request-ID extension context (nil when tracing
// is disabled or could not be configured).
func configureTracingFromTelemetry(
	tracing *model.TracingConfig,
	push *model.PushContext,
	proxy *model.Proxy,
	h *hcm.HttpConnectionManager,
	class networking.ListenerClass,
) (bool, *requestidextension.UUIDRequestIDExtensionContext) {
	proxyCfg := proxy.Metadata.ProxyConfigOrDefault(push.Mesh.DefaultConfig)
	// If there is no telemetry config defined, fallback to legacy mesh config.
	if tracing == nil {
		meshCfg := push.Mesh
		if !meshCfg.EnableTracing {
			log.Debug("No valid tracing configuration found")
			return false, nil
		}
		// use the prior configuration bits of sampling and custom tags
		h.Tracing = &hcm.HttpConnectionManager_Tracing{}
		configureSampling(h.Tracing, proxyConfigSamplingValue(proxyCfg))
		configureCustomTags(h.Tracing, map[string]*telemetrypb.Tracing_CustomTag{}, proxyCfg, proxy)
		if proxyCfg.GetTracing().GetMaxPathTagLength() != 0 {
			h.Tracing.MaxPathTagLength = wrapperspb.UInt32(proxyCfg.GetTracing().MaxPathTagLength)
		}
		return false, nil
	}
	// Client-side listeners (sidecar outbound, gateway) use the client spec;
	// everything else uses the server spec.
	spec := tracing.ServerSpec
	if class == networking.ListenerClassSidecarOutbound || class == networking.ListenerClassGateway {
		spec = tracing.ClientSpec
	}
	if spec.Disabled {
		return false, nil
	}
	var startChildSpan bool
	if spec.Provider != nil {
		tcfg, child, err := configureFromProviderConfig(push, proxy, spec.Provider)
		if err != nil {
			log.Warnf("Not able to configure requested tracing provider %q: %v", spec.Provider.Name, err)
			return false, nil
		}
		h.Tracing = tcfg
		startChildSpan = child
	} else {
		// TODO: should this `return nil, nil` instead ?
		log.Warnf("Not able to configure tracing provider. Provider lookup failed.")
		h.Tracing = &hcm.HttpConnectionManager_Tracing{}
		// TODO: transition to configuring providers from proxy config here?
		// something like: configureFromProxyConfig(tracingCfg, opts.proxy.Metadata.ProxyConfig.Tracing)
	}
	var sampling float64
	if spec.RandomSamplingPercentage != nil {
		sampling = *spec.RandomSamplingPercentage
	} else {
		// gracefully fallback to MeshConfig configuration. It will act as an implicit
		// parent configuration during transition period.
		sampling = proxyConfigSamplingValue(proxyCfg)
	}
	configureSampling(h.Tracing, sampling)
	configureCustomTags(h.Tracing, spec.CustomTags, proxyCfg, proxy)
	// if there is configured max tag length somewhere, fallback to it.
	if h.GetTracing().GetMaxPathTagLength() == nil && proxyCfg.GetTracing().GetMaxPathTagLength() != 0 {
		h.Tracing.MaxPathTagLength = wrapperspb.UInt32(proxyCfg.GetTracing().MaxPathTagLength)
	}
	reqIDExtension := &requestidextension.UUIDRequestIDExtensionContext{}
	reqIDExtension.UseRequestIDForTraceSampling = spec.UseRequestIDForTraceSampling
	return startChildSpan, reqIDExtension
}
// configureFromProviderConfigHandled contains the number of providers we handle below.
// This is to ensure this stays in sync as new handlers are added
// (each case arm of the type switch in configureFromProviderConfig counts as one).
// STOP. DO NOT UPDATE THIS WITHOUT UPDATING configureFromProviderConfig.
const configureFromProviderConfigHandled = 14
// configureFromProviderConfig builds the HCM tracing configuration for the
// given extension provider. It returns the tracing config, whether the tracer
// requires Envoy to start child spans (only Skywalking in the cases below),
// and an error when the provider does not support tracing or its collector
// cluster cannot be resolved.
func configureFromProviderConfig(pushCtx *model.PushContext, proxy *model.Proxy,
	providerCfg *meshconfig.MeshConfig_ExtensionProvider,
) (*hcm.HttpConnectionManager_Tracing, bool, error) {
	startChildSpan := false
	var serviceCluster string
	var maxTagLength uint32
	var providerConfig typedConfigGenFn
	var providerName string
	if proxy.XdsNode != nil {
		serviceCluster = proxy.XdsNode.Cluster
	}
	// Each case sets providerName/maxTagLength and a lazy providerConfig that
	// is only invoked (and can only fail) inside buildHCMTracing below.
	switch provider := providerCfg.Provider.(type) {
	case *meshconfig.MeshConfig_ExtensionProvider_Zipkin:
		maxTagLength = provider.Zipkin.GetMaxTagLength()
		providerName = envoyZipkin
		providerConfig = func() (*anypb.Any, error) {
			hostname, cluster, err := clusterLookupFn(pushCtx, provider.Zipkin.GetService(), int(provider.Zipkin.GetPort()))
			if err != nil {
				model.IncLookupClusterFailures("zipkin")
				return nil, fmt.Errorf("could not find cluster for tracing provider %q: %v", provider, err)
			}
			return zipkinConfig(hostname, cluster, !provider.Zipkin.GetEnable_64BitTraceId())
		}
	case *meshconfig.MeshConfig_ExtensionProvider_Datadog:
		maxTagLength = provider.Datadog.GetMaxTagLength()
		providerName = envoyDatadog
		providerConfig = func() (*anypb.Any, error) {
			hostname, cluster, err := clusterLookupFn(pushCtx, provider.Datadog.GetService(), int(provider.Datadog.GetPort()))
			if err != nil {
				model.IncLookupClusterFailures("datadog")
				return nil, fmt.Errorf("could not find cluster for tracing provider %q: %v", provider, err)
			}
			return datadogConfig(serviceCluster, hostname, cluster)
		}
	case *meshconfig.MeshConfig_ExtensionProvider_Lightstep:
		//nolint: staticcheck  // Lightstep deprecated
		maxTagLength = provider.Lightstep.GetMaxTagLength()
		providerName = envoyOpenTelemetry
		//nolint: staticcheck  // Lightstep deprecated
		providerConfig = func() (*anypb.Any, error) {
			hostname, clusterName, err := clusterLookupFn(pushCtx, provider.Lightstep.GetService(), int(provider.Lightstep.GetPort()))
			if err != nil {
				model.IncLookupClusterFailures("lightstep")
				return nil, fmt.Errorf("could not find cluster for tracing provider %q: %v", provider, err)
			}
			return otelLightStepConfig(clusterName, hostname, provider.Lightstep.GetAccessToken())
		}
	case *meshconfig.MeshConfig_ExtensionProvider_Opencensus:
		//nolint: staticcheck
		maxTagLength = provider.Opencensus.GetMaxTagLength()
		providerName = envoyOpenCensus
		providerConfig = func() (*anypb.Any, error) {
			//nolint: staticcheck
			return opencensusConfig(provider.Opencensus)
		}
	case *meshconfig.MeshConfig_ExtensionProvider_Skywalking:
		maxTagLength = 0
		providerName = envoySkywalking
		providerConfig = func() (*anypb.Any, error) {
			hostname, clusterName, err := clusterLookupFn(pushCtx, provider.Skywalking.GetService(), int(provider.Skywalking.GetPort()))
			if err != nil {
				model.IncLookupClusterFailures("skywalking")
				return nil, fmt.Errorf("could not find cluster for tracing provider %q: %v", provider, err)
			}
			return skywalkingConfig(clusterName, hostname)
		}
		// Skywalking is the only provider that requires child spans.
		startChildSpan = true
	case *meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
		maxTagLength = provider.Stackdriver.GetMaxTagLength()
		providerName = envoyOpenCensus
		providerConfig = func() (*anypb.Any, error) {
			return stackdriverConfig(proxy.Metadata, provider.Stackdriver)
		}
	case *meshconfig.MeshConfig_ExtensionProvider_Opentelemetry:
		maxTagLength = provider.Opentelemetry.GetMaxTagLength()
		providerName = envoyOpenTelemetry
		providerConfig = func() (*anypb.Any, error) {
			hostname, clusterName, err := clusterLookupFn(pushCtx, provider.Opentelemetry.GetService(), int(provider.Opentelemetry.GetPort()))
			if err != nil {
				model.IncLookupClusterFailures("opentelemetry")
				return nil, fmt.Errorf("could not find cluster for tracing provider %q: %v", provider, err)
			}
			return otelConfig(serviceCluster, hostname, clusterName, provider.Opentelemetry)
		}
	// Providers without any tracing support
	// Explicitly list to be clear what does and does not support tracing
	case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzHttp,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzGrpc,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpAls,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpAls,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyOtelAls,
		*meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLog,
		*meshconfig.MeshConfig_ExtensionProvider_Prometheus:
		return nil, false, fmt.Errorf("provider %T does not support tracing", provider)
	// Should never happen, but just in case we forget to add one
	default:
		return nil, false, fmt.Errorf("provider %T does not support tracing", provider)
	}
	tracing, err := buildHCMTracing(providerName, maxTagLength, providerConfig)
	return tracing, startChildSpan, err
}
// zipkinConfig builds the Envoy Zipkin tracer configuration targeting the
// given collector cluster and hostname, optionally with 128-bit trace IDs.
func zipkinConfig(hostname, cluster string, enable128BitTraceID bool) (*anypb.Any, error) {
	cfg := &tracingcfg.ZipkinConfig{
		CollectorCluster:         cluster,
		CollectorEndpoint:        "/api/v2/spans", // envoy deprecated v1 support
		CollectorEndpointVersion: tracingcfg.ZipkinConfig_HTTP_JSON, // use v2 JSON for now
		CollectorHostname:        hostname, // http host header
		TraceId_128Bit:           enable128BitTraceID, // istio default enable 128 bit trace id
		SharedSpanContext:        wrapperspb.Bool(false),
	}
	return protoconv.MessageToAnyWithError(cfg)
}
// datadogConfig builds the Envoy Datadog tracer configuration targeting the
// given collector cluster and hostname, reporting under serviceName.
func datadogConfig(serviceName, hostname, cluster string) (*anypb.Any, error) {
	cfg := &tracingcfg.DatadogConfig{
		CollectorCluster:  cluster,
		ServiceName:       serviceName,
		CollectorHostname: hostname,
	}
	return protoconv.MessageToAnyWithError(cfg)
}
// otelConfig builds the Envoy OpenTelemetry tracer configuration. Export goes
// over OTLP/HTTP when the provider specifies an HTTP service, otherwise over
// OTLP/gRPC to the given cluster.
func otelConfig(serviceName, hostname, cluster string, otelProvider *meshconfig.MeshConfig_ExtensionProvider_OpenTelemetryTracingProvider) (*anypb.Any, error) {
	var cfg *tracingcfg.OpenTelemetryConfig
	if httpService := otelProvider.GetHttp(); httpService != nil {
		// export via HTTP
		endpoint, err := url.JoinPath(hostname, httpService.GetPath())
		if err != nil {
			return nil, fmt.Errorf("could not parse otlp/http traces endpoint: %v", err)
		}
		cfg = &tracingcfg.OpenTelemetryConfig{
			HttpService: &core.HttpService{
				HttpUri: &core.HttpUri{
					Uri: endpoint,
					HttpUpstreamType: &core.HttpUri_Cluster{
						Cluster: cluster,
					},
					Timeout: httpService.GetTimeout(),
				},
			},
		}
		// Forward any provider-configured headers with the export requests.
		for _, header := range httpService.GetHeaders() {
			opt := &core.HeaderValueOption{
				AppendAction: core.HeaderValueOption_OVERWRITE_IF_EXISTS_OR_ADD,
				Header: &core.HeaderValue{
					Key:   header.GetName(),
					Value: header.GetValue(),
				},
			}
			cfg.HttpService.RequestHeadersToAdd = append(cfg.GetHttpService().GetRequestHeadersToAdd(), opt)
		}
	} else {
		// export via gRPC
		cfg = &tracingcfg.OpenTelemetryConfig{
			GrpcService: &core.GrpcService{
				TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
					EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
						ClusterName: cluster,
						Authority:   hostname,
					},
				},
			},
		}
	}
	cfg.ServiceName = serviceName
	return anypb.New(cfg)
}
// opencensusConfig builds the Envoy OpenCensus tracer configuration pointing
// at the configured OC agent.
func opencensusConfig(opencensusProvider *meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider) (*anypb.Any, error) {
	agentAddress := fmt.Sprintf("%s:%d", opencensusProvider.GetService(), opencensusProvider.GetPort())
	cfg := &tracingcfg.OpenCensusConfig{
		OcagentAddress:         agentAddress,
		OcagentExporterEnabled: true,
		// this is incredibly dangerous for proxy stability, as switching provider config for OC providers
		// is not allowed during the lifetime of a proxy.
		IncomingTraceContext: convert(opencensusProvider.GetContext()),
		OutgoingTraceContext: convert(opencensusProvider.GetContext()),
	}
	return protoconv.MessageToAnyWithError(cfg)
}
// stackdriverConfig builds an OpenCensus tracer configuration that exports to
// Stackdriver (Google Cloud Trace). The GCP project is taken from the proxy's
// platform metadata (project ID, falling back to project number); when the
// proxy exposes an STS port, the exporter authenticates to cloudtrace via the
// local STS token-exchange service.
func stackdriverConfig(proxyMetaData *model.NodeMetadata, sdProvider *meshconfig.MeshConfig_ExtensionProvider_StackdriverProvider) (*anypb.Any, error) {
	proj, ok := proxyMetaData.PlatformMetadata[platform.GCPProject]
	if !ok {
		proj, ok = proxyMetaData.PlatformMetadata[platform.GCPProjectNumber]
	}
	if !ok {
		return nil, fmt.Errorf("could not configure Stackdriver tracer - unknown project id")
	}
	sd := &tracingcfg.OpenCensusConfig{
		StackdriverExporterEnabled: true,
		StackdriverProjectId:       proj,
		IncomingTraceContext:       allContexts,
		OutgoingTraceContext:       allContexts,
		// supporting dynamic control is considered harmful, as OC can only be configured once per lifetime
		StdoutExporterEnabled: false,
		TraceConfig: &opb.TraceConfig{
			MaxNumberOfAnnotations:   200,
			MaxNumberOfAttributes:    200,
			MaxNumberOfMessageEvents: 200,
		},
	}
	if proxyMetaData.StsPort != "" {
		stsPort, err := strconv.Atoi(proxyMetaData.StsPort)
		if err != nil || stsPort < 1 {
			return nil, fmt.Errorf("could not configure Stackdriver tracer - bad sts port: %v", err)
		}
		tokenPath := constants.TrustworthyJWTPath
		// nolint: staticcheck
		sd.StackdriverGrpcService = &core.GrpcService{
			InitialMetadata: []*core.HeaderValue{
				{
					Key:   "x-goog-user-project",
					Value: proj,
				},
			},
			TargetSpecifier: &core.GrpcService_GoogleGrpc_{
				GoogleGrpc: &core.GrpcService_GoogleGrpc{
					TargetUri:  "cloudtrace.googleapis.com",
					StatPrefix: "oc_stackdriver_tracer",
					ChannelCredentials: &core.GrpcService_GoogleGrpc_ChannelCredentials{
						CredentialSpecifier: &core.GrpcService_GoogleGrpc_ChannelCredentials_SslCredentials{
							SslCredentials: &core.GrpcService_GoogleGrpc_SslCredentials{},
						},
					},
					CallCredentials: []*core.GrpcService_GoogleGrpc_CallCredentials{
						{
							CredentialSpecifier: &core.GrpcService_GoogleGrpc_CallCredentials_StsService_{
								StsService: &core.GrpcService_GoogleGrpc_CallCredentials_StsService{
									TokenExchangeServiceUri: fmt.Sprintf("http://localhost:%d/token", stsPort),
									SubjectTokenPath:        tokenPath,
									SubjectTokenType:        "urn:ietf:params:oauth:token-type:jwt",
									Scope:                   "https://www.googleapis.com/auth/cloud-platform",
								},
							},
						},
					},
				},
			},
		}
	}
	// supporting dynamic control is considered harmful, as OC can only be configured once per lifetime
	// so, we should not allow dynamic control based on provider configuration of the following params:
	// - max number of annotations
	// - max number of attributes
	// - max number of message events
	// The following code block allows control for a single configuration once during the lifecycle of a
	// mesh.
	// nolint: staticcheck
	if sdProvider.GetMaxNumberOfAnnotations() != nil {
		sd.TraceConfig.MaxNumberOfAnnotations = sdProvider.GetMaxNumberOfAnnotations().GetValue()
	}
	// nolint: staticcheck
	if sdProvider.GetMaxNumberOfAttributes() != nil {
		sd.TraceConfig.MaxNumberOfAttributes = sdProvider.GetMaxNumberOfAttributes().GetValue()
	}
	// nolint: staticcheck
	if sdProvider.GetMaxNumberOfMessageEvents() != nil {
		sd.TraceConfig.MaxNumberOfMessageEvents = sdProvider.GetMaxNumberOfMessageEvents().GetValue()
	}
	return protoconv.MessageToAnyWithError(sd)
}
// skywalkingConfig builds the SkyWalking tracer configuration, reporting spans
// over Envoy gRPC to the given cluster/authority, packed into an Any.
func skywalkingConfig(clusterName, hostname string) (*anypb.Any, error) {
	grpcService := &core.GrpcService{
		TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
			EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
				ClusterName: clusterName,
				Authority:   hostname,
			},
		},
	}
	return protoconv.MessageToAnyWithError(&tracingcfg.SkyWalkingConfig{
		GrpcService: grpcService,
	})
}
// otelLightStepConfig builds an OpenTelemetry tracer configuration used for
// Lightstep integration: spans are exported over Envoy gRPC to clusterName,
// with the access token attached as initial gRPC metadata on every export RPC.
func otelLightStepConfig(clusterName, hostname, accessToken string) (*anypb.Any, error) {
	dc := &tracingcfg.OpenTelemetryConfig{
		GrpcService: &core.GrpcService{
			TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
				EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
					ClusterName: clusterName,
					Authority:   hostname,
				},
			},
			// Lightstep authenticates via this metadata header.
			InitialMetadata: []*core.HeaderValue{
				{
					Key:   "lightstep-access-token",
					Value: accessToken,
				},
			},
		},
	}
	// Use protoconv for consistency with the other tracer config builders in
	// this file (e.g. skywalkingConfig).
	return protoconv.MessageToAnyWithError(dc)
}
// buildHCMTracing assembles an HttpConnectionManager tracing stanza for the
// named provider, using anyFn to produce the provider's typed config and
// applying the optional max path tag length when non-zero.
func buildHCMTracing(provider string, maxTagLen uint32, anyFn typedConfigGenFn) (*hcm.HttpConnectionManager_Tracing, error) {
	tracingCfg := &hcm.HttpConnectionManager_Tracing{}

	typedCfg, err := anyFn()
	if err != nil {
		return tracingCfg, fmt.Errorf("could not configure tracing provider %q: %v", provider, err)
	}

	tracingCfg.Provider = &tracingcfg.Tracing_Http{
		Name:       provider,
		ConfigType: &tracingcfg.Tracing_Http_TypedConfig{TypedConfig: typedCfg},
	}
	if maxTagLen != 0 {
		tracingCfg.MaxPathTagLength = &wrapperspb.UInt32Value{Value: maxTagLen}
	}
	return tracingCfg, nil
}
// allContexts is the full set of trace-context propagation formats supported
// by the OpenCensus tracer config; used as the default when a provider does
// not restrict the incoming/outgoing contexts (see convert below).
var allContexts = []tracingcfg.OpenCensusConfig_TraceContext{
	tracingcfg.OpenCensusConfig_B3,
	tracingcfg.OpenCensusConfig_CLOUD_TRACE_CONTEXT,
	tracingcfg.OpenCensusConfig_GRPC_TRACE_BIN,
	tracingcfg.OpenCensusConfig_TRACE_CONTEXT,
}
// convert maps mesh-config OpenCensus trace-context values onto their Envoy
// OpenCensusConfig equivalents. An empty input selects every supported format.
func convert(ctxs []meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider_TraceContext) []tracingcfg.OpenCensusConfig_TraceContext {
	if len(ctxs) == 0 {
		return allContexts
	}
	out := make([]tracingcfg.OpenCensusConfig_TraceContext, 0, len(ctxs))
	for _, tc := range ctxs {
		switch tc {
		case meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider_B3:
			out = append(out, tracingcfg.OpenCensusConfig_B3)
		case meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider_CLOUD_TRACE_CONTEXT:
			out = append(out, tracingcfg.OpenCensusConfig_CLOUD_TRACE_CONTEXT)
		case meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider_GRPC_BIN:
			out = append(out, tracingcfg.OpenCensusConfig_GRPC_TRACE_BIN)
		case meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider_W3C_TRACE_CONTEXT:
			out = append(out, tracingcfg.OpenCensusConfig_TRACE_CONTEXT)
		}
	}
	return out
}
// dryRunPolicyTraceTag builds a custom span tag whose value is read from
// request dynamic metadata written by the HTTP RBAC filter under the given key.
// The tag will not be populated when not used as there is no default value set for the tag.
// See https://www.envoyproxy.io/docs/envoy/v1.17.1/configuration/http/http_filters/rbac_filter#dynamic-metadata.
func dryRunPolicyTraceTag(name, key string) *tracing.CustomTag {
	metadataKey := &envoy_type_metadata_v3.MetadataKey{
		Key: wellknown.HTTPRoleBasedAccessControl,
		Path: []*envoy_type_metadata_v3.MetadataKey_PathSegment{
			{
				Segment: &envoy_type_metadata_v3.MetadataKey_PathSegment_Key{
					Key: key,
				},
			},
		},
	}
	return &tracing.CustomTag{
		Tag: name,
		Type: &tracing.CustomTag_Metadata_{
			Metadata: &tracing.CustomTag_Metadata{
				Kind: &envoy_type_metadata_v3.MetadataKind{
					Kind: &envoy_type_metadata_v3.MetadataKind_Request_{
						Request: &envoy_type_metadata_v3.MetadataKind_Request{},
					},
				},
				MetadataKey: metadataKey,
			},
		},
	}
}
// optionalPolicyTags are custom span tags surfacing the outcome of
// authorization-policy dry runs (shadow rules). They are only populated when
// the RBAC filter has emitted the corresponding dynamic metadata (see
// dryRunPolicyTraceTag above).
var optionalPolicyTags = []*tracing.CustomTag{
	dryRunPolicyTraceTag("istio.authorization.dry_run.allow_policy.name", authz_model.RBACShadowRulesAllowStatPrefix+authz_model.RBACShadowEffectivePolicyID),
	dryRunPolicyTraceTag("istio.authorization.dry_run.allow_policy.result", authz_model.RBACShadowRulesAllowStatPrefix+authz_model.RBACShadowEngineResult),
	dryRunPolicyTraceTag("istio.authorization.dry_run.deny_policy.name", authz_model.RBACShadowRulesDenyStatPrefix+authz_model.RBACShadowEffectivePolicyID),
	dryRunPolicyTraceTag("istio.authorization.dry_run.deny_policy.result", authz_model.RBACShadowRulesDenyStatPrefix+authz_model.RBACShadowEngineResult),
}
// buildServiceTags returns the standard Istio service-identity span tags
// (canonical revision/service, mesh id, namespace, cluster id), applying the
// conventional fallback value for each field that the proxy metadata or
// workload labels leave empty.
func buildServiceTags(metadata *model.NodeMetadata, labels map[string]string) []*tracing.CustomTag {
	// orDefault substitutes def when v is unset.
	orDefault := func(v, def string) string {
		if v == "" {
			return def
		}
		return v
	}
	// literalTag builds a constant-valued custom span tag.
	literalTag := func(tag, value string) *tracing.CustomTag {
		return &tracing.CustomTag{
			Tag: tag,
			Type: &tracing.CustomTag_Literal_{
				Literal: &tracing.CustomTag_Literal{
					Value: value,
				},
			},
		}
	}

	// Indexing a nil map safely yields "", so no explicit nil check is needed.
	revision := orDefault(labels["service.istio.io/canonical-revision"], "latest")
	// TODO: This should have been properly handled with the injector.
	service := orDefault(labels["service.istio.io/canonical-name"], "unknown")
	meshID := orDefault(metadata.MeshID, "unknown")
	namespace := orDefault(metadata.Namespace, "default")
	clusterID := orDefault(string(metadata.ClusterID), "unknown")

	return []*tracing.CustomTag{
		literalTag("istio.canonical_revision", revision),
		literalTag("istio.canonical_service", service),
		literalTag("istio.mesh_id", meshID),
		literalTag("istio.namespace", namespace),
		literalTag("istio.cluster_id", clusterID),
	}
}
// configureSampling pins client and overall sampling to 100% so that the
// effective sample rate is driven entirely by the provider-specified
// RandomSampling percentage.
func configureSampling(hcmTracing *hcm.HttpConnectionManager_Tracing, providerPercentage float64) {
	percent := func(v float64) *xdstype.Percent {
		return &xdstype.Percent{Value: v}
	}
	hcmTracing.ClientSampling = percent(100.0)
	hcmTracing.OverallSampling = percent(100.0)
	hcmTracing.RandomSampling = percent(providerPercentage)
}
// proxyConfigSamplingValue returns the trace sampling percentage for a proxy:
// the proxy-config value when set (non-zero), otherwise the mesh-wide
// features.TraceSampling default.
func proxyConfigSamplingValue(config *meshconfig.ProxyConfig) float64 {
	if config.Tracing == nil || config.Tracing.Sampling == 0.0 {
		return features.TraceSampling
	}
	sampling := config.Tracing.Sampling
	// NOTE(review): out-of-range values (>100) collapse to 1.0 rather than
	// being clamped to 100 — behavior preserved as-is; confirm intent.
	if sampling > 100.0 {
		return 1.0
	}
	return sampling
}
// configureCustomTags assembles the full custom-tag set for an HCM tracing
// config: service identity tags, dry-run policy tags, and either the
// provider-defined tags or — when the provider defines none — the
// proxy-config tags.
func configureCustomTags(hcmTracing *hcm.HttpConnectionManager_Tracing,
	providerTags map[string]*telemetrypb.Tracing_CustomTag, proxyCfg *meshconfig.ProxyConfig, node *model.Proxy,
) {
	tags := buildServiceTags(node.Metadata, node.Labels)
	tags = append(tags, optionalPolicyTags...)
	if len(providerTags) > 0 {
		tags = append(tags, buildCustomTagsFromProvider(providerTags)...)
	} else {
		tags = append(tags, buildCustomTagsFromProxyConfig(proxyCfg.GetTracing().GetCustomTags())...)
	}
	// Custom tags originate from maps, so iteration order is random; sort by
	// tag name to keep the generated config deterministic across pushes.
	sort.Slice(tags, func(i, j int) bool {
		return tags[i].Tag < tags[j].Tag
	})
	hcmTracing.CustomTags = tags
}
// buildCustomTagsFromProvider converts provider-level custom tag definitions
// (environment variable, request header, or literal) into Envoy tracing
// custom tags. Nil entries are skipped with a warning.
func buildCustomTagsFromProvider(providerTags map[string]*telemetrypb.Tracing_CustomTag) []*tracing.CustomTag {
	// Pre-size: at most one output tag per input entry.
	tags := make([]*tracing.CustomTag, 0, len(providerTags))
	for tagName, tagInfo := range providerTags {
		if tagInfo == nil {
			log.Warnf("while building custom tags from provider, encountered nil custom tag: %s, skipping", tagName)
			continue
		}
		switch tag := tagInfo.Type.(type) {
		case *telemetrypb.Tracing_CustomTag_Environment:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_Environment_{
					Environment: &tracing.CustomTag_Environment{
						Name:         tag.Environment.Name,
						DefaultValue: tag.Environment.DefaultValue,
					},
				},
			})
		case *telemetrypb.Tracing_CustomTag_Header:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_RequestHeader{
					RequestHeader: &tracing.CustomTag_Header{
						Name:         tag.Header.Name,
						DefaultValue: tag.Header.DefaultValue,
					},
				},
			})
		case *telemetrypb.Tracing_CustomTag_Literal:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_Literal_{
					Literal: &tracing.CustomTag_Literal{
						Value: tag.Literal.Value,
					},
				},
			})
		}
	}
	return tags
}
// buildCustomTagsFromProxyConfig converts proxy-config custom tag definitions
// (environment variable, request header, or literal) into Envoy tracing
// custom tags. Nil entries are skipped with a warning. Mirrors
// buildCustomTagsFromProvider for the meshconfig tag types.
func buildCustomTagsFromProxyConfig(customTags map[string]*meshconfig.Tracing_CustomTag) []*tracing.CustomTag {
	// Pre-size: at most one output tag per input entry.
	tags := make([]*tracing.CustomTag, 0, len(customTags))
	for tagName, tagInfo := range customTags {
		if tagInfo == nil {
			log.Warnf("while building custom tags from proxyConfig, encountered nil custom tag: %s, skipping", tagName)
			continue
		}
		switch tag := tagInfo.Type.(type) {
		case *meshconfig.Tracing_CustomTag_Environment:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_Environment_{
					Environment: &tracing.CustomTag_Environment{
						Name:         tag.Environment.Name,
						DefaultValue: tag.Environment.DefaultValue,
					},
				},
			})
		case *meshconfig.Tracing_CustomTag_Header:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_RequestHeader{
					RequestHeader: &tracing.CustomTag_Header{
						Name:         tag.Header.Name,
						DefaultValue: tag.Header.DefaultValue,
					},
				},
			})
		case *meshconfig.Tracing_CustomTag_Literal:
			tags = append(tags, &tracing.CustomTag{
				Tag: tagName,
				Type: &tracing.CustomTag_Literal_{
					Literal: &tracing.CustomTag_Literal{
						Value: tag.Literal.Value,
					},
				},
			})
		}
	}
	return tags
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tunnelingconfig
import (
"net"
"strconv"
tcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
networking "istio.io/api/networking/v1alpha3"
)
type ApplyFunc = func(tcpProxy *tcp.TcpProxy, destinationRule *networking.DestinationRule, subsetName string)
// Apply configures tunneling_config in a given TcpProxy depending on the destination rule and the destination hosts
var Apply ApplyFunc = func(tcpProxy *tcp.TcpProxy, destinationRule *networking.DestinationRule, subsetName string) {
	var settings *networking.TrafficPolicy_TunnelSettings
	if subsetName == "" {
		settings = destinationRule.GetTrafficPolicy().GetTunnel()
	} else {
		// Only the matching subset's traffic policy is consulted.
		for _, subset := range destinationRule.GetSubsets() {
			if subset.Name == subsetName {
				settings = subset.GetTrafficPolicy().GetTunnel()
				break
			}
		}
	}
	if settings == nil {
		return
	}
	tcpProxy.TunnelingConfig = &tcp.TcpProxy_TunnelingConfig{
		// Hostname is the "host:port" of the tunnel target.
		Hostname: net.JoinHostPort(settings.GetTargetHost(), strconv.Itoa(int(settings.GetTargetPort()))),
		UsePost:  settings.Protocol == "POST",
	}
}
// Skip has no effect; its only purpose is to avoid passing nil values for ApplyFunc arguments
// when it is not desired to apply `tunneling_config` to a listener, e.g. AUTO_PASSTHROUGH.
var Skip ApplyFunc = func(_ *tcp.TcpProxy, _ *networking.DestinationRule, _ string) {}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha3
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/sets"
)
const (
	// ConnectTerminate is the name for the resources associated with the termination of HTTP CONNECT.
	ConnectTerminate = "connect_terminate"

	// MainInternalName is the name for the resources associated with the main (non-tunnel) internal listener.
	MainInternalName = "main_internal"

	// ConnectOriginate is the name for the resources associated with the origination of HTTP CONNECT.
	ConnectOriginate = "connect_originate"

	// EncapClusterName is the name of the cluster used for traffic to the connect_originate listener.
	EncapClusterName = "encap"

	// ConnectUpgradeType is the type of upgrade for HTTP CONNECT.
	ConnectUpgradeType = "CONNECT"
)
// waypointServices holds the services selected for a waypoint's workloads,
// both indexed by hostname and in deterministic order.
type waypointServices struct {
	// services indexes each matched service by its hostname.
	services map[host.Name]*model.Service
	// orderedServices lists the same services sorted by creation time.
	orderedServices []*model.Service
}
// findWaypointResources returns workloads and services associated with the waypoint proxy
func findWaypointResources(node *model.Proxy, push *model.PushContext) ([]*model.WorkloadInfo, *waypointServices) {
	workloads := push.WorkloadsForWaypoint(node.WaypointScope())
	svcs := findWorkloadServices(workloads, push)
	return workloads, svcs
}
// findWorkloadServices collects every service whose label selector matches
// one of the given workloads (in the workload's namespace), returning them
// keyed by hostname and in creation-time order.
func findWorkloadServices(workloads []*model.WorkloadInfo, push *model.PushContext) *waypointServices {
	res := &waypointServices{}
	for _, workload := range workloads {
		for _, byNamespace := range push.ServiceIndex.HostnameAndNamespace {
			svc := byNamespace[workload.Namespace]
			if svc == nil {
				continue
			}
			if !labels.Instance(svc.Attributes.LabelSelectors).Match(workload.Labels) {
				continue
			}
			// Lazily allocate so an empty result keeps a nil map, matching
			// the zero value of waypointServices.
			if res.services == nil {
				res.services = map[host.Name]*model.Service{}
			}
			res.services[svc.Hostname] = svc
		}
	}
	if svcs := maps.Values(res.services); len(svcs) > 0 {
		res.orderedServices = model.SortServicesByCreationTime(svcs)
	}
	return res
}
// filterWaypointOutboundServices is used to determine the set of outbound clusters we need to build for waypoints.
// Waypoints typically only have inbound clusters, except in cases where we have a route from
// a service owned by the waypoint to a service not owned by the waypoint.
// It looks at:
// * referencedServices: all services referenced by mesh virtual services
// * waypointServices: all services owned by this waypoint
// * all services
// We want to find any VirtualServices that are from a waypointServices to a non-waypointService
func filterWaypointOutboundServices(
	referencedServices map[string]sets.String,
	waypointServices map[host.Name]*model.Service,
	services []*model.Service,
) []*model.Service {
	outbound := sets.New[string]()
	for waypointSvc := range waypointServices {
		for ref := range referencedServices[waypointSvc.String()] {
			// We reference this service. Is it "inbound" for the waypoint or "outbound"?
			owned, found := waypointServices[host.Name(ref)]
			if !found || owned.MeshExternal {
				outbound.Insert(ref)
			}
		}
	}
	res := make([]*model.Service, 0, len(outbound))
	for _, svc := range services {
		if outbound.Contains(svc.Hostname.String()) {
			res = append(res, svc)
		}
	}
	return res
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcgen
import (
"fmt"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
corexds "istio.io/istio/pilot/pkg/networking/core/v1alpha3"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/util/sets"
)
// BuildClusters handles a gRPC CDS request, used with the 'ApiListener' style of requests.
// The main difference is that the request includes Resources to filter.
func (g *GrpcConfigGenerator) BuildClusters(node *model.Proxy, push *model.PushContext, names []string) model.Resources {
	filter := newClusterFilter(names)
	resp := make(model.Resources, 0, len(names))
	for defaultClusterName, subsetFilter := range filter {
		builder, err := newClusterBuilder(node, push, defaultClusterName, subsetFilter)
		if err != nil {
			log.Warn(err)
			continue
		}
		// Pack each built cluster (default + subsets) into a discovery resource.
		for _, c := range builder.build() {
			resp = append(resp, &discovery.Resource{
				Name:     c.Name,
				Resource: protoconv.MessageToAny(c),
			})
		}
	}
	if len(resp) == 0 && len(names) == 0 {
		log.Warnf("did not generate any cds for %s; no names provided", node.ID)
	}
	return resp
}
// newClusterFilter maps a non-subset cluster name to the list of actual cluster names (default or subset) actually
// requested by the client. gRPC will usually request a group of clusters that are used in the same route; in some
// cases this means subsets associated with the same default cluster aren't all expected in the same CDS response.
func newClusterFilter(names []string) map[string]sets.String {
	filter := make(map[string]sets.String, len(names))
	for _, requested := range names {
		direction, _, hostname, port := model.ParseSubsetKey(requested)
		defaultKey := model.BuildSubsetKey(direction, "", hostname, port)
		sets.InsertOrNew(filter, defaultKey, requested)
	}
	return filter
}
// clusterBuilder is responsible for building a single default and subset clusters for a service
// TODO re-use the v1alpha3.ClusterBuilder:
// Most of the logic is similar, I think we can just share the code if we expose:
// * BuildSubsetCluster
// * BuildDefaultCluster
// * BuildClusterOpts and members
// * Add something to allow us to override how tlscontext is built
type clusterBuilder struct {
	push *model.PushContext
	node *model.Proxy
	// guaranteed to be set in init
	// defaultClusterName is the fully-qualified subset key of the default (non-subset) cluster.
	defaultClusterName string
	// hostname and portNum are parsed out of defaultClusterName.
	hostname host.Name
	portNum  int
	// may not be set
	// svc/port are the resolved service and service port for hostname/portNum.
	svc  *model.Service
	port *model.Port
	// filter is the set of cluster names (default or subset) actually requested by the client.
	filter sets.String
}
// newClusterBuilder parses defaultClusterName, resolves the service and port
// it refers to for this proxy, and returns a builder for the default cluster
// and its requested subsets. An error is returned when the name cannot be
// parsed or does not correspond to a known service/port.
func newClusterBuilder(node *model.Proxy, push *model.PushContext, defaultClusterName string, filter sets.String) (*clusterBuilder, error) {
	_, _, hostname, portNum := model.ParseSubsetKey(defaultClusterName)
	if hostname == "" || portNum == 0 {
		return nil, fmt.Errorf("failed parsing subset key: %s", defaultClusterName)
	}

	// try to resolve the service and port
	svc := push.ServiceForHostname(node, hostname)
	if svc == nil {
		return nil, fmt.Errorf("cds gen for %s: did not find service for cluster %s", node.ID, defaultClusterName)
	}
	// Note: the previous redundant `var port *model.Port` declaration was
	// removed; GetByPort declares it directly.
	port, ok := svc.Ports.GetByPort(portNum)
	if !ok {
		return nil, fmt.Errorf("cds gen for %s: did not find port %d in service for cluster %s", node.ID, portNum, defaultClusterName)
	}

	return &clusterBuilder{
		node: node,
		push: push,

		defaultClusterName: defaultClusterName,
		hostname:           hostname,
		portNum:            portNum,
		filter:             filter,

		svc:  svc,
		port: port,
	}, nil
}
// build creates the default cluster (when requested by the filter) plus any
// requested subset clusters derived from the destination rule.
func (b *clusterBuilder) build() []*cluster.Cluster {
	var defaultCluster *cluster.Cluster
	if b.filter.Contains(b.defaultClusterName) {
		defaultCluster = edsCluster(b.defaultClusterName)
		if b.svc.Attributes.Labels[features.PersistentSessionLabel] != "" {
			// see core/v1alpha3/cluster.go
			// edsCluster does not populate CommonLbConfig, so it must be
			// initialized here before setting OverrideHostStatus; otherwise
			// this assignment would dereference a nil pointer and panic.
			if defaultCluster.CommonLbConfig == nil {
				defaultCluster.CommonLbConfig = &cluster.Cluster_CommonLbConfig{}
			}
			defaultCluster.CommonLbConfig.OverrideHostStatus = &core.HealthStatusSet{
				Statuses: []core.HealthStatus{
					core.HealthStatus_HEALTHY,
					core.HealthStatus_DRAINING, core.HealthStatus_UNKNOWN, core.HealthStatus_DEGRADED,
				},
			}
		}
	}

	subsetClusters := b.applyDestinationRule(defaultCluster)
	out := make([]*cluster.Cluster, 0, 1+len(subsetClusters))
	if defaultCluster != nil {
		out = append(out, defaultCluster)
	}
	return append(out, subsetClusters...)
}
// edsCluster creates a simple cluster to read endpoints from ads/eds.
func edsCluster(name string) *cluster.Cluster {
	edsSource := &core.ConfigSource{
		ConfigSourceSpecifier: &core.ConfigSource_Ads{
			Ads: &core.AggregatedConfigSource{},
		},
	}
	return &cluster.Cluster{
		Name:                 name,
		ClusterDiscoveryType: &cluster.Cluster_Type{Type: cluster.Cluster_EDS},
		EdsClusterConfig: &cluster.Cluster_EdsClusterConfig{
			ServiceName: name,
			EdsConfig:   edsSource,
		},
	}
}
// applyDestinationRule mutates the default cluster to reflect traffic policies, and returns a set of additional
// subset clusters if specified by a destination rule
func (b *clusterBuilder) applyDestinationRule(defaultCluster *cluster.Cluster) []*cluster.Cluster {
	if b.svc == nil || b.port == nil {
		return nil
	}

	// resolve policy from context
	destinationRule := corexds.CastDestinationRule(b.node.SidecarScope.DestinationRule(
		model.TrafficDirectionOutbound, b.node, b.svc.Hostname).GetRule())
	defaultPolicy, _ := util.GetPortLevelTrafficPolicy(destinationRule.GetTrafficPolicy(), b.port)

	// setup default cluster
	b.applyTrafficPolicy(defaultCluster, defaultPolicy)

	// subset clusters
	subsets := destinationRule.GetSubsets()
	if len(subsets) == 0 {
		return nil
	}
	subsetClusters := make([]*cluster.Cluster, 0, len(subsets))
	for _, subset := range subsets {
		subsetKey := subsetClusterKey(subset.Name, string(b.hostname), b.portNum)
		// Only build subsets the client actually asked for.
		if !b.filter.Contains(subsetKey) {
			continue
		}
		c := edsCluster(subsetKey)
		b.applyTrafficPolicy(c, util.MergeSubsetTrafficPolicy(defaultPolicy, subset.TrafficPolicy, b.port))
		subsetClusters = append(subsetClusters, c)
	}
	return subsetClusters
}
// applyTrafficPolicy mutates the given cluster (if non-nil) so that the given merged traffic policy applies.
func (b *clusterBuilder) applyTrafficPolicy(c *cluster.Cluster, trafficPolicy *networking.TrafficPolicy) {
	if c == nil {
		// cluster can be nil if it wasn't requested
		return
	}
	b.applyTLS(c, trafficPolicy)
	b.applyLoadBalancing(c, trafficPolicy)
	// TODO status or log when unsupported features are included
}
// applyLoadBalancing warns when an unsupported simple LB policy is configured
// (only ROUND_ROBIN or unset are accepted here) and then delegates ring-hash
// handling to the shared corexds helper.
func (b *clusterBuilder) applyLoadBalancing(c *cluster.Cluster, policy *networking.TrafficPolicy) {
	switch policy.GetLoadBalancer().GetSimple() {
	case networking.LoadBalancerSettings_ROUND_ROBIN, networking.LoadBalancerSettings_UNSPECIFIED:
		// ok
	default:
		// Use the nil-safe getter chain here, consistent with the switch
		// expression above (the direct field access was not nil-safe).
		log.Warnf("cannot apply LbPolicy %s to %s", policy.GetLoadBalancer().GetSimple(), b.node.ID)
	}
	corexds.ApplyRingHashLoadBalancer(c, policy.GetLoadBalancer())
}
// applyTLS attaches an upstream TLS transport socket when, and only when, the
// policy explicitly requests ISTIO_MUTUAL.
func (b *clusterBuilder) applyTLS(c *cluster.Cluster, policy *networking.TrafficPolicy) {
	// TODO for now, we leave mTLS *off* by default:
	// 1. We don't know if the client uses xds.NewClientCredentials; these settings will be ignored if not
	// 2. We cannot reach servers in PERMISSIVE mode; gRPC doesn't allow us to override the alpn to one of Istio's
	// 3. Once we support gRPC servers, we have no good way to detect if a server is implemented with xds.NewGrpcServer and will actually support our config
	// For these reasons, support only explicit tls configuration.
	switch policy.GetTls().GetMode() {
	case networking.ClientTLSSettings_DISABLE:
		// nothing to do
	case networking.ClientTLSSettings_SIMPLE:
		// TODO support this
	case networking.ClientTLSSettings_MUTUAL:
		// TODO support this
	case networking.ClientTLSSettings_ISTIO_MUTUAL:
		sans := b.push.ServiceAccounts(b.hostname, b.svc.Attributes.Namespace)
		tlsCtx := buildUpstreamTLSContext(sans)
		c.TransportSocket = &core.TransportSocket{
			Name:       transportSocketName,
			ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(tlsCtx)},
		}
	}
}
// TransportSocket proto message has a `name` field which is expected to be set to exactly this value by the
// management server (see grpc/xds/internal/client/xds.go securityConfigFromCluster).
const transportSocketName = "envoy.transport_sockets.tls"
// buildUpstreamTLSContext wraps the shared common TLS context for use as an
// upstream (client-side) TLS configuration, validating the given SANs.
func buildUpstreamTLSContext(sans []string) *tls.UpstreamTlsContext {
	ctx := &tls.UpstreamTlsContext{}
	ctx.CommonTlsContext = buildCommonTLSContext(sans)
	return ctx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcgen
import (
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/config/host"
istiolog "istio.io/istio/pkg/log"
)
// Support generation of 'ApiListener' LDS responses, used for native support of gRPC.
// The same response can also be used by other apps using XDS directly.
// GRPC proposal:
// https://github.com/grpc/proposal/blob/master/A27-xds-global-load-balancing.md
//
// Note that this implementation is tested against gRPC, but it is generic - any other framework can
// use this XDS mode to get load balancing info from Istio, including MC/VM/etc.
// The corresponding RDS response is also generated - currently gRPC has special differences
// and can't understand normal Istio RDS - in particular expects "" instead of "/" as
// default prefix, and is expects just the route for one host.
// handleAck will detect if the message is an ACK or NACK, and update/log/count
// using the generic structures. "Classical" CDS/LDS/RDS/EDS use separate logic -
// this is used for the API-based LDS and generic messages.
// NOTE(review): the comment above appears to describe a handleAck function not
// defined adjacent to this declaration — confirm it still belongs here.

// log is the scoped logger for the proxyless-gRPC xDS generators in this package.
var log = istiolog.RegisterScope("grpcgen", "xDS Generator for Proxyless gRPC")
type GrpcConfigGenerator struct{}
// clusterKey returns the outbound default (non-subset) cluster key for hostname:port.
func clusterKey(hostname string, port int) string {
	const noSubset = ""
	return subsetClusterKey(noSubset, hostname, port)
}
// subsetClusterKey returns the outbound cluster key for the given subset of hostname:port.
func subsetClusterKey(subset, hostname string, port int) string {
	return model.BuildSubsetKey(model.TrafficDirectionOutbound, subset, host.Name(hostname), port)
}
// Generate dispatches a proxyless-gRPC xDS request to the LDS/CDS/RDS builder
// matching the watched type URL; unrecognized types yield an empty response.
func (g *GrpcConfigGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	var res model.Resources
	switch w.TypeUrl {
	case v3.ListenerType:
		res = g.BuildListeners(proxy, req.Push, w.ResourceNames)
	case v3.ClusterType:
		res = g.BuildClusters(proxy, req.Push, w.ResourceNames)
	case v3.RouteType:
		res = g.BuildHTTPRoutes(proxy, req.Push, w.ResourceNames)
	}
	return res, model.DefaultXdsLogDetails, nil
}
// buildCommonTLSContext creates a TLS context that assumes 'default' name, and credentials/tls/certprovider/pemfile
// (see grpc/xds/internal/client/xds.go securityConfigFromCluster).
func buildCommonTLSContext(sans []string) *tls.CommonTlsContext {
	var sanMatchers []*matcher.StringMatcher
	if len(sans) > 0 {
		sanMatchers = util.StringToExactMatch(sans)
	}
	// Workload certificate, provided by the "default" certificate provider.
	workloadCert := &tls.CommonTlsContext_CertificateProviderInstance{
		InstanceName:    "default",
		CertificateName: "default",
	}
	// Root of trust for validation, from the same provider instance.
	rootCert := &tls.CommonTlsContext_CertificateProviderInstance{
		InstanceName:    "default",
		CertificateName: "ROOTCA",
	}
	return &tls.CommonTlsContext{
		TlsCertificateCertificateProviderInstance: workloadCert,
		ValidationContextType: &tls.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
				ValidationContextCertificateProviderInstance: rootCert,
				DefaultValidationContext: &tls.CertificateValidationContext{
					MatchSubjectAltNames: sanMatchers,
				},
			},
		},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcgen
import (
"fmt"
"net"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
rbachttp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/security/authn"
authzmodel "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pilot/pkg/util/protoconv"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/istio-agent/grpcxds"
"istio.io/istio/pkg/util/sets"
)
// supportedFilters is the fixed HTTP filter chain offered to gRPC clients:
// fault injection plus the terminal router filter.
var supportedFilters = []*hcm.HttpFilter{
	xdsfilters.Fault,
	xdsfilters.BuildRouterFilter(xdsfilters.RouterFilterContext{
		StartChildSpan:       false,
		SuppressDebugHeaders: false, // No need to set this to true, gRPC doesn't respect it anyways
	}),
}
const (
	// RBACHTTPFilterName is the canonical name of the Envoy HTTP RBAC filter.
	RBACHTTPFilterName = "envoy.filters.http.rbac"
	// RBACHTTPFilterNameDeny is the name used for the DENY-variant RBAC filter instance.
	RBACHTTPFilterNameDeny = "envoy.filters.http.rbac.DENY"
)
// BuildListeners handles a LDS request, returning listeners of ApiListener type.
// The request may include a list of resource names, using the full_hostname[:port] format to select only
// specific services.
func (g *GrpcConfigGenerator) BuildListeners(node *model.Proxy, push *model.PushContext, names []string) model.Resources {
	filter := newListenerNameFilter(names, node)
	log.Debugf("building lds for %s with filter:\n%v", node.ID, filter)

	out := make(model.Resources, 0, len(filter))
	out = append(out, buildOutboundListeners(node, push, filter)...)
	return append(out, buildInboundListeners(node, push, filter.inboundNames())...)
}
// buildInboundListeners builds one inbound server listener per requested gRPC
// server-listener resource name. Each name encodes a host:port address; the
// port must match one of this proxy's service target ports, otherwise the
// name is skipped with a warning.
func buildInboundListeners(node *model.Proxy, push *model.PushContext, names []string) model.Resources {
	if len(names) == 0 {
		return nil
	}
	var out model.Resources
	mtlsPolicy := authn.NewMtlsPolicy(push, node.Metadata.Namespace, node.Labels, node.IsWaypointProxy())
	// Index this proxy's service targets by target port so each requested
	// listener port can be matched to a service instance.
	serviceInstancesByPort := map[uint32]model.ServiceTarget{}
	for _, si := range node.ServiceTargets {
		serviceInstancesByPort[si.Port.TargetPort] = si
	}
	for _, name := range names {
		// Names look like "<server-listener-prefix><host>:<port>"; strip the
		// prefix and parse out host and port.
		listenAddress := strings.TrimPrefix(name, grpcxds.ServerListenerNamePrefix)
		listenHost, listenPortStr, err := net.SplitHostPort(listenAddress)
		if err != nil {
			log.Errorf("failed parsing address from gRPC listener name %s: %v", name, err)
			continue
		}
		listenPort, err := strconv.Atoi(listenPortStr)
		if err != nil {
			log.Errorf("failed parsing port from gRPC listener name %s: %v", name, err)
			continue
		}
		si, ok := serviceInstancesByPort[uint32(listenPort)]
		if !ok {
			log.Warnf("%s has no service instance for port %s", node.ID, listenPortStr)
			continue
		}
		ll := &listener.Listener{
			Name: name,
			Address: &core.Address{Address: &core.Address_SocketAddress{
				SocketAddress: &core.SocketAddress{
					Address: listenHost,
					PortSpecifier: &core.SocketAddress_PortValue{
						PortValue: uint32(listenPort),
					},
				},
			}},
			FilterChains: buildInboundFilterChains(node, push, si, mtlsPolicy),
			// the following must not be set or the client will NACK
			ListenerFilters: nil,
			UseOriginalDst:  nil,
		}
		// add extra addresses for the listener
		extrAddresses := si.Service.GetExtraAddressesForProxy(node)
		if len(extrAddresses) > 0 {
			ll.AdditionalAddresses = util.BuildAdditionalAddresses(extrAddresses, uint32(listenPort))
		}
		out = append(out, &discovery.Resource{
			Name:     ll.Name,
			Resource: protoconv.MessageToAny(ll),
		})
	}
	return out
}
// buildInboundFilterChains returns the filter chains for an inbound gRPC
// listener, based on the effective mTLS mode for the service target's port.
// gRPC cannot express permissive mTLS, so PERMISSIVE is downgraded: to STRICT
// when the auto-mtls "istio" tls-mode label is present, otherwise to DISABLE.
// nolint: unparam
func buildInboundFilterChains(node *model.Proxy, push *model.PushContext, si model.ServiceTarget, checker authn.MtlsPolicy) []*listener.FilterChain {
	mode := checker.GetMutualTLSModeForPort(si.Port.TargetPort)
	// auto-mtls label is set - clients will attempt to connect using mtls, and
	// gRPC doesn't support permissive.
	if node.Labels[label.SecurityTlsMode.Name] == "istio" && mode == model.MTLSPermissive {
		mode = model.MTLSStrict
	}
	// Build the TLS context eagerly for any mode that may need it; it is only
	// attached below when the final mode ends up STRICT.
	var tlsContext *tls.DownstreamTlsContext
	if mode != model.MTLSDisable && mode != model.MTLSUnknown {
		tlsContext = &tls.DownstreamTlsContext{
			CommonTlsContext: buildCommonTLSContext(nil),
			// TODO match_subject_alt_names field in validation context is not supported on the server
			// CommonTlsContext: buildCommonTLSContext(authnplugin.TrustDomainsForValidation(push.Mesh)),
			// TODO plain TLS support
			RequireClientCertificate: &wrappers.BoolValue{Value: true},
		}
	}
	if mode == model.MTLSUnknown {
		log.Warnf("could not find mTLS mode for %s on %s; defaulting to DISABLE", si.Service.Hostname, node.ID)
		mode = model.MTLSDisable
	}
	if mode == model.MTLSPermissive {
		// TODO gRPC's filter chain match is super limited - only effective transport_protocol match is "raw_buffer"
		// see https://github.com/grpc/proposal/blob/master/A36-xds-for-servers.md for detail
		// No need to warn on each push - the behavior is still consistent with auto-mtls, which is the
		// replacement for permissive.
		mode = model.MTLSDisable
	}
	var out []*listener.FilterChain
	switch mode {
	case model.MTLSDisable:
		out = append(out, buildInboundFilterChain(node, push, "plaintext", nil))
	case model.MTLSStrict:
		out = append(out, buildInboundFilterChain(node, push, "mtls", tlsContext))
		// TODO permissive builts both plaintext and mtls; when tlsContext is present add a match for protocol
	}
	return out
}
// buildInboundFilterChain builds a single inbound filter chain containing an
// HTTP connection manager with optional RBAC filters (derived from
// AuthorizationPolicy), followed by the mandatory terminal router filter.
// When tlsContext is non-nil, an mTLS transport socket is attached.
func buildInboundFilterChain(node *model.Proxy, push *model.PushContext, nameSuffix string, tlsContext *tls.DownstreamTlsContext) *listener.FilterChain {
	fc := []*hcm.HttpFilter{}
	// See security/authz/builder and grpc internal/xds/rbac
	// grpc supports ALLOW and DENY actions (fail if it is not one of them), so we can't use the normal generator
	selectionOpts := model.WorkloadSelectionOpts{
		Namespace:      node.ConfigNamespace,
		WorkloadLabels: node.Labels,
	}
	policies := push.AuthzPolicies.ListAuthorizationPolicies(selectionOpts)
	if len(policies.Deny)+len(policies.Allow) > 0 {
		// DENY filters are added before ALLOW filters.
		rules := buildRBAC(node, push, nameSuffix, tlsContext, rbacpb.RBAC_DENY, policies.Deny)
		if rules != nil && len(rules.Policies) > 0 {
			rbac := &rbachttp.RBAC{
				Rules: rules,
			}
			fc = append(fc,
				&hcm.HttpFilter{
					Name:       RBACHTTPFilterNameDeny,
					ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(rbac)},
				})
		}
		arules := buildRBAC(node, push, nameSuffix, tlsContext, rbacpb.RBAC_ALLOW, policies.Allow)
		if arules != nil && len(arules.Policies) > 0 {
			rbac := &rbachttp.RBAC{
				Rules: arules,
			}
			fc = append(fc,
				&hcm.HttpFilter{
					Name:       RBACHTTPFilterName,
					ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(rbac)},
				})
		}
	}
	// Must be last
	fc = append(fc, xdsfilters.BuildRouterFilter(xdsfilters.RouterFilterContext{
		StartChildSpan:       false,
		SuppressDebugHeaders: false, // No need to set this to true, gRPC doesn't respect it anyways
	}))
	out := &listener.FilterChain{
		Name:             "inbound-" + nameSuffix,
		FilterChainMatch: nil,
		Filters: []*listener.Filter{{
			Name: "inbound-hcm" + nameSuffix,
			ConfigType: &listener.Filter_TypedConfig{
				TypedConfig: protoconv.MessageToAny(&hcm.HttpConnectionManager{
					RouteSpecifier: &hcm.HttpConnectionManager_RouteConfig{
						// https://github.com/grpc/grpc-go/issues/4924
						RouteConfig: &route.RouteConfiguration{
							Name: "inbound",
							VirtualHosts: []*route.VirtualHost{{
								Domains: []string{"*"},
								Routes: []*route.Route{{
									Match: &route.RouteMatch{
										PathSpecifier: &route.RouteMatch_Prefix{Prefix: "/"},
									},
									// Inbound traffic is terminated here, not forwarded.
									Action: &route.Route_NonForwardingAction{},
								}},
							}},
						},
					},
					HttpFilters: fc,
				}),
			},
		}},
	}
	if tlsContext != nil {
		out.TransportSocket = &core.TransportSocket{
			Name:       transportSocketName,
			ConfigType: &core.TransportSocket_TypedConfig{TypedConfig: protoconv.MessageToAny(tlsContext)},
		}
	}
	return out
}
// buildRBAC builds the RBAC config expected by gRPC.
//
// See: xds/internal/httpfilter/rbac
//
// TODO: gRPC also supports 'per route override' - not yet clear how to use it, Istio uses path expressions instead and we don't generate
// vhosts or routes for the inbound listener.
//
// For gateways it would make a lot of sense to use this concept, same for moving path prefix at top level ( more scalable, easier for users)
// This should probably be done for the v2 API.
//
// nolint: unparam
func buildRBAC(node *model.Proxy, push *model.PushContext, suffix string, context *tls.DownstreamTlsContext,
	a rbacpb.RBAC_Action, policies []model.AuthorizationPolicy,
) *rbacpb.RBAC {
	rules := &rbacpb.RBAC{
		Action:   a,
		Policies: map[string]*rbacpb.Policy{},
	}
	for _, policy := range policies {
		for i, rule := range policy.Spec.Rules {
			// Policy names must be unique within the map: namespace-name-ruleIndex.
			name := fmt.Sprintf("%s-%s-%d", policy.Namespace, policy.Name, i)
			m, err := authzmodel.New(rule, true)
			if err != nil {
				log.Warnf("Invalid rule %v: %v", rule, err)
				continue
			}
			generated, err := m.Generate(false, true, a)
			if err != nil || generated == nil {
				// Previously the error was discarded, which could insert a nil
				// policy into the map and emit an invalid RBAC config to the
				// client. Skip the rule instead.
				log.Warnf("Failed to generate policy for rule %v: %v", rule, err)
				continue
			}
			rules.Policies[name] = generated
		}
	}
	return rules
}
// buildOutboundListeners builds one API listener per requested (host, port)
// pair for each service in the proxy's sidecar scope that passes the name
// filter. Each listener carries an HttpConnectionManager whose RDS config
// points at the cluster-style route name for the service/port.
// nolint: unparam
func buildOutboundListeners(node *model.Proxy, push *model.PushContext, filter listenerNames) model.Resources {
	out := make(model.Resources, 0, len(filter))
	for _, sv := range node.SidecarScope.Services() {
		serviceHost := string(sv.Hostname)
		match, ok := filter.includes(serviceHost)
		if !ok {
			continue
		}
		// we must duplicate the listener for every requested host - grpc may have watches for both foo and foo.ns
		for _, matchedHost := range sets.SortedList(match.RequestedNames) {
			for _, p := range sv.Ports {
				sPort := strconv.Itoa(p.Port)
				if !match.includesPort(sPort) {
					continue
				}
				filters := supportedFilters
				// A stateful-session filter, when configured for the service, is prepended to the shared filter list.
				if sessionFilter := util.BuildStatefulSessionFilter(sv); sessionFilter != nil {
					filters = append([]*hcm.HttpFilter{sessionFilter}, filters...)
				}
				ll := &listener.Listener{
					Name: net.JoinHostPort(matchedHost, sPort),
					Address: &core.Address{
						Address: &core.Address_SocketAddress{
							SocketAddress: &core.SocketAddress{
								Address: sv.GetAddressForProxy(node),
								PortSpecifier: &core.SocketAddress_PortValue{
									PortValue: uint32(p.Port),
								},
							},
						},
					},
					ApiListener: &listener.ApiListener{
						ApiListener: protoconv.MessageToAny(&hcm.HttpConnectionManager{
							HttpFilters: filters,
							RouteSpecifier: &hcm.HttpConnectionManager_Rds{
								// TODO: for TCP listeners don't generate RDS, but some indication of cluster name.
								Rds: &hcm.Rds{
									ConfigSource: &core.ConfigSource{
										ConfigSourceSpecifier: &core.ConfigSource_Ads{
											Ads: &core.AggregatedConfigSource{},
										},
									},
									RouteConfigName: clusterKey(serviceHost, p.Port),
								},
							},
						}),
					},
				}
				// add extra addresses for the listener
				extrAddresses := sv.GetExtraAddressesForProxy(node)
				if len(extrAddresses) > 0 {
					ll.AdditionalAddresses = util.BuildAdditionalAddresses(extrAddresses, uint32(p.Port))
				}
				out = append(out, &discovery.Resource{
					Name:     ll.Name,
					Resource: protoconv.MessageToAny(ll),
				})
			}
		}
	}
	return out
}
// listenerNames filters LDS resource names requested by a client:
// map[host] -> map[port] -> exists
// if the map[port] is empty, an exact listener name was provided (non-hostport)
type listenerNames map[string]listenerName
type listenerName struct {
	// RequestedNames holds the original names the client used for this host
	// (e.g. both "foo" and "foo.ns" may map to the same entry).
	RequestedNames sets.String
	// Ports is the set of requested ports; empty means every port matches.
	Ports sets.String
}
// includesPort reports whether the given port passes this filter entry.
// An empty port set means no port filtering was requested, so all ports match.
func (ln *listenerName) includesPort(port string) bool {
	if len(ln.Ports) == 0 {
		return true
	}
	_, found := ln.Ports[port]
	return found
}
// includes looks s up in the filter and returns the matching entry.
// An empty filter admits every name.
func (f listenerNames) includes(s string) (listenerName, bool) {
	if len(f) == 0 {
		// filter is empty, include everything
		return listenerName{RequestedNames: sets.New(s)}, true
	}
	entry, found := f[s]
	return entry, found
}
// inboundNames returns the subset of filter keys that are gRPC server
// (inbound) listener names, identified by the server listener name prefix.
func (f listenerNames) inboundNames() []string {
	var inbound []string
	for name := range f {
		if strings.HasPrefix(name, grpcxds.ServerListenerNamePrefix) {
			inbound = append(inbound, name)
		}
	}
	return inbound
}
// newListenerNameFilter builds the name filter for an LDS request.
// Inbound (server) listener names are stored verbatim; other names are parsed
// as host[:port], with shortnames additionally expanded to FQDNs via
// tryFindFQDN so a watch on "foo" also matches its fully qualified form.
func newListenerNameFilter(names []string, node *model.Proxy) listenerNames {
	filter := make(listenerNames, len(names))
	for _, name := range names {
		// inbound, create a simple entry and move on
		if strings.HasPrefix(name, grpcxds.ServerListenerNamePrefix) {
			filter[name] = listenerName{RequestedNames: sets.New(name)}
			continue
		}
		host, port, err := net.SplitHostPort(name)
		// A successful split means an explicit port was requested.
		hasPort := err == nil
		// attempt to expand shortname to FQDN
		requestedName := name
		if hasPort {
			requestedName = host
		}
		allNames := []string{requestedName}
		if fqdn := tryFindFQDN(requestedName, node); fqdn != "" {
			allNames = append(allNames, fqdn)
		}
		for _, name := range allNames {
			ln, ok := filter[name]
			if !ok {
				ln = listenerName{RequestedNames: sets.New[string]()}
			}
			ln.RequestedNames.Insert(requestedName)
			// only build the portmap if we aren't filtering this name yet, or if the existing filter is non-empty
			if hasPort && (!ok || len(ln.Ports) != 0) {
				if ln.Ports == nil {
					ln.Ports = map[string]struct{}{}
				}
				ln.Ports.Insert(port)
			} else if !hasPort {
				// if we didn't have a port, we should clear the portmap
				ln.Ports = nil
			}
			filter[name] = ln
		}
	}
	return filter
}
// tryFindFQDN attempts to expand a possibly shortened service host into the
// FQDN the proxy would use, based on the proxy's DNS domain and namespace.
// Returns "" when no expansion applies.
func tryFindFQDN(name string, node *model.Proxy) string {
	// A bare shortname ("foo") expands directly with the proxy's DNS domain:
	// "foo" -> "foo.ns.svc.cluster.local".
	if !strings.Contains(name, ".") {
		return name + "." + node.DNSDomain
	}
	// Handle partially qualified names like "foo.ns" or "foo.ns.svc".
	for _, suffix := range []string{
		node.Metadata.Namespace,
		node.Metadata.Namespace + ".svc",
	} {
		trimmed := strings.TrimSuffix(name, "."+suffix)
		if trimmed == name {
			continue
		}
		if !strings.HasPrefix(node.DNSDomain, suffix) {
			continue
		}
		return trimmed + "." + node.DNSDomain
	}
	return ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcgen
import (
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3"
"istio.io/istio/pilot/pkg/util/protoconv"
)
// BuildHTTPRoutes supports per-VIP routes, as used by GRPC.
// This mode is indicated by using names containing full host:port instead of just port.
// Returns one RouteConfiguration resource per route name that could be built;
// names that fail to parse are skipped.
func (g *GrpcConfigGenerator) BuildHTTPRoutes(node *model.Proxy, push *model.PushContext, routeNames []string) model.Resources {
	resp := model.Resources{}
	for _, routeName := range routeNames {
		if rc := buildHTTPRoute(node, push, routeName); rc != nil {
			resp = append(resp, &discovery.Resource{
				Name:     routeName,
				Resource: protoconv.MessageToAny(rc),
			})
		}
	}
	return resp
}
// buildHTTPRoute builds the RouteConfiguration for a single cluster-style
// route name (subset key). Returns nil when the name cannot be parsed.
func buildHTTPRoute(node *model.Proxy, push *model.PushContext, routeName string) *route.RouteConfiguration {
	// TODO use route-style naming instead of cluster naming
	_, _, hostname, port := model.ParseSubsetKey(routeName)
	if hostname == "" || port == 0 {
		log.Warnf("failed to parse %v", routeName)
		return nil
	}
	// Only generate the required route for grpc. Will need to generate more
	// as GRPC adds more features.
	vhosts, _, _ := v1alpha3.BuildSidecarOutboundVirtualHosts(node, push, routeName, port, nil, &model.DisabledCache{})
	rc := &route.RouteConfiguration{Name: routeName}
	rc.VirtualHosts = vhosts
	return rc
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package networking
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"istio.io/istio/pkg/config/protocol"
)
// ListenerProtocol is the protocol associated with the listener.
// The zero value is ListenerProtocolUnknown.
type ListenerProtocol int
const (
	// ListenerProtocolUnknown is an unknown type of listener.
	ListenerProtocolUnknown = iota
	// ListenerProtocolTCP is a TCP listener.
	ListenerProtocolTCP
	// ListenerProtocolHTTP is an HTTP listener.
	ListenerProtocolHTTP
	// ListenerProtocolAuto enables auto protocol detection
	ListenerProtocolAuto
)
// ModelProtocolToListenerProtocol converts from a config.Protocol to its corresponding plugin.ListenerProtocol
func ModelProtocolToListenerProtocol(p protocol.Instance) ListenerProtocol {
	switch p {
	case protocol.HTTP, protocol.HTTP2, protocol.HTTP_PROXY, protocol.GRPC, protocol.GRPCWeb:
		return ListenerProtocolHTTP
	case protocol.TCP, protocol.HTTPS, protocol.TLS, protocol.Mongo, protocol.Redis, protocol.MySQL:
		return ListenerProtocolTCP
	case protocol.UDP:
		// UDP has no corresponding listener protocol.
		return ListenerProtocolUnknown
	default:
		// protocol.Unsupported and anything unrecognized fall back to auto detection.
		return ListenerProtocolAuto
	}
}
// TransportProtocol distinguishes the listener's transport: TCP or QUIC.
type TransportProtocol uint8
const (
	// TransportProtocolTCP is a TCP listener
	TransportProtocolTCP = iota
	// TransportProtocolQUIC is a QUIC listener
	TransportProtocolQUIC
)
// String returns the lowercase name of the transport protocol, or "unknown"
// for an unrecognized value.
func (tp TransportProtocol) String() string {
	switch tp {
	case TransportProtocolQUIC:
		return "quic"
	case TransportProtocolTCP:
		return "tcp"
	default:
		return "unknown"
	}
}
// ToEnvoySocketProtocol maps the transport protocol onto the Envoy socket
// protocol: UDP for QUIC, TCP for everything else.
func (tp TransportProtocol) ToEnvoySocketProtocol() core.SocketAddress_Protocol {
	switch tp {
	case TransportProtocolQUIC:
		return core.SocketAddress_UDP
	default:
		return core.SocketAddress_TCP
	}
}
// ListenerClass defines the class of the listener
type ListenerClass int
const (
	// ListenerClassUndefined is the zero value; no class has been assigned.
	ListenerClassUndefined ListenerClass = iota
	// ListenerClassSidecarInbound is a sidecar listener handling inbound traffic.
	ListenerClassSidecarInbound
	// ListenerClassSidecarOutbound is a sidecar listener handling outbound traffic.
	ListenerClassSidecarOutbound
	// ListenerClassGateway is a listener on a gateway proxy.
	ListenerClassGateway
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/security/authn"
"istio.io/istio/pkg/log"
)
// authnLog is the logging scope used by the authn filter builder.
var authnLog = log.RegisterScope("authn", "authn debugging")
// Builder builds authentication (mTLS settings and authn HTTP filters) for a single proxy.
type Builder struct {
	// applier resolves the effective authentication policy for the proxy.
	applier authn.PolicyApplier
	// trustDomains are the trust domains used for peer certificate validation.
	trustDomains []string
	// proxy is the proxy the configuration is built for.
	proxy *model.Proxy
}
// NewBuilder returns an authn Builder for the given proxy, resolving its
// policy applier and the trust domains used for validation up front.
func NewBuilder(push *model.PushContext, proxy *model.Proxy) *Builder {
	b := &Builder{
		applier:      authn.NewPolicyApplier(push, proxy.Metadata.Namespace, proxy.Labels, proxy.IsWaypointProxy()),
		trustDomains: TrustDomainsForValidation(push.Mesh),
		proxy:        proxy,
	}
	return b
}
// ForPort returns the inbound mTLS settings for the given port.
// A nil Builder yields DISABLE (no mTLS).
func (b *Builder) ForPort(port uint32) authn.MTLSSettings {
	if b == nil {
		return authn.MTLSSettings{Port: port, Mode: model.MTLSDisable}
	}
	return b.applier.InboundMTLSSettings(port, b.proxy, b.trustDomains, authn.NoOverride)
}
// ForHBONE returns the mTLS settings for the HBONE inbound port.
// A nil Builder yields DISABLE; otherwise the mode is forced to STRICT,
// since HBONE is always strict.
func (b *Builder) ForHBONE() authn.MTLSSettings {
	if b == nil {
		return authn.MTLSSettings{Port: model.HBoneInboundListenPort, Mode: model.MTLSDisable}
	}
	return b.applier.InboundMTLSSettings(model.HBoneInboundListenPort, b.proxy, b.trustDomains, model.MTLSStrict)
}
// ForPassthrough returns mTLS settings for passthrough traffic: a catch-all
// entry (port 0) plus one entry for each port-level mTLS setting whose port is
// not already covered by a Sidecar ingress listener or declared service port.
// A nil Builder yields a single DISABLE entry.
func (b *Builder) ForPassthrough() []authn.MTLSSettings {
	if b == nil {
		return []authn.MTLSSettings{{
			Port: 0,
			Mode: model.MTLSDisable,
		}}
	}
	// We need to create configuration for the passthrough,
	// but also any ports that are not explicitly declared in the Service but are in the mTLS port level settings.
	resp := []authn.MTLSSettings{
		// Full passthrough - no port match
		b.applier.InboundMTLSSettings(0, b.proxy, b.trustDomains, authn.NoOverride),
	}
	// Then generate the per-port passthrough filter chains.
	for port := range b.applier.PortLevelSetting() {
		// Skip the per-port passthrough filterchain if the port is already handled by InboundMTLSConfiguration().
		if !needPerPortPassthroughFilterChain(port, b.proxy) {
			continue
		}
		authnLog.Debugf("InboundMTLSConfiguration: build extra pass through filter chain for %v:%d", b.proxy.ID, port)
		resp = append(resp, b.applier.InboundMTLSSettings(port, b.proxy, b.trustDomains, authn.NoOverride))
	}
	return resp
}
// BuildHTTP returns the authentication-related HTTP filters for the given
// listener class. Outbound sidecar listeners get no authn filters. Proxies
// supporting the Envoy extended JWT filter only need the JWT filter; others
// also get the AuthN filter.
func (b *Builder) BuildHTTP(class networking.ListenerClass) []*hcm.HttpFilter {
	if b == nil {
		return nil
	}
	if class == networking.ListenerClassSidecarOutbound {
		// Only applies to inbound and gateways
		return nil
	}
	if b.proxy.SupportsEnvoyExtendedJwt() {
		filter := b.applier.JwtFilter(true, b.proxy.Type != model.SidecarProxy)
		if filter != nil {
			return []*hcm.HttpFilter{filter}
		}
		return nil
	}
	res := []*hcm.HttpFilter{}
	if filter := b.applier.JwtFilter(false, false); filter != nil {
		res = append(res, filter)
	}
	forSidecar := b.proxy.Type == model.SidecarProxy
	if filter := b.applier.AuthNFilter(forSidecar); filter != nil {
		res = append(res, filter)
	}
	return res
}
// needPerPortPassthroughFilterChain reports whether the given port requires an
// extra per-port passthrough filter chain. Ports already declared in a Sidecar
// ingress listener (or, with no Sidecar, in any service target) are handled
// elsewhere and do not need one.
func needPerPortPassthroughFilterChain(port uint32, node *model.Proxy) bool {
	if node.SidecarScope.HasIngressListener() {
		// The Sidecar resource takes precedence over services: only ports it
		// does not declare fall through to passthrough handling.
		for _, ingress := range node.SidecarScope.Sidecar.Ingress {
			if ingress.Port.Number == port {
				return false
			}
		}
		return true
	}
	// No Sidecar: a port backed by any service target is already covered.
	for _, target := range node.ServiceTargets {
		if target.Port.TargetPort == port {
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/util/sets"
)
// TrustDomainsForValidation returns the deduplicated list of trust domains to
// validate peer certificates against: the mesh trust domain, its aliases, and
// any trust domains declared on mesh CA certificates. Returns nil when trust
// domain validation is disabled.
func TrustDomainsForValidation(meshConfig *meshconfig.MeshConfig) []string {
	if features.SkipValidateTrustDomain {
		return nil
	}
	tds := make([]string, 0, 1+len(meshConfig.TrustDomainAliases))
	tds = append(tds, meshConfig.TrustDomain)
	tds = append(tds, meshConfig.TrustDomainAliases...)
	for _, cacert := range meshConfig.GetCaCertificates() {
		tds = append(tds, cacert.GetTrustDomains()...)
	}
	return dedupTrustDomains(tds)
}
func dedupTrustDomains(tds []string) []string {
known := sets.New[string]()
deduped := make([]string, 0, len(tds))
for _, td := range tds {
if td != "" && !known.InsertContains(td) {
deduped = append(deduped, td)
}
}
return deduped
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/security/authz/builder"
"istio.io/istio/pilot/pkg/security/trustdomain"
)
// ActionType indicates how an authorization policy is enforced.
type ActionType int
const (
	// Local for action ALLOW, DENY and AUDIT and is enforced by Envoy RBAC filter.
	Local ActionType = iota
	// Custom action is enforced by Envoy ext_authz filter.
	Custom
)
// Builder lazily builds and caches the authorization (RBAC) HTTP and TCP
// filters for a single proxy.
type Builder struct {
	// Lazy load
	// httpBuilt/tcpBuilt record whether the corresponding filters were already built.
	httpBuilt, tcpBuilt bool
	// httpFilters/tcpFilters cache the result of the first build.
	httpFilters []*hcm.HttpFilter
	tcpFilters  []*listener.Filter
	// builder is the underlying per-proxy authorization policy builder.
	builder *builder.Builder
}
// NewBuilder constructs an authorization Builder for the given proxy.
// actionType selects between locally-enforced RBAC (Local) and ext_authz
// enforced policies (Custom).
func NewBuilder(actionType ActionType, push *model.PushContext, proxy *model.Proxy, useFilterState bool) *Builder {
	selection := model.WorkloadSelectionOpts{
		Namespace:      proxy.ConfigNamespace,
		WorkloadLabels: proxy.Labels,
	}
	policies := push.AuthzPolicies.ListAuthorizationPolicies(selection)
	tdBundle := trustdomain.NewBundle(push.Mesh.TrustDomain, push.Mesh.TrustDomainAliases)
	opt := builder.Option{
		IsCustomBuilder: actionType == Custom,
		UseFilterState:  useFilterState,
		UseExtendedJwt:  proxy.SupportsEnvoyExtendedJwt(),
	}
	return &Builder{builder: builder.New(tdBundle, push, policies, opt)}
}
// BuildTCP returns the TCP RBAC filters, building them on first use and
// caching the result for later calls. Safe on a nil receiver.
func (b *Builder) BuildTCP() []*listener.Filter {
	if b == nil || b.builder == nil {
		return nil
	}
	if !b.tcpBuilt {
		b.tcpFilters = b.builder.BuildTCP()
		b.tcpBuilt = true
	}
	return b.tcpFilters
}
// BuildHTTP returns the HTTP RBAC filters for the given listener class,
// building them on first use and caching the result. Outbound sidecar
// listeners never receive authorization filters. Safe on a nil receiver.
func (b *Builder) BuildHTTP(class networking.ListenerClass) []*hcm.HttpFilter {
	if b == nil || b.builder == nil {
		return nil
	}
	if class == networking.ListenerClassSidecarOutbound {
		// Only applies to inbound and gateways
		return nil
	}
	if !b.httpBuilt {
		b.httpFilters = b.builder.BuildHTTP()
		b.httpBuilt = true
	}
	return b.httpFilters
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"strconv"
"strings"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/config/host"
)
var (
	// StatName patterns: placeholder tokens that may appear in a configured
	// stat prefix format string; each is substituted by Build*StatPrefix below.
	serviceStatPattern           = "%SERVICE%"
	serviceFQDNStatPattern       = "%SERVICE_FQDN%"
	servicePortStatPattern       = "%SERVICE_PORT%"
	serviceTargetPortStatPattern = "%TARGET_PORT%"
	servicePortNameStatPattern   = "%SERVICE_PORT_NAME%"
	subsetNameStatPattern        = "%SUBSET_NAME%"
)
// BuildStatPrefix builds a stat prefix by substituting each placeholder token
// in statPattern with the corresponding service attribute. Substitutions are
// applied sequentially, in the same order as before.
func BuildStatPrefix(statPattern string, host string, subset string, port *model.Port, targetPort int, attributes *model.ServiceAttributes) string {
	replacements := []struct{ token, value string }{
		{serviceStatPattern, shortHostName(host, attributes)},
		{serviceFQDNStatPattern, host},
		{subsetNameStatPattern, subset},
		{serviceTargetPortStatPattern, strconv.Itoa(targetPort)},
		{servicePortStatPattern, strconv.Itoa(port.Port)},
		{servicePortNameStatPattern, port.Name},
	}
	prefix := statPattern
	for _, r := range replacements {
		prefix = strings.ReplaceAll(prefix, r.token, r.value)
	}
	return prefix
}
// BuildInboundStatPrefix builds a stat prefix by substituting each placeholder
// token in statPattern with filter chain telemetry data. Substitutions are
// applied sequentially, in the same order as before.
func BuildInboundStatPrefix(statPattern string, tm FilterChainMetadata, subset string, port uint32, portName string) string {
	replacements := []struct{ token, value string }{
		{serviceStatPattern, tm.ShortHostname()},
		{serviceFQDNStatPattern, tm.InstanceHostname.String()},
		{subsetNameStatPattern, subset},
		{servicePortStatPattern, strconv.Itoa(int(port))},
		{servicePortNameStatPattern, portName},
	}
	prefix := statPattern
	for _, r := range replacements {
		prefix = strings.ReplaceAll(prefix, r.token, r.value)
	}
	return prefix
}
// shortHostName returns "name.namespace" for Kubernetes services; hosts from
// other registries (e.g. VMs) are returned unchanged.
func shortHostName(host string, attributes *model.ServiceAttributes) string {
	if attributes.ServiceRegistry != provider.Kubernetes {
		return host
	}
	return attributes.Name + "." + attributes.Namespace
}
// TraceOperation builds the trace operation name "host:port/*" for a given host and port.
func TraceOperation(host string, port int) string {
	// Match-all path suffix appended to the host:port authority.
	const allPaths = "/*"
	return util.DomainName(host, port) + allPaths
}
// FilterChainMetadata defines additional metadata for telemetry use for a filter chain.
// All fields are best effort and may be empty (see individual field notes).
type FilterChainMetadata struct {
	// InstanceHostname defines the hostname of the service this filter chain is built for.
	// Note: This is best effort; this may be empty if generated by Sidecar config, and there may be multiple
	// Services that make up the filter chain.
	InstanceHostname host.Name
	// KubernetesServiceNamespace is the namespace the service is defined in, if it is for a Kubernetes Service.
	// Note: This is best effort; this may be empty if generated by Sidecar config, and there may be multiple
	// Services that make up the filter chain.
	KubernetesServiceNamespace string
	// KubernetesServiceName is the name of service, if it is for a Kubernetes Service.
	// Note: This is best effort; this may be empty if generated by Sidecar config, and there may be multiple
	// Services that make up the filter chain.
	KubernetesServiceName string
}
// ShortHostname returns "name.namespace" when the Kubernetes service name is
// known, falling back to the instance hostname otherwise.
func (tm FilterChainMetadata) ShortHostname() string {
	if tm.KubernetesServiceName == "" {
		return tm.InstanceHostname.String()
	}
	return tm.KubernetesServiceName + "." + tm.KubernetesServiceNamespace
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"fmt"
"net"
"net/netip"
"sort"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
statefulsession "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/stateful_session/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
cookiev3 "github.com/envoyproxy/go-control-plane/envoy/extensions/http/stateful_session/cookie/v3"
headerv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/http/stateful_session/header/v3"
httpv3 "github.com/envoyproxy/go-control-plane/envoy/type/http/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
istionetworking "istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config"
kubelabels "istio.io/istio/pkg/kube/labels"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/proto/merge"
"istio.io/istio/pkg/util/strcase"
"istio.io/istio/pkg/wellknown"
)
const (
	// BlackHoleCluster to catch traffic from routes with unresolved clusters. Traffic arriving here goes nowhere.
	BlackHoleCluster = "BlackHoleCluster"
	// BlackHole is the name of the virtual host and route name used to block all traffic
	BlackHole = "block_all"
	// PassthroughCluster to forward traffic to the original destination requested. This cluster is used when
	// traffic does not match any listener in envoy.
	PassthroughCluster = "PassthroughCluster"
	// Passthrough is the name of the virtual host used to forward traffic to the
	// PassthroughCluster
	Passthrough = "allow_any"
	// PassthroughFilterChain to catch traffic that doesn't match other filter chains.
	PassthroughFilterChain = "PassthroughFilterChain"
	// Inbound passthrough clusters need to bind the loopback IP address for security and loop avoidance.
	InboundPassthroughClusterIpv4 = "InboundPassthroughClusterIpv4"
	InboundPassthroughClusterIpv6 = "InboundPassthroughClusterIpv6"
	// IstioMetadataKey is the key under which metadata is added to a route or cluster
	// regarding the virtual service or destination rule used for each
	IstioMetadataKey = "istio"
	// EnvoyTransportSocketMetadataKey is the key under which metadata is added to an endpoint
	// which determines the endpoint level transport socket configuration.
	EnvoyTransportSocketMetadataKey = "envoy.transport_socket_match"
	// AltSvcHeader is the well-known HTTP alternative-service header name.
	AltSvcHeader = "alt-svc"
	// StatefulSessionFilter is the Envoy stateful session filter name.
	// TODO: Move to well known.
	StatefulSessionFilter = "envoy.filters.http.stateful_session"
	// AlpnOverrideMetadataKey is the key under which metadata is added
	// to indicate whether Istio rewrite the ALPN headers
	AlpnOverrideMetadataKey = "alpn_override"
)
// ALPNH2Only advertises that Proxy is going to use HTTP/2 when talking to the cluster.
var ALPNH2Only = []string{"h2"}
// ALPNInMeshH2 advertises that Proxy is going to use HTTP/2 when talking to the in-mesh cluster.
// The custom "istio" value indicates in-mesh traffic and it's going to be used for routing decisions.
// Once Envoy supports client-side ALPN negotiation, this should be {"istio", "h2", "http/1.1"}.
var ALPNInMeshH2 = []string{"istio", "h2"}
// ALPNInMeshH2WithMxc advertises that Proxy is going to use HTTP/2 when talking to the in-mesh cluster.
// The custom "istio" value indicates in-mesh traffic and it's going to be used for routing decisions.
// The custom "istio-peer-exchange" value indicates, metadata exchange is enabled for TCP.
var ALPNInMeshH2WithMxc = []string{"istio-peer-exchange", "istio", "h2"}
// ALPNInMesh advertises that Proxy is going to talk to the in-mesh cluster.
// The custom "istio" value indicates in-mesh traffic and it's going to be used for routing decisions.
var ALPNInMesh = []string{"istio"}
// ALPNInMeshWithMxc advertises that Proxy is going to talk to the in-mesh cluster and has metadata exchange enabled for
// TCP. The custom "istio-peer-exchange" value indicates, metadata exchange is enabled for TCP. The custom "istio" value
// indicates in-mesh traffic and it's going to be used for routing decisions.
var ALPNInMeshWithMxc = []string{"istio-peer-exchange", "istio"}
// ALPNHttp advertises that Proxy is going to talk either http2 or http 1.1.
var ALPNHttp = []string{"h2", "http/1.1"}
// ALPNHttp3OverQUIC advertises that Proxy is going to talk HTTP/3 over QUIC
var ALPNHttp3OverQUIC = []string{"h3"}
// ALPNDownstreamWithMxc advertises that Proxy is going to talk either tcp(for metadata exchange), http2 or http 1.1.
var ALPNDownstreamWithMxc = []string{"istio-peer-exchange", "h2", "http/1.1"}
// ALPNDownstream advertises that Proxy is going to talk either http2 or http 1.1.
var ALPNDownstream = []string{"h2", "http/1.1"}
// ConvertAddressToCidr converts from string to CIDR proto, logging and
// returning nil when the address cannot be parsed.
func ConvertAddressToCidr(addr string) *core.CidrRange {
	cidr, err := AddrStrToCidrRange(addr)
	if err == nil {
		return cidr
	}
	log.Errorf("failed to convert address %s to CidrRange: %v", addr, err)
	return nil
}
// AddrStrToPrefix converts from string to CIDR prefix.
// The input may either already be in CIDR form ("10.0.0.0/8") or a raw IP
// ("10.0.0.1", "::1"), in which case a full-length (/32 or /128) prefix is
// returned. An empty string is an error.
func AddrStrToPrefix(addr string) (netip.Prefix, error) {
	if len(addr) == 0 {
		return netip.Prefix{}, fmt.Errorf("empty address")
	}
	// Already a CIDR, just parse it.
	if strings.Contains(addr, "/") {
		return netip.ParsePrefix(addr)
	}
	// Otherwise it is a raw IP. Make it a /32 or /128 depending on family.
	ipa, err := netip.ParseAddr(addr)
	if err != nil {
		return netip.Prefix{}, err
	}
	return netip.PrefixFrom(ipa, ipa.BitLen()), nil
}
// AddrStrToCidrRange converts from string to CIDR proto.
func AddrStrToCidrRange(addr string) (*core.CidrRange, error) {
	prefix, err := AddrStrToPrefix(addr)
	if err != nil {
		return nil, err
	}
	out := &core.CidrRange{
		AddressPrefix: prefix.Addr().String(),
		PrefixLen:     &wrapperspb.UInt32Value{Value: uint32(prefix.Bits())},
	}
	return out, nil
}
// BuildAddress returns a SocketAddress with the given ip and port or uds.
func BuildAddress(bind string, port uint32) *core.Address {
	if addr := BuildNetworkAddress(bind, port, istionetworking.TransportProtocolTCP); addr != nil {
		return addr
	}
	// A zero port means bind names a unix domain socket path instead.
	pipe := &core.Pipe{Path: strings.TrimPrefix(bind, model.UnixAddressPrefix)}
	return &core.Address{Address: &core.Address_Pipe{Pipe: pipe}}
}
// BuildAdditionalAddresses can add extra addresses to additional addresses for a listener.
// Empty entries are skipped; the result is nil when no valid extra addresses are given.
func BuildAdditionalAddresses(extrAddresses []string, listenPort uint32) []*listener.AdditionalAddress {
	var additionalAddresses []*listener.AdditionalAddress
	// Ranging over a nil/empty slice is a no-op, so no explicit length guard is needed.
	for _, exbd := range extrAddresses {
		if exbd == "" {
			continue
		}
		additionalAddresses = append(additionalAddresses, &listener.AdditionalAddress{
			Address: BuildAddress(exbd, listenPort),
		})
	}
	return additionalAddresses
}
// BuildNetworkAddress returns a SocketAddress for the given bind/port using the
// given transport protocol. Returns nil when port is 0 (callers treat that as a
// non-socket bind, e.g. a unix domain socket).
func BuildNetworkAddress(bind string, port uint32, transport istionetworking.TransportProtocol) *core.Address {
	if port == 0 {
		return nil
	}
	sa := &core.SocketAddress{
		Address:       bind,
		Protocol:      transport.ToEnvoySocketProtocol(),
		PortSpecifier: &core.SocketAddress_PortValue{PortValue: port},
	}
	return &core.Address{Address: &core.Address_SocketAddress{SocketAddress: sa}}
}
// SortVirtualHosts sorts a slice of virtual hosts by name, in place.
//
// Envoy computes a hash of RDS to see if things have changed - hash is affected by order of elements in the filter. Therefore
// we sort virtual hosts by name before handing them back so the ordering is stable across HTTP Route Configs.
func SortVirtualHosts(hosts []*route.VirtualHost) {
	if len(hosts) < 2 {
		return
	}
	sort.SliceStable(hosts, func(a, b int) bool {
		return hosts[a].Name < hosts[b].Name
	})
}
// IsIstioVersionGE119 checks whether the given Istio version is greater than or equals 1.19.
// A nil version is treated as "latest" and therefore satisfies the check.
func IsIstioVersionGE119(version *model.IstioVersion) bool {
	if version == nil {
		return true
	}
	return version.Compare(&model.IstioVersion{Major: 1, Minor: 19, Patch: -1}) >= 0
}
// ConvertLocality converts '/' separated locality string to Locality struct.
// An empty string yields an empty (but non-nil) Locality.
func ConvertLocality(locality string) *core.Locality {
	out := &core.Locality{}
	if locality == "" {
		return out
	}
	region, zone, subzone := label.SplitLocalityLabel(locality)
	out.Region = region
	out.Zone = zone
	out.SubZone = subzone
	return out
}
// LocalityToString converts Locality struct to '/' separated locality string.
// Trailing components are dropped as soon as one level is empty (a locality
// with no zone never reports its subzone).
func LocalityToString(l *core.Locality) string {
	if l == nil {
		return ""
	}
	out := l.GetRegion()
	if l.GetZone() == "" {
		return out
	}
	out += "/" + l.GetZone()
	if l.GetSubZone() == "" {
		return out
	}
	return out + "/" + l.GetSubZone()
}
// GetFailoverPriorityLabels returns a byte array which contains failover priorities of the proxy.
// Each priority key is encoded as "key:value " in order; keys missing from
// proxyLabels contribute an empty value, keeping the encoding positional.
func GetFailoverPriorityLabels(proxyLabels map[string]string, priorities []string) []byte {
	var b bytes.Buffer
	for _, key := range priorities {
		b.WriteString(key)
		b.WriteRune(':')
		b.WriteString(proxyLabels[key])
		b.WriteRune(' ')
	}
	return b.Bytes()
}

// IsLocalityEmpty checks if a locality is empty (checking region is good enough, based on how its initialized)
func IsLocalityEmpty(locality *core.Locality) bool {
	// A nil locality or one with no region is considered empty.
	return locality == nil || len(locality.GetRegion()) == 0
}
// LocalityMatch reports whether the proxy locality matches the rule locality.
// "*" matches any value at that level; an empty zone/subzone in the rule also
// matches anything (region must always be given or "*").
func LocalityMatch(proxyLocality *core.Locality, ruleLocality string) bool {
	ruleRegion, ruleZone, ruleSubzone := label.SplitLocalityLabel(ruleLocality)
	if ruleRegion != "*" && proxyLocality.GetRegion() != ruleRegion {
		return false
	}
	if ruleZone != "*" && ruleZone != "" && proxyLocality.GetZone() != ruleZone {
		return false
	}
	return ruleSubzone == "*" || ruleSubzone == "" || proxyLocality.GetSubZone() == ruleSubzone
}
// LbPriority returns the load-balancing priority for an endpoint locality
// relative to the proxy locality: 0 = same subzone, 1 = same zone,
// 2 = same region, 3 = different region.
func LbPriority(proxyLocality, endpointsLocality *core.Locality) int {
	if proxyLocality.GetRegion() != endpointsLocality.GetRegion() {
		return 3
	}
	if proxyLocality.GetZone() != endpointsLocality.GetZone() {
		return 2
	}
	if proxyLocality.GetSubZone() != endpointsLocality.GetSubZone() {
		return 1
	}
	return 0
}
// CloneClusterLoadAssignment returns a shallow copy of the ClusterLoadAssignment;
// only the Endpoints slice itself is duplicated, Policy is shared.
func CloneClusterLoadAssignment(original *endpoint.ClusterLoadAssignment) *endpoint.ClusterLoadAssignment {
	if original == nil {
		return nil
	}
	return &endpoint.ClusterLoadAssignment{
		ClusterName: original.ClusterName,
		Endpoints:   cloneLocalityLbEndpoints(original.Endpoints),
		Policy:      original.Policy,
	}
}
// cloneLocalityLbEndpoints returns a slice of shallow copies of the given LocalityLbEndpoints.
func cloneLocalityLbEndpoints(endpoints []*endpoint.LocalityLbEndpoints) []*endpoint.LocalityLbEndpoints {
	out := make([]*endpoint.LocalityLbEndpoints, len(endpoints))
	for i, ep := range endpoints {
		out[i] = CloneLocalityLbEndpoint(ep)
	}
	return out
}
// CloneLocalityLbEndpoint returns a shallow copy of LocalityLbEndpoints; only
// LoadBalancingWeight is deep-copied so callers may mutate it independently.
func CloneLocalityLbEndpoint(ep *endpoint.LocalityLbEndpoints) *endpoint.LocalityLbEndpoints {
	clone := &endpoint.LocalityLbEndpoints{
		Locality:    ep.Locality,
		LbEndpoints: ep.LbEndpoints,
		Proximity:   ep.Proximity,
		Priority:    ep.Priority,
	}
	if w := ep.LoadBalancingWeight; w != nil {
		clone.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: w.GetValue()}
	}
	return clone
}
// BuildConfigInfoMetadata builds a fresh core.Metadata struct containing the
// name.namespace of the config, the type, etc. It is shorthand for
// AddConfigInfoMetadata starting from empty metadata.
func BuildConfigInfoMetadata(config config.Meta) *core.Metadata {
	return AddConfigInfoMetadata(nil, config)
}
// AddConfigInfoMetadata adds name.namespace of the config, the type, etc
// to the given core.Metadata struct, if metadata is not initialized, build a new metadata.
// The config is recorded under the "config" field of the "istio" filter metadata
// as an API-style path.
func AddConfigInfoMetadata(metadata *core.Metadata, config config.Meta) *core.Metadata {
	if metadata == nil {
		metadata = &core.Metadata{FilterMetadata: map[string]*structpb.Struct{}}
	}
	gvk := config.GroupVersionKind
	path := "/apis/" + gvk.Group + "/" + gvk.Version + "/namespaces/" + config.Namespace + "/" +
		strcase.CamelCaseToKebabCase(gvk.Kind) + "/" + config.Name
	istioMeta, ok := metadata.FilterMetadata[IstioMetadataKey]
	if !ok {
		istioMeta = &structpb.Struct{Fields: map[string]*structpb.Value{}}
		metadata.FilterMetadata[IstioMetadataKey] = istioMeta
	}
	istioMeta.Fields["config"] = &structpb.Value{
		Kind: &structpb.Value_StringValue{StringValue: path},
	}
	return metadata
}
// AddSubsetToMetadata will insert the subset name supplied. This should be called after the initial
// "istio" metadata has been created for the cluster. If the "istio" metadata field is not already
// defined, the subset information will not be added (to prevent adding this information where not
// needed). This is used for telemetry reporting.
func AddSubsetToMetadata(md *core.Metadata, subset string) {
	istioMeta, ok := md.FilterMetadata[IstioMetadataKey]
	if !ok {
		return
	}
	istioMeta.Fields["subset"] = &structpb.Value{
		Kind: &structpb.Value_StringValue{StringValue: subset},
	}
}
// AddALPNOverrideToMetadata sets filter metadata `istio.alpn_override: "false"` in the given core.Metadata struct,
// when TLS mode is SIMPLE or MUTUAL. If metadata is not initialized, builds a new metadata.
// For any other TLS mode, metadata is returned unchanged.
func AddALPNOverrideToMetadata(metadata *core.Metadata, tlsMode networking.ClientTLSSettings_TLSmode) *core.Metadata {
	if tlsMode != networking.ClientTLSSettings_SIMPLE && tlsMode != networking.ClientTLSSettings_MUTUAL {
		return metadata
	}
	if metadata == nil {
		metadata = &core.Metadata{
			FilterMetadata: map[string]*structpb.Struct{},
		}
	}
	if _, ok := metadata.FilterMetadata[IstioMetadataKey]; !ok {
		metadata.FilterMetadata[IstioMetadataKey] = &structpb.Struct{
			Fields: map[string]*structpb.Value{},
		}
	}
	// Use the declared constant rather than repeating the "alpn_override" literal.
	metadata.FilterMetadata[IstioMetadataKey].Fields[AlpnOverrideMetadataKey] = &structpb.Value{
		Kind: &structpb.Value_StringValue{
			StringValue: "false",
		},
	}
	return metadata
}
// IsHTTPFilterChain returns true if the filter chain contains a HTTP connection manager filter.
func IsHTTPFilterChain(filterChain *listener.FilterChain) bool {
	for _, filter := range filterChain.Filters {
		if filter.Name != wellknown.HTTPConnectionManager {
			continue
		}
		return true
	}
	return false
}
// MergeAnyWithAny merges a given any typed message into the given Any typed message by dynamically inferring the
// type of Any.
func MergeAnyWithAny(dst *anypb.Any, src *anypb.Any) (*anypb.Any, error) {
	// Assuming that Pilot is compiled with this type [which should always be the case]
	// get an object of type used by this message
	dstX, err := dst.UnmarshalNew()
	if err != nil {
		return nil, err
	}
	// get an object of type used by this message
	srcX, err := src.UnmarshalNew()
	if err != nil {
		return nil, err
	}
	// Merge the two typed protos
	merge.Merge(dstX, srcX)
	// Convert the merged proto back to dst
	return protoconv.MessageToAny(dstX), nil
}
// AppendLbEndpointMetadata adds metadata values to a lb endpoint using the passed in metadata as base.
func AppendLbEndpointMetadata(istioMetadata *model.EndpointMetadata, envoyMetadata *core.Metadata,
) {
	if !features.EndpointTelemetryLabel || !features.EnableTelemetryLabel {
		return
	}
	if envoyMetadata.FilterMetadata == nil {
		envoyMetadata.FilterMetadata = map[string]*structpb.Struct{}
	}
	// Advertise the endpoint's TLS mode so transport socket matching can select
	// the appropriate socket configuration.
	if istioMetadata.TLSMode != "" && istioMetadata.TLSMode != model.DisabledTLSModeLabel {
		envoyMetadata.FilterMetadata[EnvoyTransportSocketMetadataKey] = &structpb.Struct{
			Fields: map[string]*structpb.Value{
				model.TLSModeLabelShortname: {Kind: &structpb.Value_StringValue{StringValue: istioMetadata.TLSMode}},
			},
		}
	}
	// Add compressed telemetry metadata. Note this is a short term solution to make server workload metadata
	// available at client sidecar, so that telemetry filter could use for metric labels. This is useful for two cases:
	// server does not have sidecar injected, and request fails to reach server and thus metadata exchange does not happen.
	// Due to performance concern, telemetry metadata is compressed into a semicolon separated string:
	// workload-name;namespace;canonical-service-name;canonical-service-revision;cluster-id.
	if features.EndpointTelemetryLabel {
		// allow defaulting for non-injected cases
		canonicalName, canonicalRevision := kubelabels.CanonicalService(istioMetadata.Labels, istioMetadata.WorkloadName)
		// don't bother sending the default value in config; this must be an exact
		// match (a revision merely prefixed with "latest" must be preserved).
		if canonicalRevision == "latest" {
			canonicalRevision = ""
		}
		var sb strings.Builder
		sb.WriteString(istioMetadata.WorkloadName)
		sb.WriteString(";")
		sb.WriteString(istioMetadata.Namespace)
		sb.WriteString(";")
		sb.WriteString(canonicalName)
		sb.WriteString(";")
		sb.WriteString(canonicalRevision)
		sb.WriteString(";")
		sb.WriteString(istioMetadata.ClusterID.String())
		addIstioEndpointLabel(envoyMetadata, "workload", &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: sb.String()}})
	}
}
// addIstioEndpointLabel records key/val under the "istio" filter metadata,
// creating the struct on first use.
func addIstioEndpointLabel(metadata *core.Metadata, key string, val *structpb.Value) {
	istioMeta, ok := metadata.FilterMetadata[IstioMetadataKey]
	if !ok {
		istioMeta = &structpb.Struct{Fields: map[string]*structpb.Value{}}
		metadata.FilterMetadata[IstioMetadataKey] = istioMeta
	}
	istioMeta.Fields[key] = val
}
// IsAllowAnyOutbound checks if allow_any is enabled for outbound traffic.
func IsAllowAnyOutbound(node *model.Proxy) bool {
	if node.SidecarScope == nil || node.SidecarScope.OutboundTrafficPolicy == nil {
		return false
	}
	return node.SidecarScope.OutboundTrafficPolicy.Mode == networking.OutboundTrafficPolicy_ALLOW_ANY
}
// StringToExactMatch converts a list of strings into exact StringMatchers.
// Returns nil for empty input.
func StringToExactMatch(in []string) []*matcher.StringMatcher {
	if len(in) == 0 {
		return nil
	}
	out := make([]*matcher.StringMatcher, 0, len(in))
	for _, value := range in {
		out = append(out, &matcher.StringMatcher{
			MatchPattern: &matcher.StringMatcher_Exact{Exact: value},
		})
	}
	return out
}
// StringToPrefixMatch converts a list of strings into prefix StringMatchers.
// Returns nil for empty input.
func StringToPrefixMatch(in []string) []*matcher.StringMatcher {
	if len(in) == 0 {
		return nil
	}
	out := make([]*matcher.StringMatcher, 0, len(in))
	for _, value := range in {
		out = append(out, &matcher.StringMatcher{
			MatchPattern: &matcher.StringMatcher_Prefix{Prefix: value},
		})
	}
	return out
}
// ConvertToEnvoyMatches converts Istio StringMatches into Envoy StringMatchers,
// dropping entries that cannot be converted.
func ConvertToEnvoyMatches(in []*networking.StringMatch) []*matcher.StringMatcher {
	out := make([]*matcher.StringMatcher, 0, len(in))
	for _, sm := range in {
		em := ConvertToEnvoyMatch(sm)
		if em == nil {
			continue
		}
		out = append(out, em)
	}
	return out
}
// ConvertToEnvoyMatch converts a single Istio StringMatch into the equivalent
// Envoy StringMatcher; returns nil for an unrecognized (or unset) match type.
func ConvertToEnvoyMatch(in *networking.StringMatch) *matcher.StringMatcher {
	switch m := in.MatchType.(type) {
	case *networking.StringMatch_Exact:
		return &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Exact{Exact: m.Exact}}
	case *networking.StringMatch_Prefix:
		return &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_Prefix{Prefix: m.Prefix}}
	case *networking.StringMatch_Regex:
		re := &matcher.RegexMatcher{Regex: m.Regex}
		return &matcher.StringMatcher{MatchPattern: &matcher.StringMatcher_SafeRegex{SafeRegex: re}}
	default:
		return nil
	}
}
// CidrRangeSliceEqual reports whether two CidrRange slices are element-wise
// equal, comparing each pair as masked prefixes. Unparseable entries compare
// unequal.
func CidrRangeSliceEqual(a, b []*core.CidrRange) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		netA, err := toMaskedPrefix(a[i])
		if err != nil {
			return false
		}
		netB, err := toMaskedPrefix(b[i])
		if err != nil {
			return false
		}
		// Compare both the masked address and the prefix length; comparing the
		// address alone would wrongly treat e.g. 10.0.0.0/8 and 10.0.0.0/16 as equal.
		if netA.Bits() != netB.Bits() || netA.Addr() != netB.Addr() {
			return false
		}
	}
	return true
}
// toMaskedPrefix parses a CidrRange proto into a masked netip.Prefix.
// On parse failure the error is both logged and returned (the zero Prefix is
// returned masked).
func toMaskedPrefix(c *core.CidrRange) (netip.Prefix, error) {
	cidr := c.AddressPrefix + "/" + strconv.Itoa(int(c.PrefixLen.GetValue()))
	ipp, err := netip.ParsePrefix(cidr)
	if err != nil {
		log.Errorf("failed to parse CidrRange %v as IPNet: %v", c, err)
	}
	return ipp.Masked(), err
}
// MeshConfigToEnvoyForwardClientCertDetails converts the meshconfig enum to the Envoy HCM enum.
// meshconfig ForwardClientCertDetails and the Envoy config enum are off by 1
// due to the UNDEFINED in the meshconfig ForwardClientCertDetails.
// NOTE(review): assumes c is never UNDEFINED (0) — otherwise c-1 yields an
// invalid Envoy enum value; confirm callers validate before converting.
func MeshConfigToEnvoyForwardClientCertDetails(c meshconfig.ForwardClientCertDetails) hcm.HttpConnectionManager_ForwardClientCertDetails {
	return hcm.HttpConnectionManager_ForwardClientCertDetails(c - 1)
}
// ByteCount returns a human readable byte format using decimal (1000-based) units.
// Inspired by https://yourbasic.org/golang/formatting-byte-size-to-human-readable-format/
func ByteCount(b int) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%dB", b)
	}
	div := int64(unit)
	exp := 0
	for remaining := b / unit; remaining >= unit; remaining /= unit {
		div *= unit
		exp++
	}
	value := float64(b) / float64(div)
	return fmt.Sprintf("%.1f%cB", value, "kMGTPE"[exp])
}
// IPv6Compliant encloses ipv6 addresses in square brackets followed by port number in Host header/URIs.
// Hosts without a ':' (hostnames, IPv4) are returned unchanged.
func IPv6Compliant(host string) string {
	if !strings.Contains(host, ":") {
		return host
	}
	return "[" + host + "]"
}
// DomainName builds the domain name for a given host and port.
// IPv6 literals are bracketed by net.JoinHostPort.
func DomainName(host string, port int) string {
	portStr := strconv.Itoa(port)
	return net.JoinHostPort(host, portStr)
}
// BuildInternalEndpoint builds an lb endpoint pointing to the internal listener named dest.
// If the metadata contains "tunnel.destination" that will become the "endpointId" to prevent deduplication.
func BuildInternalEndpoint(dest string, meta *core.Metadata) []*endpoint.LocalityLbEndpoints {
	return []*endpoint.LocalityLbEndpoints{{
		LbEndpoints: []*endpoint.LbEndpoint{BuildInternalLbEndpoint(dest, meta)},
	}}
}
const OriginalDstMetadataKey = "envoy.filters.listener.original_dst"

// BuildInternalLbEndpoint builds an lb endpoint pointing to the internal listener named dest.
// If the metadata contains ORIGINAL_DST destination that will become the "endpointId" to prevent deduplication.
func BuildInternalLbEndpoint(dest string, meta *core.Metadata) *endpoint.LbEndpoint {
	endpointID := ""
	if tunnel, ok := meta.GetFilterMetadata()[OriginalDstMetadataKey]; ok {
		if local, ok := tunnel.GetFields()["local"]; ok {
			endpointID = local.GetStringValue()
		}
	}
	return &endpoint.LbEndpoint{
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: BuildInternalAddressWithIdentifier(dest, endpointID),
			},
		},
		Metadata: meta,
	}
}
// BuildInternalAddressWithIdentifier builds an EnvoyInternalAddress targeting
// the internal server listener `name`, tagged with `identifier` as endpoint id.
func BuildInternalAddressWithIdentifier(name, identifier string) *core.Address {
	internal := &core.EnvoyInternalAddress{
		AddressNameSpecifier: &core.EnvoyInternalAddress_ServerListenerName{ServerListenerName: name},
		EndpointId:           identifier,
	}
	return &core.Address{Address: &core.Address_EnvoyInternalAddress{EnvoyInternalAddress: internal}}
}
// BuildTunnelMetadataStruct builds the tunnel filter metadata: the logical
// destination ("local") plus, when set, the waypoint identifier.
func BuildTunnelMetadataStruct(address string, port int, waypoint string) *structpb.Struct {
	m := map[string]any{
		// logical destination behind the tunnel, on which policy and telemetry will be applied
		"local": net.JoinHostPort(address, strconv.Itoa(port)),
	}
	if waypoint != "" {
		m["waypoint"] = waypoint
	}
	// Only string values are inserted above, so NewStruct cannot fail here;
	// the error is deliberately discarded.
	st, _ := structpb.NewStruct(m)
	return st
}
// BuildStatefulSessionFilter wraps the stateful session config for svc into an
// HTTP filter; returns nil when the service needs no session persistence.
func BuildStatefulSessionFilter(svc *model.Service) *hcm.HttpFilter {
	cfg := MaybeBuildStatefulSessionFilterConfig(svc)
	if cfg == nil {
		return nil
	}
	return &hcm.HttpFilter{
		Name:       StatefulSessionFilter,
		ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
	}
}
// MaybeBuildStatefulSessionFilterConfig builds the stateful session config for a
// service based on its persistence labels. Cookie-based persistence takes
// precedence over header-based; nil is returned when neither label is set.
func MaybeBuildStatefulSessionFilterConfig(svc *model.Service) *statefulsession.StatefulSession {
	if svc == nil {
		return nil
	}
	svcLabels := svc.Attributes.Labels
	if sessionCookie := svcLabels[features.PersistentSessionLabel]; sessionCookie != "" {
		// Label value is "name" or "name:path"; path defaults to "/".
		cookieName, cookiePath, found := strings.Cut(sessionCookie, ":")
		if !found {
			cookiePath = "/"
		}
		return &statefulsession.StatefulSession{
			SessionState: &core.TypedExtensionConfig{
				Name: "envoy.http.stateful_session.cookie",
				TypedConfig: protoconv.MessageToAny(&cookiev3.CookieBasedSessionState{
					Cookie: &httpv3.Cookie{
						Path: cookiePath,
						Ttl:  &durationpb.Duration{Seconds: 120},
						Name: cookieName,
					},
				}),
			},
		}
	}
	if sessionHeader := svcLabels[features.PersistentSessionHeaderLabel]; sessionHeader != "" {
		return &statefulsession.StatefulSession{
			SessionState: &core.TypedExtensionConfig{
				Name: "envoy.http.stateful_session.header",
				TypedConfig: protoconv.MessageToAny(&headerv3.HeaderBasedSessionState{
					Name: sessionHeader,
				}),
			},
		}
	}
	return nil
}
// GetPortLevelTrafficPolicy return the port level traffic policy and true if it exists.
// Otherwise returns the original policy that applies to all destination ports.
func GetPortLevelTrafficPolicy(policy *networking.TrafficPolicy, port *model.Port) (*networking.TrafficPolicy, bool) {
	if port == nil || policy == nil {
		return policy, false
	}
	// Check if port level overrides exist, if yes override with them.
	var match *networking.TrafficPolicy_PortTrafficPolicy
	for _, p := range policy.PortLevelSettings {
		if p.Port != nil && p.Port.Number == uint32(port.Port) {
			// per the docs, port level policies do not inherit and instead to defaults if not provided
			match = p
			break
		}
	}
	if match == nil {
		return policy, false
	}
	// settings specified at the destination-level will not be inherited when
	// overridden by port-level settings, i.e. default values will be applied
	// to fields omitted in port-level traffic policies.
	return &networking.TrafficPolicy{
		ConnectionPool:   match.ConnectionPool,
		LoadBalancer:     match.LoadBalancer,
		OutlierDetection: match.OutlierDetection,
		Tls:              match.Tls,
	}, true
}
// MergeSubsetTrafficPolicy merges the destination and subset level traffic policy for the given port.
func MergeSubsetTrafficPolicy(original, subsetPolicy *networking.TrafficPolicy, port *model.Port) *networking.TrafficPolicy {
	// First resolve the destination rule's port-level traffic policy.
	original, _ = GetPortLevelTrafficPolicy(original, port)
	if subsetPolicy == nil {
		return original
	}
	subsetPolicy, hasPortLevel := GetPortLevelTrafficPolicy(subsetPolicy, port)
	if original == nil {
		return subsetPolicy
	}
	// Merge the destination rule with the subset policy, subset values win.
	merged := ShallowcopyTrafficPolicy(original)
	// A port-level subset policy replaces these four fields wholesale (defaults
	// apply to omitted fields rather than inheriting destination-level values).
	if hasPortLevel || subsetPolicy.ConnectionPool != nil {
		merged.ConnectionPool = subsetPolicy.ConnectionPool
	}
	if hasPortLevel || subsetPolicy.OutlierDetection != nil {
		merged.OutlierDetection = subsetPolicy.OutlierDetection
	}
	if hasPortLevel || subsetPolicy.LoadBalancer != nil {
		merged.LoadBalancer = subsetPolicy.LoadBalancer
	}
	if hasPortLevel || subsetPolicy.Tls != nil {
		merged.Tls = subsetPolicy.Tls
	}
	if subsetPolicy.Tunnel != nil {
		merged.Tunnel = subsetPolicy.Tunnel
	}
	if subsetPolicy.ProxyProtocol != nil {
		merged.ProxyProtocol = subsetPolicy.ProxyProtocol
	}
	return merged
}
// ShallowcopyTrafficPolicy returns a shallow copy of the traffic policy;
// PortLevelSettings are ignored.
func ShallowcopyTrafficPolicy(original *networking.TrafficPolicy) *networking.TrafficPolicy {
	if original == nil {
		return nil
	}
	return &networking.TrafficPolicy{
		ConnectionPool:   original.ConnectionPool,
		LoadBalancer:     original.LoadBalancer,
		OutlierDetection: original.OutlierDetection,
		Tls:              original.Tls,
		Tunnel:           original.Tunnel,
		ProxyProtocol:    original.ProxyProtocol,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/labels"
)
// NoOverride is an alias for MTLSUnknown to more clearly convey intent for InboundMTLSSettings:
// passing it as modeOverride means "do not force a particular mTLS mode".
const NoOverride = model.MTLSUnknown
// PolicyApplier is the interface that provides essential functionalities to help config Envoy (xDS) to enforce
// authentication policy. Each version of authentication policy will implement this interface.
type PolicyApplier interface {
	// InboundMTLSSettings returns inbound mTLS settings for a given workload port.
	InboundMTLSSettings(endpointPort uint32, node *model.Proxy, trustDomainAliases []string, modeOverride model.MutualTLSMode) MTLSSettings

	// JwtFilter returns the JWT HTTP filter to enforce the underlying authentication policy.
	// It may return nil, if no JWT validation is needed.
	JwtFilter(useExtendedJwt, clearRouteCache bool) *hcm.HttpFilter

	// AuthNFilter returns the (authn) HTTP filter to enforce the underlying authentication policy.
	// It may return nil, if no authentication is needed.
	AuthNFilter(forSidecar bool) *hcm.HttpFilter

	// PortLevelSetting returns port level mTLS settings.
	PortLevelSetting() map[uint32]model.MutualTLSMode

	// MtlsPolicy is embedded so every applier can also report the effective mTLS mode per port.
	MtlsPolicy
}
// MtlsPolicy detects the effective mTLS mode for a workload port.
type MtlsPolicy interface {
	// GetMutualTLSModeForPort gets the mTLS mode for the given port. If there is no port level setting, it
	// returns the inherited namespace/mesh level setting.
	GetMutualTLSModeForPort(endpointPort uint32) model.MutualTLSMode
}
// NewPolicyApplier returns the appropriate (policy) applier, depending on the versions of the policy
// that exist for the given service instance.
func NewPolicyApplier(push *model.PushContext, namespace string, labels labels.Instance, isWaypoint bool) PolicyApplier {
	return newPolicyApplier(
		push.AuthnPolicies.GetRootNamespace(),
		push.AuthnPolicies.GetJwtPoliciesForWorkload(namespace, labels, isWaypoint),
		push.AuthnPolicies.GetPeerAuthenticationsForWorkload(namespace, labels, isWaypoint), push)
}
// NewMtlsPolicy returns a checker used to detect proxy mtls mode.
func NewMtlsPolicy(push *model.PushContext, namespace string, labels labels.Instance, isWaypoint bool) MtlsPolicy {
	rootNamespace := push.AuthnPolicies.GetRootNamespace()
	peerPolicies := push.AuthnPolicies.GetPeerAuthenticationsForWorkload(namespace, labels, isWaypoint)
	// JWT policies are irrelevant for mTLS mode detection, so none are passed.
	return newPolicyApplier(rootNamespace, nil, peerPolicies, push)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authn
import (
"fmt"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_jwt "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/jwt_authn/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tlsv3 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/emptypb"
authn_alpha "istio.io/api/authentication/v1alpha1"
authn_filter "istio.io/api/envoy/config/filter/http/authn/v2alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/security/v1beta1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
authn_utils "istio.io/istio/pilot/pkg/security/authn/utils"
authn_model "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/security"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
)
// MTLSSettings describes the mTLS options for a filter chain.
type MTLSSettings struct {
	// Port is the workload port this option applies to.
	Port uint32
	// Mode is the effective mTLS mode to use.
	Mode model.MutualTLSMode
	// TCP describes the tls context to use for TCP filter chains.
	TCP *tlsv3.DownstreamTlsContext
	// HTTP describes the tls context to use for HTTP filter chains.
	HTTP *tlsv3.DownstreamTlsContext
}
// authnLog is the scoped logger used for authn debugging.
var authnLog = log.RegisterScope("authn", "authn debugging")

// policyApplier is the implementation of authn.PolicyApplier with the v1beta1 API.
type policyApplier struct {
	// processedJwtRules is the consolidated JWT rules from all jwtPolicies.
	processedJwtRules []*v1beta1.JWTRule

	// consolidatedPeerPolicy is the effective PeerAuthentication merged from all applicable policies.
	consolidatedPeerPolicy MergedPeerAuthentication

	// push is the PushContext used for mesh-level settings; nil is tolerated (see InboundMTLSSettings).
	push *model.PushContext
}
// newPolicyApplier returns new applier for v1beta1 authentication policies.
func newPolicyApplier(rootNamespace string,
	jwtPolicies []*config.Config,
	peerPolicies []*config.Config,
	push *model.PushContext,
) PolicyApplier {
	jwtRules := []*v1beta1.JWTRule{}
	// TODO(diemtvu) should we need to deduplicate JWT with the same issuer.
	// https://github.com/istio/istio/issues/19245
	for _, jwtPolicy := range jwtPolicies {
		spec := jwtPolicy.Spec.(*v1beta1.RequestAuthentication)
		jwtRules = append(jwtRules, spec.JwtRules...)
	}
	// Sort the jwt rules by the issuer alphabetically to make the later-on generated filter
	// config deterministic.
	slices.SortBy(jwtRules, func(r *v1beta1.JWTRule) string { return r.GetIssuer() })
	return policyApplier{
		processedJwtRules:      jwtRules,
		consolidatedPeerPolicy: ComposePeerAuthentication(rootNamespace, peerPolicies),
		push:                   push,
	}
}
// JwtFilter returns the JWT HTTP filter for the consolidated JWT rules, or nil
// when there is nothing to enforce.
func (a policyApplier) JwtFilter(useExtendedJwt, clearRouteCache bool) *hcm.HttpFilter {
	if len(a.processedJwtRules) == 0 {
		return nil
	}
	cfg := convertToEnvoyJwtConfig(a.processedJwtRules, a.push, useExtendedJwt, clearRouteCache)
	if cfg == nil {
		return nil
	}
	return &hcm.HttpFilter{
		Name:       authn_model.EnvoyJwtFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
	}
}
// defaultAuthnFilter returns a baseline authn filter config with an empty policy.
func defaultAuthnFilter() *authn_filter.FilterConfig {
	cfg := &authn_filter.FilterConfig{Policy: &authn_alpha.Policy{}}
	// Always safe to set: it is a no-op when mTLS is not in use.
	cfg.SkipValidateTrustDomain = true
	return cfg
}
// setAuthnFilterForRequestAuthn overwrites the JWT-origin portion of the authn
// filter config from the beta RequestAuthentication rules; returns config
// unchanged when no rules apply.
func (a policyApplier) setAuthnFilterForRequestAuthn(config *authn_filter.FilterConfig) *authn_filter.FilterConfig {
	if len(a.processedJwtRules) == 0 {
		// (beta) RequestAuthentication is not set for workload, do nothing.
		authnLog.Debug("AuthnFilter: RequestAuthentication (beta policy) not found, keep settings with alpha API")
		return config
	}
	if config == nil {
		config = defaultAuthnFilter()
	}
	// Obsolete (payload is extracted from metadata); reset to remove any artifacts
	// from the alpha applier.
	config.JwtOutputPayloadLocations = nil
	p := config.Policy
	// Rebuild origins from the beta API rules.
	// nolint: staticcheck
	p.Origins = make([]*authn_alpha.OriginAuthenticationMethod, 0, len(a.processedJwtRules))
	// Always true for the beta API, as it doesn't do rejection on missing token.
	// nolint: staticcheck
	p.OriginIsOptional = true
	// Always bind request.auth.principal from JWT origin. In v2 policy, authorization config specifies what principal to
	// choose from instead, rather than in authn config.
	// nolint: staticcheck
	p.PrincipalBinding = authn_alpha.PrincipalBinding_USE_ORIGIN
	// nolint: staticcheck
	for _, rule := range a.processedJwtRules {
		p.Origins = append(p.Origins, &authn_alpha.OriginAuthenticationMethod{
			Jwt: &authn_alpha.Jwt{
				// used for getting the filter data, and all other fields are irrelevant.
				Issuer: rule.GetIssuer(),
			},
		})
	}
	return config
}
// AuthNFilter returns the Istio authn filter config:
// - If RequestAuthentication is used, it overwrite the settings for request principal validation and extraction based on the new API.
// - If RequestAuthentication is used, principal binding is always set to ORIGIN.
func (a policyApplier) AuthNFilter(forSidecar bool) *hcm.HttpFilter {
	// Build the config from request authentication, if applicable (nil when no
	// JWT rules are in effect).
	filterConfigProto := a.setAuthnFilterForRequestAuthn(nil)
	if filterConfigProto == nil {
		return nil
	}
	// disable clear route cache for sidecars because the JWT claim based routing is only supported on gateways.
	filterConfigProto.DisableClearRouteCache = forSidecar

	// Note: in previous Istio versions, the authn filter also handled PeerAuthentication, to extract principal.
	// This has been modified to rely on the TCP filter
	return &hcm.HttpFilter{
		Name:       filters.AuthnFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(filterConfigProto)},
	}
}
// InboundMTLSSettings computes the inbound mTLS settings (effective mode plus the
// TCP and HTTP downstream TLS contexts) for the given endpoint port. When
// modeOverride is MTLSUnknown, the mode is resolved from the merged
// PeerAuthentication policy for that port.
func (a policyApplier) InboundMTLSSettings(
	endpointPort uint32,
	node *model.Proxy,
	trustDomainAliases []string,
	modeOverride model.MutualTLSMode,
) MTLSSettings {
	mode := modeOverride
	if mode == model.MTLSUnknown {
		mode = a.GetMutualTLSModeForPort(endpointPort)
	}
	authnLog.Debugf("InboundFilterChain: build inbound filter change for %v:%d in %s mode", node.ID, endpointPort, mode)
	var meshCfg *meshconfig.MeshConfig
	if a.push != nil {
		meshCfg = a.push.Mesh
	}
	// Configure TLS version based on meshconfig TLS API.
	// This is used to configure TLS version for inbound filter chain of ISTIO MUTUAL cases.
	// For MUTUAL and SIMPLE TLS modes specified via ServerTLSSettings in Sidecar or Gateway,
	// TLS version is configured in the BuildListenerContext.
	minVersion := authn_utils.GetMinTLSVersion(meshCfg.GetMeshMTLS().GetMinProtocolVersion())
	return MTLSSettings{
		Port: endpointPort,
		Mode: mode,
		TCP: authn_utils.BuildInboundTLS(mode, node, networking.ListenerProtocolTCP,
			trustDomainAliases, minVersion, meshCfg),
		HTTP: authn_utils.BuildInboundTLS(mode, node, networking.ListenerProtocolHTTP,
			trustDomainAliases, minVersion, meshCfg),
	}
}
// convertToEnvoyJwtConfig converts a list of JWT rules into Envoy JWT filter config to enforce it.
// Each rule is expected to correspond to one JWT issuer (provider).
// The behavior of the filter is to reject all requests with an invalid token. On the other hand,
// if no token is provided, the request is allowed.
func convertToEnvoyJwtConfig(jwtRules []*v1beta1.JWTRule, push *model.PushContext, useExtendedJwt, clearRouteCache bool) *envoy_jwt.JwtAuthentication {
	if len(jwtRules) == 0 {
		return nil
	}
	// Providers keyed by the generated name "origins-<index>".
	providers := map[string]*envoy_jwt.JwtProvider{}
	// Each element of innerAndList is the requirement for each provider, in the form of
	// {provider OR `allow_missing`}
	// This list will be ANDed (if it has more than one provider) for the final requirement.
	innerAndList := []*envoy_jwt.JwtRequirement{}
	// This is an (or) list for all providers. This will be ORed with the innerAndList above so
	// it can pass the requirement in the case that providers share the same location.
	outterOrList := []*envoy_jwt.JwtRequirement{}
	for i, jwtRule := range jwtRules {
		provider := &envoy_jwt.JwtProvider{
			Issuer:               jwtRule.Issuer,
			Audiences:            jwtRule.Audiences,
			Forward:              jwtRule.ForwardOriginalToken,
			ForwardPayloadHeader: jwtRule.OutputPayloadToHeader,
			PayloadInMetadata:    jwtRule.Issuer,
		}
		if useExtendedJwt {
			// Extended mode: store the payload under a fixed well-known metadata key and
			// split space-delimited "scope"/"permission" claims into lists for matching.
			provider.PayloadInMetadata = filters.EnvoyJwtFilterPayload
			provider.NormalizePayloadInMetadata = &envoy_jwt.JwtProvider_NormalizePayload{
				SpaceDelimitedClaims: []string{"scope", "permission"},
			}
			provider.ClearRouteCache = clearRouteCache
		}
		// Copy the claim-to-header mappings requested by the rule.
		for _, claimAndHeader := range jwtRule.OutputClaimToHeaders {
			provider.ClaimToHeaders = append(provider.ClaimToHeaders, &envoy_jwt.JwtClaimToHeader{
				HeaderName: claimAndHeader.Header,
				ClaimName:  claimAndHeader.Claim,
			})
		}
		// Token extraction locations: headers (with optional value prefix), query params, cookies.
		for _, location := range jwtRule.FromHeaders {
			provider.FromHeaders = append(provider.FromHeaders, &envoy_jwt.JwtHeader{
				Name:        location.Name,
				ValuePrefix: location.Prefix,
			})
		}
		provider.FromParams = jwtRule.FromParams
		provider.FromCookies = jwtRule.FromCookies
		authnLog.Debugf("JwksFetchMode is set to: %v", features.JwksFetchMode)
		// Use Envoy remote jwks if jwksUri is not empty and JwksFetchMode not Istiod. Parse the jwksUri to get the
		// cluster name, generate the jwt filter config using remote Jwks.
		// If failed to parse the cluster name, only fallback to let istiod to fetch the jwksUri when
		// remoteJwksMode is Hybrid.
		if features.JwksFetchMode != jwt.Istiod && jwtRule.JwksUri != "" {
			jwksInfo, err := security.ParseJwksURI(jwtRule.JwksUri)
			if err != nil {
				// NOTE(review): on parse failure jwksInfo stays zero-valued and execution
				// continues; the cluster lookup below will then fail and control reaches the
				// fallback branches with an empty hostname — confirm this degradation is intended.
				authnLog.Errorf("Failed to parse jwt rule jwks uri %v", err)
			}
			_, cluster, err := model.LookupCluster(push, jwksInfo.Hostname.String(), jwksInfo.Port)
			authnLog.Debugf("Look up cluster result: %v", cluster)
			if err == nil && len(cluster) > 0 {
				// This is a case of URI pointing to mesh cluster. Setup Remote Jwks and let Envoy fetch the key.
				provider.JwksSourceSpecifier = &envoy_jwt.JwtProvider_RemoteJwks{
					RemoteJwks: &envoy_jwt.RemoteJwks{
						HttpUri: &core.HttpUri{
							Uri: jwtRule.JwksUri,
							HttpUpstreamType: &core.HttpUri_Cluster{
								Cluster: cluster,
							},
							Timeout: &durationpb.Duration{Seconds: 5},
						},
						CacheDuration: &durationpb.Duration{Seconds: 5 * 60},
					},
				}
			} else if features.JwksFetchMode == jwt.Hybrid {
				// Hybrid mode: cluster lookup failed, so fall back to istiod fetching the keys.
				provider.JwksSourceSpecifier = push.JwtKeyResolver.BuildLocalJwks(jwtRule.JwksUri, jwtRule.Issuer, "")
			} else {
				model.IncLookupClusterFailures("jwks")
				// Log error and create remote JWKs with fake cluster
				authnLog.Errorf("Failed to look up Envoy cluster %v. "+
					"Please create ServiceEntry to register external JWKs server or "+
					"set PILOT_JWT_ENABLE_REMOTE_JWKS to hybrid/istiod mode.", err)
				provider.JwksSourceSpecifier = &envoy_jwt.JwtProvider_RemoteJwks{
					RemoteJwks: &envoy_jwt.RemoteJwks{
						HttpUri: &core.HttpUri{
							Uri: jwtRule.JwksUri,
							HttpUpstreamType: &core.HttpUri_Cluster{
								Cluster: model.BuildSubsetKey(model.TrafficDirectionOutbound, "", jwksInfo.Hostname, jwksInfo.Port),
							},
							Timeout: &durationpb.Duration{Seconds: 5},
						},
						CacheDuration: &durationpb.Duration{Seconds: 5 * 60},
					},
				}
			}
		} else {
			// Use inline jwks as existing flow, either jwtRule.jwks is empty or let istiod fetch the jwtRule.jwksUri
			provider.JwksSourceSpecifier = push.JwtKeyResolver.BuildLocalJwks(jwtRule.JwksUri, jwtRule.Issuer, jwtRule.Jwks)
		}
		name := fmt.Sprintf("origins-%d", i)
		providers[name] = provider
		// Per-provider requirement {provider OR allow_missing}: ANDing these later
		// enforces that any token that IS present must validate against its provider.
		innerAndList = append(innerAndList, &envoy_jwt.JwtRequirement{
			RequiresType: &envoy_jwt.JwtRequirement_RequiresAny{
				RequiresAny: &envoy_jwt.JwtRequirementOrList{
					Requirements: []*envoy_jwt.JwtRequirement{
						{
							RequiresType: &envoy_jwt.JwtRequirement_ProviderName{
								ProviderName: name,
							},
						},
						{
							RequiresType: &envoy_jwt.JwtRequirement_AllowMissing{
								AllowMissing: &emptypb.Empty{},
							},
						},
					},
				},
			},
		})
		outterOrList = append(outterOrList, &envoy_jwt.JwtRequirement{
			RequiresType: &envoy_jwt.JwtRequirement_ProviderName{
				ProviderName: name,
			},
		})
	}
	// If there is only one provider, simply use an OR of {provider, `allow_missing`}.
	if len(innerAndList) == 1 {
		return &envoy_jwt.JwtAuthentication{
			Rules: []*envoy_jwt.RequirementRule{
				{
					// Apply to every request path.
					Match: &route.RouteMatch{
						PathSpecifier: &route.RouteMatch_Prefix{
							Prefix: "/",
						},
					},
					RequirementType: &envoy_jwt.RequirementRule_Requires{
						Requires: innerAndList[0],
					},
				},
			},
			Providers:           providers,
			BypassCorsPreflight: true,
		}
	}
	// If there is more than one provider, the filter requirement is an OR of
	// {P1, P2 .., AND of {OR{P1, allow_missing}, OR{P2, allow_missing} ...}}
	// where the innerAnd enforces that a token, if provided, must be valid, and the
	// outer OR aids the case where providers share the same location (as
	// it will always fail with the innerAND).
	outterOrList = append(outterOrList, &envoy_jwt.JwtRequirement{
		RequiresType: &envoy_jwt.JwtRequirement_RequiresAll{
			RequiresAll: &envoy_jwt.JwtRequirementAndList{
				Requirements: innerAndList,
			},
		},
	})
	return &envoy_jwt.JwtAuthentication{
		Rules: []*envoy_jwt.RequirementRule{
			{
				// Apply to every request path.
				Match: &route.RouteMatch{
					PathSpecifier: &route.RouteMatch_Prefix{
						Prefix: "/",
					},
				},
				RequirementType: &envoy_jwt.RequirementRule_Requires{
					Requires: &envoy_jwt.JwtRequirement{
						RequiresType: &envoy_jwt.JwtRequirement_RequiresAny{
							RequiresAny: &envoy_jwt.JwtRequirementOrList{
								Requirements: outterOrList,
							},
						},
					},
				},
			},
		},
		Providers:           providers,
		BypassCorsPreflight: true,
	}
}
// PortLevelSetting returns the per-port mTLS mode overrides from the merged
// PeerAuthentication policy. May be nil when no port-level config exists.
func (a policyApplier) PortLevelSetting() map[uint32]model.MutualTLSMode {
	return a.consolidatedPeerPolicy.PerPort
}
// GetMutualTLSModeForPort returns the effective mTLS mode for the given endpoint
// port: the port-level override when one exists, otherwise the workload-wide mode.
func (a policyApplier) GetMutualTLSModeForPort(endpointPort uint32) model.MutualTLSMode {
	mode, hasOverride := a.consolidatedPeerPolicy.PerPort[endpointPort]
	if !hasOverride {
		return a.consolidatedPeerPolicy.Mode
	}
	return mode
}
// MergedPeerAuthentication is the aggregate result of composing all applicable
// PeerAuthentication configs (mesh, namespace and workload scope) for a workload.
type MergedPeerAuthentication struct {
	// Mode is the overall mode of policy. May be overridden by PerPort
	Mode model.MutualTLSMode
	// PerPort is the per-port policy; entries override Mode for that port.
	PerPort map[uint32]model.MutualTLSMode
}
// ComposePeerAuthentication returns the effective PeerAuthentication given the list of applicable
// configs. This list should contain at most 1 mesh-level and 1 namespace-level config.
// Workload-level configs should not be in the root namespace (this should be guaranteed by the
// caller, though they will be safely ignored in this function). If the input config list is empty,
// returns a default policy set to PERMISSIVE.
// If there is at least one applicable config, the returned policy is a combination
// based on the following rules:
//   - It takes the setting from the most narrow scope (i.e. workload-level is preferred over
//     namespace-level, which is preferred over mesh-level).
//   - When there is more than one policy in the same scope (i.e. workload-level), the oldest one wins.
//   - UNSET is replaced with the setting from the parent scope. I.e. an UNSET port-level config is
//     replaced with the workload-level config, UNSET in workload-level config is replaced with
//     the namespace-level one, and so on.
func ComposePeerAuthentication(rootNamespace string, configs []*config.Config) MergedPeerAuthentication {
	// Start from the PERMISSIVE default; narrower scopes overwrite it below.
	merged := MergedPeerAuthentication{
		Mode: model.MTLSPermissive,
	}
	// Pick the oldest config per scope.
	var meshCfg, namespaceCfg, workloadCfg *config.Config
	for _, cfg := range configs {
		spec := cfg.Spec.(*v1beta1.PeerAuthentication)
		hasSelector := spec.Selector != nil && len(spec.Selector.MatchLabels) > 0
		switch {
		case !hasSelector && cfg.Namespace == rootNamespace:
			// Mesh-level policy (no selector, in root namespace).
			if meshCfg == nil || cfg.CreationTimestamp.Before(meshCfg.CreationTimestamp) {
				authnLog.Debugf("Switch selected mesh policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
				meshCfg = cfg
			}
		case !hasSelector:
			// Namespace-level policy (no selector, outside root namespace).
			if namespaceCfg == nil || cfg.CreationTimestamp.Before(namespaceCfg.CreationTimestamp) {
				authnLog.Debugf("Switch selected namespace policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
				namespaceCfg = cfg
			}
		case cfg.Namespace != rootNamespace:
			// Workload-level policy, aka the one with selector and not in root namespace.
			if workloadCfg == nil || cfg.CreationTimestamp.Before(workloadCfg.CreationTimestamp) {
				authnLog.Debugf("Switch selected workload policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
				workloadCfg = cfg
			}
		}
	}
	// Resolve inheritance in mesh, namespace, workload order so that each narrower
	// scope (when it carries an explicit mode) overrides the wider one.
	for _, scoped := range []*config.Config{meshCfg, namespaceCfg} {
		if scoped == nil {
			continue
		}
		if mtls := scoped.Spec.(*v1beta1.PeerAuthentication).Mtls; !isMtlsModeUnset(mtls) {
			merged.Mode = model.ConvertToMutualTLSMode(mtls.Mode)
		}
	}
	if workloadCfg != nil {
		workloadPolicy := workloadCfg.Spec.(*v1beta1.PeerAuthentication)
		if !isMtlsModeUnset(workloadPolicy.Mtls) {
			merged.Mode = model.ConvertToMutualTLSMode(workloadPolicy.Mtls.Mode)
		}
		if workloadPolicy.PortLevelMtls != nil {
			merged.PerPort = make(map[uint32]model.MutualTLSMode, len(workloadPolicy.PortLevelMtls))
			for port, mtls := range workloadPolicy.PortLevelMtls {
				if isMtlsModeUnset(mtls) {
					// UNSET port-level config inherits from the workload level.
					merged.PerPort[port] = merged.Mode
				} else {
					merged.PerPort[port] = model.ConvertToMutualTLSMode(mtls.Mode)
				}
			}
		}
	}
	return merged
}
// isMtlsModeUnset reports whether the given mTLS setting carries no explicit mode,
// either because it is absent entirely or because its mode is UNSET.
func isMtlsModeUnset(mtls *v1beta1.PeerAuthentication_MutualTLS) bool {
	if mtls == nil {
		return true
	}
	return mtls.Mode == v1beta1.PeerAuthentication_MutualTLS_UNSET
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/networking/util"
authn_model "istio.io/istio/pilot/pkg/security/model"
protovalue "istio.io/istio/pkg/proto"
)
// SupportedCiphers for server side TLS configuration.
// This is the default cipher-suite set offered on inbound mTLS listeners; a
// mesh-wide override can be supplied via MeshConfig.MeshMTLS.CipherSuites
// (see BuildInboundTLS).
var SupportedCiphers = []string{
	"ECDHE-ECDSA-AES256-GCM-SHA384",
	"ECDHE-RSA-AES256-GCM-SHA384",
	"ECDHE-ECDSA-AES128-GCM-SHA256",
	"ECDHE-RSA-AES128-GCM-SHA256",
	"AES256-GCM-SHA384",
	"AES128-GCM-SHA256",
}
// BuildInboundTLS returns the downstream TLS context corresponding to the mTLS mode,
// or nil when mTLS is disabled (or the mode is unknown).
func BuildInboundTLS(mTLSMode model.MutualTLSMode, node *model.Proxy,
	protocol networking.ListenerProtocol, trustDomainAliases []string, minTLSVersion tls.TlsParameters_TlsProtocol,
	mc *meshconfig.MeshConfig,
) *tls.DownstreamTlsContext {
	if mTLSMode == model.MTLSUnknown || mTLSMode == model.MTLSDisable {
		return nil
	}
	downstream := &tls.DownstreamTlsContext{
		CommonTlsContext:         &tls.CommonTlsContext{},
		RequireClientCertificate: protovalue.BoolTrue,
	}
	common := downstream.CommonTlsContext
	if protocol == networking.ListenerProtocolTCP && features.MetadataExchange {
		// For TCP with mTLS, we advertise "istio-peer-exchange" from client and
		// expect the same from server. This is so that secure metadata exchange
		// transfer can take place between sidecars for TCP with mTLS.
		if features.DisableMxALPN {
			common.AlpnProtocols = util.ALPNDownstream
		} else {
			common.AlpnProtocols = util.ALPNDownstreamWithMxc
		}
	} else {
		// Note that in the PERMISSIVE mode, we match filter chain on "istio" ALPN,
		// which is used to differentiate between service mesh and legacy traffic.
		//
		// Client sidecar outbound cluster's TLSContext.ALPN must include "istio".
		//
		// Server sidecar filter chain's FilterChainMatch.ApplicationProtocols must
		// include "istio" for the secure traffic, but its TLSContext.ALPN must not
		// include "istio", which would interfere with negotiation of the underlying
		// protocol, e.g. HTTP/2.
		common.AlpnProtocols = util.ALPNHttp
	}
	// Mesh-wide cipher suites, when configured, replace the built-in default set.
	ciphers := SupportedCiphers
	if mc != nil && mc.MeshMTLS != nil && mc.MeshMTLS.CipherSuites != nil {
		ciphers = mc.MeshMTLS.CipherSuites
	}
	// Set Minimum TLS version to match the default client version and allowed strong cipher suites for sidecars.
	common.TlsParams = &tls.TlsParameters{
		CipherSuites:              ciphers,
		TlsMinimumProtocolVersion: minTLSVersion,
		TlsMaximumProtocolVersion: tls.TlsParameters_TLSv1_3,
	}
	authn_model.ApplyToCommonTLSContext(common, node, []string{}, /*subjectAltNames*/
		trustDomainAliases, downstream.RequireClientCertificate.Value)
	return downstream
}
// GetMinTLSVersion returns the minimum TLS version for workloads based on the mesh config.
// Anything other than an explicit TLSV1_3 setting falls back to TLS 1.2.
func GetMinTLSVersion(ver meshconfig.MeshConfig_TLSConfig_TLSProtocol) tls.TlsParameters_TlsProtocol {
	if ver == meshconfig.MeshConfig_TLSConfig_TLSV1_3 {
		return tls.TlsParameters_TLSv1_3
	}
	return tls.TlsParameters_TLSv1_2
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
"fmt"
"strconv"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
rbachttp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
rbactcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3"
"github.com/hashicorp/go-multierror"
"istio.io/api/annotation"
"istio.io/istio/pilot/pkg/model"
authzmodel "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pilot/pkg/security/trustdomain"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/wellknown"
)
// rbacPolicyMatchNever is a policy that can never match any request: its
// permission is NOT(any) and its principal is NOT(any), both always false.
// It is used for policies with zero rules so the generated config still carries
// a named entry for that policy.
var rbacPolicyMatchNever = &rbacpb.Policy{
	Permissions: []*rbacpb.Permission{{Rule: &rbacpb.Permission_NotRule{
		NotRule: &rbacpb.Permission{Rule: &rbacpb.Permission_Any{Any: true}},
	}}},
	Principals: []*rbacpb.Principal{{Identifier: &rbacpb.Principal_NotId{
		NotId: &rbacpb.Principal{Identifier: &rbacpb.Principal_Any{Any: true}},
	}}},
}
// Option is the general setting to control the builder's behavior.
type Option struct {
	// IsCustomBuilder indicates the builder generates config for the CUSTOM action
	// (external authorizer) rather than ALLOW/DENY/AUDIT.
	IsCustomBuilder bool
	// UseFilterState is forwarded (negated) to rule generation; presumably it selects
	// filter-state-based matching in the authz model — confirm in authzmodel.Generate.
	UseFilterState bool
	// UseExtendedJwt is forwarded to authzmodel.New to enable extended JWT claim handling.
	UseExtendedJwt bool
}
// Builder builds Istio authorization policy to Envoy filters.
type Builder struct {
	// trustDomainBundle is used to expand source principals across trust domain aliases.
	trustDomainBundle trustdomain.Bundle
	// option controls CUSTOM vs ALLOW/DENY/AUDIT generation and JWT/filter-state behavior.
	option Option

	// populated when building for CUSTOM action.
	customPolicies []model.AuthorizationPolicy
	// extensions maps provider name to its parsed ext_authz filter config.
	extensions map[string]*builtExtAuthz

	// populated when building for ALLOW/DENY/AUDIT action.
	denyPolicies  []model.AuthorizationPolicy
	allowPolicies []model.AuthorizationPolicy
	auditPolicies []model.AuthorizationPolicy

	// logger emits logs about policies; (re)initialized per BuildHTTP/BuildTCP call.
	logger *AuthzLogger
}
// New returns a new builder for the given workload with the authorization policy.
// Returns nil if none of the authorization policies are enabled for the workload.
func New(trustDomainBundle trustdomain.Bundle, push *model.PushContext, policies model.AuthorizationPoliciesResult, option Option) *Builder {
	b := &Builder{
		trustDomainBundle: trustDomainBundle,
		option:            option,
	}
	if option.IsCustomBuilder {
		// CUSTOM mode only cares about CUSTOM policies and their providers.
		if len(policies.Custom) == 0 {
			return nil
		}
		b.customPolicies = policies.Custom
		b.extensions = processExtensionProvider(push)
		return b
	}
	if len(policies.Deny) == 0 && len(policies.Allow) == 0 && len(policies.Audit) == 0 {
		return nil
	}
	b.denyPolicies = policies.Deny
	b.allowPolicies = policies.Allow
	b.auditPolicies = policies.Audit
	return b
}
// BuildHTTP returns the HTTP filters built from the authorization policy.
func (b Builder) BuildHTTP() []*hcm.HttpFilter {
	b.logger = &AuthzLogger{}
	defer b.logger.Report()
	if b.option.IsCustomBuilder {
		// Use the DENY action so that a HTTP rule is properly handled when generating for TCP filter chain.
		configs := b.build(b.customPolicies, rbacpb.RBAC_DENY, false)
		if configs == nil {
			return nil
		}
		b.logger.AppendDebugf("built %d HTTP filters for CUSTOM action", len(configs.http))
		return configs.http
	}
	// Emit the actions in a fixed order: AUDIT, then DENY, then ALLOW.
	var filters []*hcm.HttpFilter
	for _, group := range []struct {
		policies []model.AuthorizationPolicy
		action   rbacpb.RBAC_Action
		msg      string
	}{
		{b.auditPolicies, rbacpb.RBAC_LOG, "built %d HTTP filters for AUDIT action"},
		{b.denyPolicies, rbacpb.RBAC_DENY, "built %d HTTP filters for DENY action"},
		{b.allowPolicies, rbacpb.RBAC_ALLOW, "built %d HTTP filters for ALLOW action"},
	} {
		if configs := b.build(group.policies, group.action, false); configs != nil {
			b.logger.AppendDebugf(group.msg, len(configs.http))
			filters = append(filters, configs.http...)
		}
	}
	return filters
}
// BuildTCP returns the TCP filters built from the authorization policy.
func (b Builder) BuildTCP() []*listener.Filter {
	b.logger = &AuthzLogger{}
	defer b.logger.Report()
	if b.option.IsCustomBuilder {
		configs := b.build(b.customPolicies, rbacpb.RBAC_DENY, true)
		if configs == nil {
			return nil
		}
		b.logger.AppendDebugf("built %d TCP filters for CUSTOM action", len(configs.tcp))
		return configs.tcp
	}
	// Emit the actions in a fixed order: AUDIT, then DENY, then ALLOW.
	var filters []*listener.Filter
	for _, group := range []struct {
		policies []model.AuthorizationPolicy
		action   rbacpb.RBAC_Action
		msg      string
	}{
		{b.auditPolicies, rbacpb.RBAC_LOG, "built %d TCP filters for AUDIT action"},
		{b.denyPolicies, rbacpb.RBAC_DENY, "built %d TCP filters for DENY action"},
		{b.allowPolicies, rbacpb.RBAC_ALLOW, "built %d TCP filters for ALLOW action"},
	} {
		if configs := b.build(group.policies, group.action, true); configs != nil {
			b.logger.AppendDebugf(group.msg, len(configs.tcp))
			filters = append(filters, configs.tcp...)
		}
	}
	return filters
}
// builtConfigs holds the generated filters for one build call: http for HTTP
// filter chains, tcp for TCP filter chains. Only one of the two is populated
// per call to build (selected by its forTCP argument).
type builtConfigs struct {
	http []*hcm.HttpFilter
	tcp  []*listener.Filter
}
// isDryRun reports whether the policy is annotated as dry-run
// (istio.io/dry-run). An unparsable annotation value is logged and treated
// as false.
func (b Builder) isDryRun(policy model.AuthorizationPolicy) bool {
	val, found := policy.Annotations[annotation.IoIstioDryRun.Name]
	if !found {
		return false
	}
	dryRun, err := strconv.ParseBool(val)
	if err != nil {
		b.logger.AppendError(fmt.Errorf("failed to parse the value of %s: %v", annotation.IoIstioDryRun.Name, err))
		// ParseBool returns false on error, matching the non-annotated default.
		return false
	}
	return dryRun
}
// shadowRuleStatPrefix returns the stat prefix for the given shadow rules based
// on their action; anything other than ALLOW/DENY yields the empty prefix.
// The generated GetAction tolerates a nil rule set.
func shadowRuleStatPrefix(rule *rbacpb.RBAC) string {
	action := rule.GetAction()
	if action == rbacpb.RBAC_ALLOW {
		return authzmodel.RBACShadowRulesAllowStatPrefix
	}
	if action == rbacpb.RBAC_DENY {
		return authzmodel.RBACShadowRulesDenyStatPrefix
	}
	return ""
}
// build compiles the given policies of one action into RBAC configs and then into
// filters. Enforced policies go into the Rules of the filter while dry-run policies
// go into ShadowRules (evaluated but not enforced). Returns nil when policies is
// empty; otherwise returns TCP filters when forTCP is true, HTTP filters otherwise.
func (b Builder) build(policies []model.AuthorizationPolicy, action rbacpb.RBAC_Action, forTCP bool) *builtConfigs {
	if len(policies) == 0 {
		return nil
	}
	enforceRules := &rbacpb.RBAC{
		Action:   action,
		Policies: map[string]*rbacpb.Policy{},
	}
	shadowRules := &rbacpb.RBAC{
		Action:   action,
		Policies: map[string]*rbacpb.Policy{},
	}
	var providers []string
	// filterType is only used in debug log messages.
	filterType := "HTTP"
	if forTCP {
		filterType = "TCP"
	}
	hasEnforcePolicy, hasDryRunPolicy := false, false
	for _, policy := range policies {
		// Route the policy's generated rules to the enforced or shadow (dry-run) set.
		var currentRule *rbacpb.RBAC
		if b.isDryRun(policy) {
			currentRule = shadowRules
			hasDryRunPolicy = true
		} else {
			currentRule = enforceRules
			hasEnforcePolicy = true
		}
		if b.option.IsCustomBuilder {
			// Collect provider names for the ext_authz lookup performed later.
			providers = append(providers, policy.Spec.GetProvider().GetName())
		}
		for i, rule := range policy.Spec.Rules {
			// The name will later be used by ext_authz filter to get the evaluation result from dynamic metadata.
			name := policyName(policy.Namespace, policy.Name, i, b.option)
			if rule == nil {
				b.logger.AppendError(fmt.Errorf("skipped nil rule %s", name))
				continue
			}
			m, err := authzmodel.New(rule, b.option.UseExtendedJwt)
			if err != nil {
				b.logger.AppendError(multierror.Prefix(err, fmt.Sprintf("skipped invalid rule %s:", name)))
				continue
			}
			// Expand source principals to cover all trust domain aliases.
			m.MigrateTrustDomain(b.trustDomainBundle)
			if len(b.trustDomainBundle.TrustDomains) > 1 {
				b.logger.AppendDebugf("patched source principal with trust domain aliases %v", b.trustDomainBundle.TrustDomains)
			}
			generated, err := m.Generate(forTCP, !b.option.UseFilterState, action)
			if err != nil {
				// The rule could not be expressed on this chain type; skip it rather than fail.
				b.logger.AppendDebugf("skipped rule %s on TCP filter chain: %v", name, err)
				continue
			}
			if generated != nil {
				currentRule.Policies[name] = generated
				b.logger.AppendDebugf("generated config from rule %s on %s filter chain successfully", name, filterType)
			}
		}
		if len(policy.Spec.Rules) == 0 {
			// Generate an explicit policy that never matches.
			name := policyName(policy.Namespace, policy.Name, 0, b.option)
			b.logger.AppendDebugf("generated config from policy %s on %s filter chain successfully", name, filterType)
			currentRule.Policies[name] = rbacPolicyMatchNever
		}
	}
	// Drop whichever side received no policies so no empty RBAC config is emitted.
	if !hasEnforcePolicy {
		enforceRules = nil
	}
	if !hasDryRunPolicy {
		shadowRules = nil
	}
	if forTCP {
		return &builtConfigs{tcp: b.buildTCP(enforceRules, shadowRules, providers)}
	}
	return &builtConfigs{http: b.buildHTTP(enforceRules, shadowRules, providers)}
}
// buildHTTP assembles the HTTP-level filters for the computed RBAC rules. For
// ALLOW/DENY/AUDIT a single RBAC filter is emitted. For CUSTOM, the RBAC filter
// runs in shadow mode and an ext_authz filter performs the enforcement; when the
// provider cannot be resolved, a deny config is emitted for the affected rules.
func (b Builder) buildHTTP(rules *rbacpb.RBAC, shadowRules *rbacpb.RBAC, providers []string) []*hcm.HttpFilter {
	// Local constructor for the common RBAC HTTP filter wrapping.
	httpRBAC := func(cfg *rbachttp.RBAC) *hcm.HttpFilter {
		return &hcm.HttpFilter{
			Name:       wellknown.HTTPRoleBasedAccessControl,
			ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
		}
	}
	if !b.option.IsCustomBuilder {
		return []*hcm.HttpFilter{httpRBAC(&rbachttp.RBAC{
			Rules:                 rules,
			ShadowRules:           shadowRules,
			ShadowRulesStatPrefix: shadowRuleStatPrefix(shadowRules),
		})}
	}
	extauthz, err := getExtAuthz(b.extensions, providers)
	if err != nil {
		b.logger.AppendError(multierror.Prefix(err, "failed to process CUSTOM action, will generate deny configs for the specified rules:"))
		return []*hcm.HttpFilter{httpRBAC(&rbachttp.RBAC{Rules: getBadCustomDenyRules(rules)})}
	}
	// Add the RBAC filter in shadow mode so that it only evaluates the matching rules for CUSTOM action but not enforce it.
	// The evaluation result is stored in the dynamic metadata keyed by the policy name. And then the ext_authz filter
	// can utilize these metadata to trigger the enforcement conditionally.
	// See https://docs.google.com/document/d/1V4mCQCw7mlGp0zSQQXYoBdbKMDnkPOjeyUb85U07iSI/edit#bookmark=kix.jdq8u0an2r6s
	// for more details.
	return []*hcm.HttpFilter{
		httpRBAC(&rbachttp.RBAC{
			ShadowRules:           rules,
			ShadowRulesStatPrefix: authzmodel.RBACExtAuthzShadowRulesStatPrefix,
		}),
		{
			Name:       wellknown.HTTPExternalAuthorization,
			ConfigType: &hcm.HttpFilter_TypedConfig{TypedConfig: protoconv.MessageToAny(extauthz.http)},
		},
	}
}
// buildTCP assembles the TCP-level filters for the computed RBAC rules,
// mirroring buildHTTP. A CUSTOM action backed by an HTTP-only provider cannot
// be enforced on a TCP chain and yields no filters.
func (b Builder) buildTCP(rules *rbacpb.RBAC, shadowRules *rbacpb.RBAC, providers []string) []*listener.Filter {
	// Local constructor for the common RBAC network filter wrapping.
	tcpRBAC := func(cfg *rbactcp.RBAC) *listener.Filter {
		return &listener.Filter{
			Name:       wellknown.RoleBasedAccessControl,
			ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(cfg)},
		}
	}
	if !b.option.IsCustomBuilder {
		return []*listener.Filter{tcpRBAC(&rbactcp.RBAC{
			Rules:                 rules,
			StatPrefix:            authzmodel.RBACTCPFilterStatPrefix,
			ShadowRules:           shadowRules,
			ShadowRulesStatPrefix: shadowRuleStatPrefix(shadowRules),
		})}
	}
	extauthz, err := getExtAuthz(b.extensions, providers)
	if err != nil {
		b.logger.AppendError(multierror.Prefix(err, "failed to process CUSTOM action, will generate deny configs for the specified rules:"))
		return []*listener.Filter{tcpRBAC(&rbactcp.RBAC{
			Rules:      getBadCustomDenyRules(rules),
			StatPrefix: authzmodel.RBACTCPFilterStatPrefix,
		})}
	}
	if extauthz.tcp == nil {
		b.logger.AppendDebugf("ignored CUSTOM action with HTTP provider on TCP filter chain")
		return nil
	}
	// Evaluate the CUSTOM rules in shadow mode only; the ext_authz filter reads the
	// result from dynamic metadata and performs the actual enforcement.
	return []*listener.Filter{
		tcpRBAC(&rbactcp.RBAC{
			ShadowRules:           rules,
			StatPrefix:            authzmodel.RBACTCPFilterStatPrefix,
			ShadowRulesStatPrefix: authzmodel.RBACExtAuthzShadowRulesStatPrefix,
		}),
		{
			Name:       wellknown.ExternalAuthorization,
			ConfigType: &listener.Filter_TypedConfig{TypedConfig: protoconv.MessageToAny(extauthz.tcp)},
		},
	}
}
// getBadCustomDenyRules clones the given rules into a DENY config, tagging each
// policy name with a suffix identifying it as the fallback for an unusable
// CUSTOM action provider.
func getBadCustomDenyRules(rules *rbacpb.RBAC) *rbacpb.RBAC {
	denied := &rbacpb.RBAC{
		Action:   rbacpb.RBAC_DENY,
		Policies: map[string]*rbacpb.Policy{},
	}
	for _, name := range maps.Keys(rules.Policies) {
		denied.Policies[name+badCustomActionSuffix] = rules.Policies[name]
	}
	return denied
}
// policyName builds the unique RBAC policy name for one rule of one policy.
// For CUSTOM action builds, the ext_authz match prefix is prepended so the
// ext_authz filter can recognize the entry in dynamic metadata.
func policyName(namespace, name string, rule int, option Option) string {
	if option.IsCustomBuilder {
		return fmt.Sprintf("%s-ns[%s]-policy[%s]-rule[%d]", extAuthzMatchPrefix, namespace, name, rule)
	}
	return fmt.Sprintf("ns[%s]-policy[%s]-rule[%d]", namespace, name, rule)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
"fmt"
"net/url"
"sort"
"strconv"
"strings"
"github.com/davecgh/go-spew/spew"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
extauthzhttp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/ext_authz/v3"
extauthztcp "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/ext_authz/v3"
envoy_type_matcher_v3 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
envoytypev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/hashicorp/go-multierror"
"google.golang.org/protobuf/types/known/durationpb"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
authzmodel "istio.io/istio/pilot/pkg/security/authz/model"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/wellknown"
)
const (
	// extAuthzMatchPrefix prefixes generated policy names for the CUSTOM action so the
	// ext_authz filter can identify the matching shadow-rule results in dynamic metadata.
	extAuthzMatchPrefix = "istio-ext-authz"
	// badCustomActionSuffix marks deny policies generated in place of a CUSTOM action
	// whose provider could not be resolved (see getBadCustomDenyRules).
	badCustomActionSuffix = `-deny-due-to-bad-CUSTOM-action`
)
// supportedStatus is the sorted list of all HTTP status codes known to Envoy,
// derived from the generated StatusCode enum; presumably consumed when parsing
// an ext_authz provider's statusOnError setting — confirm in parseStatusOnError.
var supportedStatus = func() []int {
	var supported []int
	for code := range envoytypev3.StatusCode_name {
		supported = append(supported, int(code))
	}
	sort.Ints(supported)
	return supported
}()
// builtExtAuthz is the parsed ext_authz filter config for one extension provider.
// http and tcp hold the per-protocol filter configs (either may be nil);
// err records any validation or build problems found while parsing the provider.
type builtExtAuthz struct {
	http *extauthzhttp.ExtAuthz
	tcp  *extauthztcp.ExtAuthz
	err  error
}
// processExtensionProvider parses all ext_authz extension providers from the mesh
// config into per-provider filter configs, keyed by provider name. Providers that
// fail to validate or build are still stored, with the failure recorded in their
// err field so lookups can surface it later (see getExtAuthz).
func processExtensionProvider(push *model.PushContext) map[string]*builtExtAuthz {
	resolved := map[string]*builtExtAuthz{}
	for i, config := range push.Mesh.ExtensionProviders {
		var errs error
		if config.Name == "" {
			errs = multierror.Append(errs, fmt.Errorf("extension provider name must not be empty, found empty at index: %d", i))
		} else if _, found := resolved[config.Name]; found {
			// NOTE(review): the duplicate entry still overwrites the earlier one below;
			// only this error records the collision.
			errs = multierror.Append(errs, fmt.Errorf("extension provider name must be unique, found duplicate: %s", config.Name))
		}
		var parsed *builtExtAuthz
		var err error
		// TODO(yangminzhu): Refactor and cache the ext_authz config.
		switch p := config.Provider.(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzHttp:
			if err = validation.ValidateExtensionProviderEnvoyExtAuthzHTTP(p.EnvoyExtAuthzHttp); err == nil {
				parsed, err = buildExtAuthzHTTP(push, p.EnvoyExtAuthzHttp)
			}
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzGrpc:
			if err = validation.ValidateExtensionProviderEnvoyExtAuthzGRPC(p.EnvoyExtAuthzGrpc); err == nil {
				parsed, err = buildExtAuthzGRPC(push, p.EnvoyExtAuthzGrpc)
			}
		default:
			// Providers of other kinds (e.g. tracing/logging) are not ext_authz; skip them.
			continue
		}
		if err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("failed to parse extension provider %q:", config.Name)))
		}
		if parsed == nil {
			parsed = &builtExtAuthz{}
		}
		parsed.err = errs
		resolved[config.Name] = parsed
	}
	if authzLog.DebugEnabled() {
		// spew.Sdump is expensive; only spend the cycles when debug logging is enabled.
		authzLog.Debugf("Resolved extension providers: %v", spew.Sdump(resolved))
	}
	return resolved
}
// notAllTheSame reports whether the slice contains at least two differing
// values. Empty and single-element slices are trivially "all the same".
func notAllTheSame(names []string) bool {
	for i, name := range names {
		if i > 0 && name != names[i-1] {
			return true
		}
	}
	return false
}
// getExtAuthz looks up the single ext_authz provider named by the policies.
// It fails when no providers were resolved, no provider is named, the policies
// name more than one distinct provider, the provider is unknown, or the
// provider itself failed to parse.
func getExtAuthz(resolved map[string]*builtExtAuthz, providers []string) (*builtExtAuthz, error) {
	if resolved == nil {
		return nil, fmt.Errorf("extension provider is either invalid or undefined")
	}
	if len(providers) == 0 {
		return nil, fmt.Errorf("no provider specified in authorization policy")
	}
	if notAllTheSame(providers) {
		return nil, fmt.Errorf("only 1 provider can be used per workload, found multiple providers: %v", providers)
	}
	name := providers[0]
	built, known := resolved[name]
	if !known {
		var available []string
		for candidate := range resolved {
			available = append(available, candidate)
		}
		return nil, fmt.Errorf("available providers are %v but found %q", available, name)
	}
	if built.err != nil {
		return nil, fmt.Errorf("found errors in provider %s: %v", name, built.err)
	}
	return built, nil
}
// buildExtAuthzHTTP parses the given HTTP ext-authz provider config into a
// builtExtAuthz holding the generated ext_authz HTTP filter configuration.
// All validation errors (port, cluster lookup, statusOnError, pathPrefix,
// header wildcards) are accumulated and returned together.
func buildExtAuthzHTTP(push *model.PushContext,
	config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationHttpProvider,
) (*builtExtAuthz, error) {
	var errs error
	port, err := parsePort(config.Port)
	if err != nil {
		errs = multierror.Append(errs, err)
	}
	hostname, cluster, err := model.LookupCluster(push, config.Service, port)
	if err != nil {
		// Record the lookup failure metric before accumulating the error.
		model.IncLookupClusterFailures("authz")
		errs = multierror.Append(errs, err)
	}
	status, err := parseStatusOnError(config.StatusOnError)
	if err != nil {
		errs = multierror.Append(errs, err)
	}
	if config.PathPrefix != "" {
		if _, err := url.Parse(config.PathPrefix); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid pathPrefix %q:", config.PathPrefix)))
		}
		if !strings.HasPrefix(config.PathPrefix, "/") {
			errs = multierror.Append(errs, fmt.Errorf("pathPrefix must begin with `/`, found: %q", config.PathPrefix))
		}
	}
	// A bare "*" entry in any of the header lists is rejected; users must
	// write a prefix ("x-*") or suffix ("*-x") wildcard instead.
	checkWildcard := func(field string, values []string) {
		for _, val := range values {
			if val == "*" {
				errs = multierror.Append(errs, fmt.Errorf("a single wildcard (\"*\") is not supported, change it to either prefix or suffix match: %s", field))
			}
		}
	}
	checkWildcard("IncludeRequestHeadersInCheck", config.IncludeRequestHeadersInCheck)
	// IncludeHeadersInCheck is deprecated but still validated for backward compatibility.
	//nolint: staticcheck
	checkWildcard("IncludeHeadersInCheck", config.IncludeHeadersInCheck)
	checkWildcard("HeadersToDownstreamOnDeny", config.HeadersToDownstreamOnDeny)
	checkWildcard("HeadersToDownstreamOnAllow", config.HeadersToDownstreamOnAllow)
	checkWildcard("HeadersToUpstreamOnAllow", config.HeadersToUpstreamOnAllow)
	if errs != nil {
		return nil, errs
	}
	return generateHTTPConfig(hostname, cluster, status, config), nil
}
// buildExtAuthzGRPC parses the given gRPC ext-authz provider config into a
// builtExtAuthz holding both the HTTP and TCP ext_authz filter configurations.
// All validation errors (port, cluster lookup, statusOnError) are accumulated
// and returned together.
func buildExtAuthzGRPC(push *model.PushContext,
	config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationGrpcProvider,
) (*builtExtAuthz, error) {
	var errs error
	port, err := parsePort(config.Port)
	if err != nil {
		errs = multierror.Append(errs, err)
	}
	hostname, cluster, err := model.LookupCluster(push, config.Service, port)
	if err != nil {
		// Record the lookup failure metric, consistent with buildExtAuthzHTTP.
		model.IncLookupClusterFailures("authz")
		errs = multierror.Append(errs, err)
	}
	status, err := parseStatusOnError(config.StatusOnError)
	if err != nil {
		errs = multierror.Append(errs, err)
	}
	if errs != nil {
		return nil, errs
	}
	return generateGRPCConfig(cluster, hostname, config, status), nil
}
// parsePort validates that port is a usable (non-zero) TCP port number and
// converts it to an int.
func parsePort(port uint32) (int, error) {
	if port < 1 || port > 65535 {
		return 0, fmt.Errorf("port must be in the range [1, 65535], found: %d", port)
	}
	return int(port), nil
}
// parseStatusOnError converts the user-supplied statusOnError string into an
// Envoy HttpStatus. An empty string returns (nil, nil), meaning "use the
// default". The value must be the numeric form of a status code known to
// Envoy (envoytypev3.StatusCode).
func parseStatusOnError(status string) (*envoytypev3.HttpStatus, error) {
	if status == "" {
		return nil, nil
	}
	code, err := strconv.ParseInt(status, 10, 32)
	if err != nil {
		return nil, multierror.Prefix(err, fmt.Sprintf("invalid statusOnError %q:", status))
	}
	parsed := envoytypev3.StatusCode(code)
	if _, ok := envoytypev3.StatusCode_name[int32(parsed)]; !ok {
		return nil, fmt.Errorf("unsupported statusOnError %s, supported values: %v", status, supportedStatus)
	}
	return &envoytypev3.HttpStatus{Code: parsed}, nil
}
// generateHTTPConfig assembles the ext_authz HTTP filter configuration for an
// HTTP authorization provider, targeting the given cluster/hostname and using
// status as the response code on filter errors (nil means Envoy's default).
func generateHTTPConfig(hostname, cluster string, status *envoytypev3.HttpStatus,
	config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationHttpProvider,
) *builtExtAuthz {
	service := &extauthzhttp.HttpService{
		PathPrefix: config.PathPrefix,
		ServerUri: &core.HttpUri{
			// Timeout is required.
			Timeout: timeoutOrDefault(config.Timeout),
			// Uri is required but actually not used in the ext_authz filter.
			Uri: fmt.Sprintf("http://%s", hostname),
			HttpUpstreamType: &core.HttpUri_Cluster{
				Cluster: cluster,
			},
		},
	}
	allowedHeaders := generateHeaders(config.IncludeRequestHeadersInCheck)
	if allowedHeaders == nil {
		// IncludeHeadersInCheck is deprecated, only use it if IncludeRequestHeadersInCheck is not set.
		// TODO: Remove the IncludeHeadersInCheck field before promoting to beta.
		//nolint: staticcheck
		allowedHeaders = generateHeaders(config.IncludeHeadersInCheck)
	}
	var headersToAdd []*core.HeaderValue
	// Sort the additional header keys so the generated config is deterministic.
	additionalHeaders := maps.Keys(config.IncludeAdditionalHeadersInCheck)
	sort.Strings(additionalHeaders)
	for _, k := range additionalHeaders {
		headersToAdd = append(headersToAdd, &core.HeaderValue{
			Key:   k,
			Value: config.IncludeAdditionalHeadersInCheck[k],
		})
	}
	if len(headersToAdd) != 0 {
		service.AuthorizationRequest = &extauthzhttp.AuthorizationRequest{
			HeadersToAdd: headersToAdd,
		}
	}
	if len(config.HeadersToUpstreamOnAllow) > 0 || len(config.HeadersToDownstreamOnDeny) > 0 ||
		len(config.HeadersToDownstreamOnAllow) > 0 {
		service.AuthorizationResponse = &extauthzhttp.AuthorizationResponse{
			AllowedUpstreamHeaders:        generateHeaders(config.HeadersToUpstreamOnAllow),
			AllowedClientHeaders:          generateHeaders(config.HeadersToDownstreamOnDeny),
			AllowedClientHeadersOnSuccess: generateHeaders(config.HeadersToDownstreamOnAllow),
		}
	}
	http := &extauthzhttp.ExtAuthz{
		StatusOnError:       status,
		FailureModeAllow:    config.FailOpen,
		TransportApiVersion: core.ApiVersion_V3,
		Services: &extauthzhttp.ExtAuthz_HttpService{
			HttpService: service,
		},
		// Gate the filter on the dynamic metadata emitted by the shadow RBAC
		// rules (see generateFilterMatcher).
		FilterEnabledMetadata: generateFilterMatcher(wellknown.HTTPRoleBasedAccessControl),
		WithRequestBody:       withBodyRequest(config.IncludeRequestBodyInCheck),
	}
	if allowedHeaders != nil {
		http.AllowedHeaders = allowedHeaders
	}
	return &builtExtAuthz{http: http}
}
// generateGRPCConfig assembles both the HTTP and TCP ext_authz filter
// configurations for a gRPC authorization provider. The same Envoy gRPC
// service definition (cluster + authority) is shared by both filters.
func generateGRPCConfig(
	cluster string,
	hostname string,
	config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationGrpcProvider,
	status *envoytypev3.HttpStatus,
) *builtExtAuthz {
	grpc := &core.GrpcService{
		TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
			EnvoyGrpc: &core.GrpcService_EnvoyGrpc{
				ClusterName: cluster,
				Authority:   hostname,
			},
		},
		Timeout: timeoutOrDefault(config.Timeout),
	}
	http := &extauthzhttp.ExtAuthz{
		StatusOnError:    status,
		FailureModeAllow: config.FailOpen,
		Services: &extauthzhttp.ExtAuthz_GrpcService{
			GrpcService: grpc,
		},
		// Gate the filter on the dynamic metadata emitted by the shadow RBAC
		// rules (see generateFilterMatcher).
		FilterEnabledMetadata: generateFilterMatcher(wellknown.HTTPRoleBasedAccessControl),
		TransportApiVersion:   core.ApiVersion_V3,
		WithRequestBody:       withBodyRequest(config.IncludeRequestBodyInCheck),
	}
	tcp := &extauthztcp.ExtAuthz{
		StatPrefix:            "tcp.",
		FailureModeAllow:      config.FailOpen,
		TransportApiVersion:   core.ApiVersion_V3,
		GrpcService:           grpc,
		FilterEnabledMetadata: generateFilterMatcher(wellknown.RoleBasedAccessControl),
	}
	return &builtExtAuthz{http: http, tcp: tcp}
}
// generateHeaders converts the given header names into an Envoy
// ListStringMatcher. A leading "*" becomes a case-insensitive suffix match, a
// trailing "*" a prefix match, and anything else an exact match. Returns nil
// when headers is empty.
func generateHeaders(headers []string) *envoy_type_matcher_v3.ListStringMatcher {
	if len(headers) == 0 {
		return nil
	}
	patterns := make([]*envoy_type_matcher_v3.StringMatcher, 0, len(headers))
	for _, h := range headers {
		m := &envoy_type_matcher_v3.StringMatcher{IgnoreCase: true}
		switch {
		case strings.HasPrefix(h, "*"):
			m.MatchPattern = &envoy_type_matcher_v3.StringMatcher_Suffix{
				Suffix: strings.TrimPrefix(h, "*"),
			}
		case strings.HasSuffix(h, "*"):
			m.MatchPattern = &envoy_type_matcher_v3.StringMatcher_Prefix{
				Prefix: strings.TrimSuffix(h, "*"),
			}
		default:
			m.MatchPattern = &envoy_type_matcher_v3.StringMatcher_Exact{
				Exact: h,
			}
		}
		patterns = append(patterns, m)
	}
	return &envoy_type_matcher_v3.ListStringMatcher{Patterns: patterns}
}
// generateFilterMatcher builds the metadata matcher used as
// FilterEnabledMetadata: the ext_authz filter is enabled only when the filter
// identified by name has written a shadow effective-policy-ID value starting
// with extAuthzMatchPrefix into its dynamic metadata.
func generateFilterMatcher(name string) *envoy_type_matcher_v3.MetadataMatcher {
	return &envoy_type_matcher_v3.MetadataMatcher{
		Filter: name,
		Path: []*envoy_type_matcher_v3.MetadataMatcher_PathSegment{
			{
				Segment: &envoy_type_matcher_v3.MetadataMatcher_PathSegment_Key{
					// Key written by the shadow RBAC rules, e.g. "istio_ext_authz_shadow_effective_policy_id".
					Key: authzmodel.RBACExtAuthzShadowRulesStatPrefix + authzmodel.RBACShadowEffectivePolicyID,
				},
			},
		},
		Value: &envoy_type_matcher_v3.ValueMatcher{
			MatchPattern: &envoy_type_matcher_v3.ValueMatcher_StringMatch{
				StringMatch: &envoy_type_matcher_v3.StringMatcher{
					MatchPattern: &envoy_type_matcher_v3.StringMatcher_Prefix{
						Prefix: extAuthzMatchPrefix,
					},
				},
			},
		},
	}
}
// timeoutOrDefault returns t unchanged when set, otherwise the default
// timeout of 600 seconds.
func timeoutOrDefault(t *durationpb.Duration) *durationpb.Duration {
	if t != nil {
		return t
	}
	// Default timeout is 600s.
	return &durationpb.Duration{Seconds: 600}
}
// withBodyRequest maps the provider's request-body settings onto the
// ext_authz BufferSettings. Returns nil when body buffering is not configured.
func withBodyRequest(config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationRequestBody) *extauthzhttp.BufferSettings {
	if config == nil {
		return nil
	}
	settings := &extauthzhttp.BufferSettings{
		MaxRequestBytes:     config.MaxRequestBytes,
		AllowPartialMessage: config.AllowPartialMessage,
		PackAsBytes:         config.PackAsBytes,
	}
	return settings
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package builder
import (
"fmt"
"strings"
"github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/istiomultierror"
)
// authzLog is the scoped logger used by the authorization policy builder.
var authzLog = log.RegisterScope("authorization", "Istio Authorization Policy")

// AuthzLogger accumulates debug messages and errors produced while processing
// authorization policies so they can be emitted together via Report.
type AuthzLogger struct {
	// debugMsg collects debug lines appended via AppendDebugf.
	debugMsg []string
	// errMsg aggregates errors appended via AppendError.
	errMsg *multierror.Error
}
// AppendDebugf records a formatted debug message for later reporting.
func (al *AuthzLogger) AppendDebugf(format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	al.debugMsg = append(al.debugMsg, msg)
}
// AppendError records an error encountered while processing authorization policies.
func (al *AuthzLogger) AppendError(err error) {
	al.errMsg = multierror.Append(al.errMsg, err)
}
// Report flushes the accumulated state: errors are logged at error level with
// the Istio multierror format, and (when debug logging is enabled) any
// collected debug details are logged as a bulleted list.
func (al *AuthzLogger) Report() {
	if al.errMsg != nil {
		al.errMsg.ErrorFormat = istiomultierror.MultiErrorFormat()
		authzLog.Errorf("Processed authorization policy: %s", al.errMsg)
	}
	if !authzLog.DebugEnabled() || len(al.debugMsg) == 0 {
		authzLog.Debugf("Processed authorization policy")
		return
	}
	details := strings.Join(al.debugMsg, "\n\t* ")
	authzLog.Debugf("Processed authorization policy with details:\n\t* %v", details)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package matcher
import (
"regexp"
"strings"
routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)
// HeaderMatcher converts a key, value string pair to a corresponding HeaderMatcher.
func HeaderMatcher(k, v string) *routepb.HeaderMatcher {
	// A bare "*" means "header present". It must be special-cased before the
	// prefix/suffix handling below, otherwise we would generate an empty
	// prefix/suffix value, which is invalid in HeaderMatcher.
	if v == "*" {
		return &routepb.HeaderMatcher{
			Name: k,
			HeaderMatchSpecifier: &routepb.HeaderMatcher_PresentMatch{
				PresentMatch: true,
			},
		}
	}
	var sm *matcher.StringMatcher
	switch {
	case strings.HasPrefix(v, "*"):
		sm = StringMatcherSuffix(v[1:], false)
	case strings.HasSuffix(v, "*"):
		sm = StringMatcherPrefix(v[:len(v)-1], false)
	default:
		sm = StringMatcherExact(v, false)
	}
	return &routepb.HeaderMatcher{
		Name: k,
		HeaderMatchSpecifier: &routepb.HeaderMatcher_StringMatch{
			StringMatch: sm,
		},
	}
}
// HostMatcherWithRegex creates a host matcher for a host using regex for proxies before 1.11.
// The regex is made case-insensitive with the "(?i)" flag.
func HostMatcherWithRegex(k, v string) *routepb.HeaderMatcher {
	// A bare "*" matches any host: use a present-match rather than a regex.
	if v == "*" {
		return &routepb.HeaderMatcher{
			Name: k,
			HeaderMatchSpecifier: &routepb.HeaderMatcher_PresentMatch{
				PresentMatch: true,
			},
		}
	}
	var regex string
	switch {
	case strings.HasPrefix(v, "*"):
		regex = `.*` + regexp.QuoteMeta(v[1:])
	case strings.HasSuffix(v, "*"):
		regex = regexp.QuoteMeta(v[:len(v)-1]) + `.*`
	default:
		regex = regexp.QuoteMeta(v)
	}
	return &routepb.HeaderMatcher{
		Name: k,
		HeaderMatchSpecifier: &routepb.HeaderMatcher_StringMatch{
			StringMatch: StringMatcherRegex(`(?i)` + regex),
		},
	}
}
// HostMatcher creates a case-insensitive host matcher for a host.
func HostMatcher(k, v string) *routepb.HeaderMatcher {
	// A bare "*" means "host present". It must be special-cased before the
	// prefix/suffix handling below, otherwise we would generate an empty
	// prefix/suffix value, which is invalid in HeaderMatcher.
	if v == "*" {
		return &routepb.HeaderMatcher{
			Name: k,
			HeaderMatchSpecifier: &routepb.HeaderMatcher_PresentMatch{
				PresentMatch: true,
			},
		}
	}
	var sm *matcher.StringMatcher
	switch {
	case strings.HasPrefix(v, "*"):
		sm = StringMatcherSuffix(v[1:], true)
	case strings.HasSuffix(v, "*"):
		sm = StringMatcherPrefix(v[:len(v)-1], true)
	default:
		sm = StringMatcherExact(v, true)
	}
	return &routepb.HeaderMatcher{
		Name: k,
		HeaderMatchSpecifier: &routepb.HeaderMatcher_StringMatch{
			StringMatch: sm,
		},
	}
}
// PathMatcher creates a path matcher for a path, using the generic wildcard
// semantics of StringMatcher (prefix/suffix/exact with "*" support).
func PathMatcher(path string) *matcher.PathMatcher {
	return &matcher.PathMatcher{
		Rule: &matcher.PathMatcher_Path{
			Path: StringMatcher(path),
		},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package matcher
import (
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)
// MetadataStringMatcher creates a metadata string matcher for the given filter,
// key and the string matcher: the single-level key under the named filter's
// dynamic metadata is matched against m.
func MetadataStringMatcher(filter, key string, m *matcher.StringMatcher) *matcher.MetadataMatcher {
	return &matcher.MetadataMatcher{
		Filter: filter,
		Path: []*matcher.MetadataMatcher_PathSegment{
			{
				Segment: &matcher.MetadataMatcher_PathSegment_Key{
					Key: key,
				},
			},
		},
		Value: &matcher.ValueMatcher{
			MatchPattern: &matcher.ValueMatcher_StringMatch{
				StringMatch: m,
			},
		},
	}
}
// MetadataListMatcher creates a metadata list matcher for the given path keys and value.
// When useExtendedJwt is true, the generated matcher accepts the value either as a
// member of a list or as a plain string; otherwise only the list form is accepted.
func MetadataListMatcher(filter string, keys []string, value *matcher.StringMatcher, useExtendedJwt bool) *matcher.MetadataMatcher {
	// Matches a list containing at least one element equal to value.
	listMatcher := &matcher.ListMatcher{
		MatchPattern: &matcher.ListMatcher_OneOf{
			OneOf: &matcher.ValueMatcher{
				MatchPattern: &matcher.ValueMatcher_StringMatch{
					StringMatch: value,
				},
			},
		},
	}
	segments := make([]*matcher.MetadataMatcher_PathSegment, 0, len(keys))
	for _, key := range keys {
		segments = append(segments, &matcher.MetadataMatcher_PathSegment{
			Segment: &matcher.MetadataMatcher_PathSegment_Key{
				Key: key,
			},
		})
	}
	var valueMatcher *matcher.ValueMatcher
	if useExtendedJwt {
		// Accept either the list form or the plain string form of the value.
		valueMatcher = &matcher.ValueMatcher{
			MatchPattern: &matcher.ValueMatcher_OrMatch{
				OrMatch: &matcher.OrMatcher{
					ValueMatchers: []*matcher.ValueMatcher{
						{
							MatchPattern: &matcher.ValueMatcher_ListMatch{
								ListMatch: listMatcher,
							},
						},
						{
							MatchPattern: &matcher.ValueMatcher_StringMatch{
								StringMatch: value,
							},
						},
					},
				},
			},
		}
	} else {
		valueMatcher = &matcher.ValueMatcher{
			MatchPattern: &matcher.ValueMatcher_ListMatch{
				ListMatch: listMatcher,
			},
		}
	}
	return &matcher.MetadataMatcher{
		Filter: filter,
		Path:   segments,
		Value:  valueMatcher,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package matcher
import (
"strings"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)
// StringMatcher creates a string matcher for v, honoring the "*" wildcard
// conventions of StringMatcherWithPrefix with no extra prefix.
func StringMatcher(v string) *matcher.StringMatcher {
	return StringMatcherWithPrefix(v, "")
}
// StringMatcherRegex creates a safe-regex string matcher for regex.
// The regex is passed through unmodified (not quoted or escaped).
func StringMatcherRegex(regex string) *matcher.StringMatcher {
	return &matcher.StringMatcher{
		MatchPattern: &matcher.StringMatcher_SafeRegex{
			SafeRegex: &matcher.RegexMatcher{
				Regex: regex,
			},
		},
	}
}
// StringMatcherPrefix creates a string matcher for prefix matching,
// case-insensitive when ignoreCase is true.
func StringMatcherPrefix(prefix string, ignoreCase bool) *matcher.StringMatcher {
	return &matcher.StringMatcher{
		IgnoreCase: ignoreCase,
		MatchPattern: &matcher.StringMatcher_Prefix{
			Prefix: prefix,
		},
	}
}
// StringMatcherSuffix creates a string matcher for suffix matching,
// case-insensitive when ignoreCase is true.
func StringMatcherSuffix(suffix string, ignoreCase bool) *matcher.StringMatcher {
	return &matcher.StringMatcher{
		IgnoreCase: ignoreCase,
		MatchPattern: &matcher.StringMatcher_Suffix{
			Suffix: suffix,
		},
	}
}
// StringMatcherExact creates a string matcher for exact matching,
// case-insensitive when ignoreCase is true.
func StringMatcherExact(exact string, ignoreCase bool) *matcher.StringMatcher {
	return &matcher.StringMatcher{
		IgnoreCase: ignoreCase,
		MatchPattern: &matcher.StringMatcher_Exact{
			Exact: exact,
		},
	}
}
// StringMatcherWithPrefix creates a string matcher for v with the extra prefix
// inserted into the created string matcher. The prefix is ignored when v is
// the wildcard ("*"), which is generated as the regex ".+" (non-empty match)
// instead of ".*".
func StringMatcherWithPrefix(v, prefix string) *matcher.StringMatcher {
	// Check "*" first so we never build an empty prefix/suffix StringMatcher;
	// the Envoy StringMatcher doesn't allow empty prefix/suffix values.
	if v == "*" {
		return StringMatcherRegex(".+")
	}
	if strings.HasPrefix(v, "*") {
		trimmed := strings.TrimPrefix(v, "*")
		if prefix == "" {
			return StringMatcherSuffix(trimmed, false)
		}
		// A leading wildcard combined with a prefix needs a regex: prefix.*suffix.
		return StringMatcherRegex(prefix + ".*" + trimmed)
	}
	if strings.HasSuffix(v, "*") {
		return StringMatcherPrefix(prefix+strings.TrimSuffix(v, "*"), false)
	}
	return StringMatcherExact(prefix+v, false)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/security/authz/matcher"
"istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/spiffe"
)
// generator converts a single authorization-policy key/value condition into
// the corresponding Envoy RBAC permission or principal. Each attribute type
// implements exactly one of the two and returns "unimplemented" for the other.
type generator interface {
	// permission builds an RBAC permission for key=value; forTCP indicates a
	// TCP filter chain, where HTTP-only attributes must be rejected.
	permission(key, value string, forTCP bool) (*rbacpb.Permission, error)
	// principal builds an RBAC principal for key=value; useAuthenticated
	// selects the authenticated-principal form where applicable.
	principal(key, value string, forTCP bool, useAuthenticated bool) (*rbacpb.Principal, error)
}
// destIPGenerator handles the destination.ip attribute (permission only).
type destIPGenerator struct{}

// permission converts a single IP or CIDR string into a destination-IP permission.
func (destIPGenerator) permission(_, value string, _ bool) (*rbacpb.Permission, error) {
	cidrRange, err := util.AddrStrToCidrRange(value)
	if err != nil {
		return nil, err
	}
	return permissionDestinationIP(cidrRange), nil
}

// principal is not supported for destination.ip.
func (destIPGenerator) principal(_, _ string, _ bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// destPortGenerator handles the destination.port attribute (permission only).
type destPortGenerator struct{}

// permission converts a numeric port string into a destination-port permission.
func (destPortGenerator) permission(_, value string, _ bool) (*rbacpb.Permission, error) {
	portValue, err := convertToPort(value)
	if err != nil {
		return nil, err
	}
	return permissionDestinationPort(portValue), nil
}

// principal is not supported for destination.port.
func (destPortGenerator) principal(_, _ string, _ bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// connSNIGenerator handles the connection.sni attribute (permission only).
type connSNIGenerator struct{}

// permission matches the requested server name (SNI) with wildcard support.
func (connSNIGenerator) permission(_, value string, _ bool) (*rbacpb.Permission, error) {
	m := matcher.StringMatcher(value)
	return permissionRequestedServerName(m), nil
}

// principal is not supported for connection.sni.
func (connSNIGenerator) principal(_, _ string, _ bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// envoyFilterGenerator handles experimental.envoy.filters.* attributes, which
// check Envoy dynamic metadata directly (permission only).
type envoyFilterGenerator struct {
	// useExtendedJwt is forwarded to the metadata list matcher.
	useExtendedJwt bool
}

// permission matches the metadata key "c" under the filter "envoy.filters.a.b"
// for a key of the form "experimental.envoy.filters.a.b[c]".
func (efg envoyFilterGenerator) permission(key, value string, _ bool) (*rbacpb.Permission, error) {
	// Split key of format "experimental.envoy.filters.a.b[c]" to "envoy.filters.a.b" and "c".
	parts := strings.SplitN(strings.TrimSuffix(strings.TrimPrefix(key, "experimental."), "]"), "[", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid key: %v", key)
	}
	// If value is of format [v], create a list matcher.
	// Else, if value is of format v, create a string matcher.
	if strings.HasPrefix(value, "[") && strings.HasSuffix(value, "]") {
		m := matcher.MetadataListMatcher(parts[0], parts[1:], matcher.StringMatcher(strings.Trim(value, "[]")), efg.useExtendedJwt)
		return permissionMetadata(m), nil
	}
	m := matcher.MetadataStringMatcher(parts[0], parts[1], matcher.StringMatcher(value))
	return permissionMetadata(m), nil
}

// principal is not supported for experimental.envoy.filters attributes.
func (envoyFilterGenerator) principal(_, _ string, _ bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// srcIPGenerator handles the source.ip attribute (principal only).
type srcIPGenerator struct{}

// permission is not supported for source.ip.
func (srcIPGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

// principal converts a single IP or CIDR string into a direct-remote-IP principal.
func (srcIPGenerator) principal(_, value string, _ bool, _ bool) (*rbacpb.Principal, error) {
	cidr, err := util.AddrStrToCidrRange(value)
	if err != nil {
		return nil, err
	}
	return principalDirectRemoteIP(cidr), nil
}
// remoteIPGenerator handles the remote.ip attribute — the original client IP,
// per the attribute's documentation derived from x-forwarded-for or proxy
// protocol (principal only).
type remoteIPGenerator struct{}

// permission is not supported for remote.ip.
func (remoteIPGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

// principal converts a single IP or CIDR string into a remote-IP principal.
func (remoteIPGenerator) principal(_, value string, _ bool, _ bool) (*rbacpb.Principal, error) {
	cidr, err := util.AddrStrToCidrRange(value)
	if err != nil {
		return nil, err
	}
	return principalRemoteIP(cidr), nil
}
// srcNamespaceGenerator handles the source.namespace attribute (principal only).
type srcNamespaceGenerator struct{}

// permission is not supported for source.namespace.
func (srcNamespaceGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

// principal matches the namespace portion of the peer identity
// (".../ns/<namespace>/..."); "*" wildcards in value are expanded to ".*".
func (srcNamespaceGenerator) principal(_, value string, _ bool, useAuthenticated bool) (*rbacpb.Principal, error) {
	// strings.ReplaceAll is the idiomatic form of strings.Replace(..., -1).
	v := strings.ReplaceAll(value, "*", ".*")
	m := matcher.StringMatcherRegex(fmt.Sprintf(".*/ns/%s/.*", v))
	return principalAuthenticated(m, useAuthenticated), nil
}
// srcPrincipalGenerator handles the source.principal attribute (principal only).
type srcPrincipalGenerator struct{}

// permission is not supported for source.principal.
func (srcPrincipalGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

// principal matches the peer identity, inserting the SPIFFE URI prefix before
// the value; "*" wildcards are handled by StringMatcherWithPrefix.
func (srcPrincipalGenerator) principal(key, value string, _ bool, useAuthenticated bool) (*rbacpb.Principal, error) {
	m := matcher.StringMatcherWithPrefix(value, spiffe.URIPrefix)
	return principalAuthenticated(m, useAuthenticated), nil
}
// requestPrincipalGenerator handles the request.auth.principal attribute,
// matched against the JWT issuer/subject ("<iss>/<sub>") of the request
// (principal only, HTTP only).
type requestPrincipalGenerator struct {
	// useExtendedJwt selects matching on separate "iss"/"sub" JWT claim
	// metadata instead of the legacy authn filter metadata.
	useExtendedJwt bool
}

// permission is not supported for request.auth.principal.
func (requestPrincipalGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

// matchAny matches any non-empty value (".+").
var matchAny = matcher.StringMatcherRegex(".+")

// principal builds the principal for a value of the form "<iss>/<sub>",
// supporting a leading or trailing "*" wildcard on either part.
func (rpg requestPrincipalGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	if rpg.useExtendedJwt {
		// Split into issuer and subject on the first "/".
		iss, sub, found := strings.Cut(value, "/")
		var matchIss, matchSub *matcherpb.StringMatcher
		switch {
		case value == "*":
			matchIss = matchAny
			matchSub = matchAny
		case strings.HasPrefix(value, "*"):
			if found {
				if iss == "*" {
					matchIss = matchAny
				} else {
					matchIss = matcher.StringMatcherSuffix(strings.TrimPrefix(iss, "*"), false)
				}
				matchSub = matcher.StringMatcherExact(sub, false)
			} else {
				// No "/": the whole value is a suffix wildcard on the subject.
				matchIss = matchAny
				matchSub = matcher.StringMatcherSuffix(strings.TrimPrefix(value, "*"), false)
			}
		case strings.HasSuffix(value, "*"):
			if found {
				matchIss = matcher.StringMatcherExact(iss, false)
				if sub == "*" {
					matchSub = matchAny
				} else {
					matchSub = matcher.StringMatcherPrefix(strings.TrimSuffix(sub, "*"), false)
				}
			} else {
				// No "/": the whole value is a prefix wildcard on the issuer.
				matchIss = matcher.StringMatcherPrefix(strings.TrimSuffix(value, "*"), false)
				matchSub = matchAny
			}
		default:
			// NOTE(review): when value contains no "/", sub is "" here, so the
			// principal requires an empty "sub" claim — confirm this is intended.
			matchSub = matcher.StringMatcherExact(sub, false)
			matchIss = matcher.StringMatcherExact(iss, false)
		}
		im := MetadataStringMatcherForJWTClaim("iss", matchIss)
		sm := MetadataStringMatcherForJWTClaim("sub", matchSub)
		return principalAnd([]*rbacpb.Principal{principalMetadata(im), principalMetadata(sm)}), nil
	}
	// Legacy path: match against the authn filter's dynamic metadata.
	m := matcher.MetadataStringMatcher(filters.AuthnFilterName, key, matcher.StringMatcher(value))
	return principalMetadata(m), nil
}
// requestAudiencesGenerator handles the request.auth.audiences attribute
// (principal only, HTTP only).
type requestAudiencesGenerator struct {
	// useExtendedJwt selects matching the "aud" JWT claim metadata instead of
	// the legacy authn filter metadata.
	useExtendedJwt bool
}

// permission delegates to requestPrincipalGenerator, which always returns an
// "unimplemented" error.
func (requestAudiencesGenerator) permission(key, value string, forTCP bool) (*rbacpb.Permission, error) {
	return requestPrincipalGenerator{}.permission(key, value, forTCP)
}

func (rag requestAudiencesGenerator) principal(key, value string, forTCP bool, useAuthenticated bool) (*rbacpb.Principal, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	if rag.useExtendedJwt {
		// Match the "aud" claim from the JWT claim metadata.
		return principalMetadata(MetadataStringMatcherForJWTClaim("aud", matcher.StringMatcher(value))), nil
	}
	// Legacy path: match against the authn filter's dynamic metadata.
	m := matcher.MetadataStringMatcher(filters.AuthnFilterName, key, matcher.StringMatcher(value))
	return principalMetadata(m), nil
}
// requestPresenterGenerator handles the request.auth.presenter attribute
// (principal only, HTTP only).
type requestPresenterGenerator struct {
	// useExtendedJwt selects matching the "azp" JWT claim metadata instead of
	// the legacy authn filter metadata.
	useExtendedJwt bool
}

// permission delegates to requestPrincipalGenerator, which always returns an
// "unimplemented" error.
func (requestPresenterGenerator) permission(key, value string, forTCP bool) (*rbacpb.Permission, error) {
	return requestPrincipalGenerator{}.permission(key, value, forTCP)
}

func (rpg requestPresenterGenerator) principal(key, value string, forTCP bool, useAuthenticated bool) (*rbacpb.Principal, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	if rpg.useExtendedJwt {
		// Match the "azp" claim from the JWT claim metadata.
		return principalMetadata(MetadataMatcherForJWTClaims([]string{"azp"}, matcher.StringMatcher(value), true)), nil
	}
	// Legacy path: match against the authn filter's dynamic metadata.
	m := matcher.MetadataStringMatcher(filters.AuthnFilterName, key, matcher.StringMatcher(value))
	return principalMetadata(m), nil
}
// requestHeaderGenerator handles request.headers[<name>] attributes
// (principal only, HTTP only).
type requestHeaderGenerator struct{}

// permission is not supported for request.headers.
func (requestHeaderGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

func (requestHeaderGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	// Extract the header name from "request.headers[<name>]".
	header, err := extractNameInBrackets(strings.TrimPrefix(key, attrRequestHeader))
	if err != nil {
		return nil, err
	}
	m := matcher.HeaderMatcher(header, value)
	return principalHeader(m), nil
}
// requestClaimGenerator handles request.auth.claims[<claim>] attributes,
// including nested claims (principal only, HTTP only).
type requestClaimGenerator struct {
	// useExtendedJwt is forwarded to the JWT claim metadata matcher.
	useExtendedJwt bool
}

// permission is not supported for request.auth.claims.
func (requestClaimGenerator) permission(_, _ string, _ bool) (*rbacpb.Permission, error) {
	return nil, fmt.Errorf("unimplemented")
}

func (rcg requestClaimGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	// Extract the (possibly nested) claim path from "request.auth.claims[a][b]...".
	claims, err := extractNameInNestedBrackets(strings.TrimPrefix(key, attrRequestClaims))
	if err != nil {
		return nil, err
	}
	// Generate a metadata list matcher for the given path keys and value.
	// On proxy side, the value should be of list type.
	m := MetadataMatcherForJWTClaims(claims, matcher.StringMatcher(value), rcg.useExtendedJwt)
	return principalMetadata(m), nil
}
// hostGenerator handles host conditions, matched against the :authority
// header (permission only, HTTP only).
type hostGenerator struct{}

func (hg hostGenerator) permission(key, value string, forTCP bool) (*rbacpb.Permission, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	// HostMatcher matches :authority case-insensitively.
	return permissionHeader(matcher.HostMatcher(hostHeader, value)), nil
}

// principal is not supported for hosts.
func (hostGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// pathGenerator handles path conditions (permission only, HTTP only).
type pathGenerator struct{}

func (g pathGenerator) permission(key, value string, forTCP bool) (*rbacpb.Permission, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	m := matcher.PathMatcher(value)
	return permissionPath(m), nil
}

// principal is not supported for paths.
func (pathGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// methodGenerator handles HTTP method conditions, matched against the :method
// header (permission only, HTTP only).
type methodGenerator struct{}

func (methodGenerator) permission(key, value string, forTCP bool) (*rbacpb.Permission, error) {
	if forTCP {
		return nil, fmt.Errorf("%q is HTTP only", key)
	}
	m := matcher.HeaderMatcher(methodHeader, value)
	return permissionHeader(m), nil
}

// principal is not supported for methods.
func (methodGenerator) principal(key, value string, forTCP bool, _ bool) (*rbacpb.Principal, error) {
	return nil, fmt.Errorf("unimplemented")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strings"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
authzpb "istio.io/api/security/v1beta1"
"istio.io/istio/pilot/pkg/security/trustdomain"
)
const (
	// Stat prefixes and dynamic-metadata keys used by the generated RBAC filters.
	RBACTCPFilterStatPrefix           = "tcp."
	RBACShadowEngineResult            = "shadow_engine_result"
	RBACShadowEffectivePolicyID       = "shadow_effective_policy_id"
	RBACShadowRulesAllowStatPrefix    = "istio_dry_run_allow_"
	RBACShadowRulesDenyStatPrefix     = "istio_dry_run_deny_"
	RBACExtAuthzShadowRulesStatPrefix = "istio_ext_authz_"

	// Attribute names accepted in authorization policy conditions.
	attrRequestHeader    = "request.headers"        // header name is surrounded by brackets, e.g. "request.headers[User-Agent]".
	attrSrcIP            = "source.ip"              // supports both single ip and cidr, e.g. "10.1.2.3" or "10.1.0.0/16".
	attrRemoteIP         = "remote.ip"              // original client ip determined from x-forwarded-for or proxy protocol.
	attrSrcNamespace     = "source.namespace"       // e.g. "default".
	attrSrcPrincipal     = "source.principal"       // source identity, e,g, "cluster.local/ns/default/sa/productpage".
	attrRequestPrincipal = "request.auth.principal" // authenticated principal of the request.
	attrRequestAudiences = "request.auth.audiences" // intended audience(s) for this authentication information.
	attrRequestPresenter = "request.auth.presenter" // authorized presenter of the credential.
	attrRequestClaims    = "request.auth.claims"    // claim name is surrounded by brackets, e.g. "request.auth.claims[iss]".
	attrDestIP           = "destination.ip"         // supports both single ip and cidr, e.g. "10.1.2.3" or "10.1.0.0/16".
	attrDestPort         = "destination.port"       // must be in the range [0, 65535].
	attrConnSNI          = "connection.sni"         // server name indication, e.g. "www.example.com".
	attrEnvoyFilter      = "experimental.envoy.filters." // an experimental attribute for checking Envoy Metadata directly.

	// Internal names used to generate corresponding Envoy matcher.
	methodHeader = ":method"
	pathMatcher  = "path-matcher"
	hostHeader   = ":authority"
)
// rule is a single normalized condition: an attribute key with the values to
// match and notValues to exclude, plus the generator used to convert each
// value into an Envoy RBAC permission or principal.
type rule struct {
	key       string
	values    []string
	notValues []string
	g         generator
}
// ruleList holds an ordered collection of rules.
type ruleList struct {
	rules []*rule
}
// Model represents a single rule from an authorization policy. The conditions of the rule are consolidated into
// permission or principal to align with the Envoy RBAC filter API.
type Model struct {
	// permissions holds rule lists built from destination-oriented conditions
	// (e.g. destination.ip, destination.port, connection.sni).
	permissions []ruleList
	// principals holds rule lists built from source- and request-oriented
	// conditions (e.g. source.principal, request.auth.*).
	principals []ruleList
}
// New returns a model representing a single authorization policy.
// Each "when" condition is routed to either the permission side (destination
// attributes) or the principal side (source/request attributes); "from" and
// "to" clauses are then merged with those base lists, one ruleList per clause.
// An unknown "when" attribute is an error.
func New(r *authzpb.Rule, useExtendedJwt bool) (*Model, error) {
	m := Model{}
	basePermission := ruleList{}
	basePrincipal := ruleList{}
	// Each condition in the when needs to be consolidated into either permission or principal.
	for _, when := range r.When {
		k := when.Key
		switch {
		case k == attrDestIP:
			basePermission.appendLast(destIPGenerator{}, k, when.Values, when.NotValues)
		case k == attrDestPort:
			basePermission.appendLast(destPortGenerator{}, k, when.Values, when.NotValues)
		case k == attrConnSNI:
			basePermission.appendLast(connSNIGenerator{}, k, when.Values, when.NotValues)
		case strings.HasPrefix(k, attrEnvoyFilter):
			basePermission.appendLast(envoyFilterGenerator{useExtendedJwt: useExtendedJwt}, k, when.Values, when.NotValues)
		case k == attrSrcIP:
			basePrincipal.appendLast(srcIPGenerator{}, k, when.Values, when.NotValues)
		case k == attrRemoteIP:
			basePrincipal.appendLast(remoteIPGenerator{}, k, when.Values, when.NotValues)
		case k == attrSrcNamespace:
			basePrincipal.appendLast(srcNamespaceGenerator{}, k, when.Values, when.NotValues)
		case k == attrSrcPrincipal:
			basePrincipal.appendLast(srcPrincipalGenerator{}, k, when.Values, when.NotValues)
		case k == attrRequestPrincipal:
			basePrincipal.appendLast(requestPrincipalGenerator{useExtendedJwt: useExtendedJwt}, k, when.Values, when.NotValues)
		case k == attrRequestAudiences:
			basePrincipal.appendLast(requestAudiencesGenerator{useExtendedJwt: useExtendedJwt}, k, when.Values, when.NotValues)
		case k == attrRequestPresenter:
			basePrincipal.appendLast(requestPresenterGenerator{useExtendedJwt: useExtendedJwt}, k, when.Values, when.NotValues)
		case strings.HasPrefix(k, attrRequestHeader):
			basePrincipal.appendLast(requestHeaderGenerator{}, k, when.Values, when.NotValues)
		case strings.HasPrefix(k, attrRequestClaims):
			basePrincipal.appendLast(requestClaimGenerator{useExtendedJwt: useExtendedJwt}, k, when.Values, when.NotValues)
		default:
			return nil, fmt.Errorf("unknown attribute %s", when.Key)
		}
	}
	// One principal ruleList per "from" clause: the source conditions are
	// inserted ahead of the copied base "when" conditions.
	for _, from := range r.From {
		merged := basePrincipal.copy()
		if s := from.Source; s != nil {
			merged.insertFront(srcIPGenerator{}, attrSrcIP, s.IpBlocks, s.NotIpBlocks)
			merged.insertFront(remoteIPGenerator{}, attrRemoteIP, s.RemoteIpBlocks, s.NotRemoteIpBlocks)
			merged.insertFront(srcNamespaceGenerator{}, attrSrcNamespace, s.Namespaces, s.NotNamespaces)
			merged.insertFront(requestPrincipalGenerator{useExtendedJwt: useExtendedJwt}, attrRequestPrincipal, s.RequestPrincipals, s.NotRequestPrincipals)
			merged.insertFront(srcPrincipalGenerator{}, attrSrcPrincipal, s.Principals, s.NotPrincipals)
		}
		m.principals = append(m.principals, merged)
	}
	// No "from" clause: the base "when" conditions stand alone.
	if len(r.From) == 0 {
		m.principals = append(m.principals, basePrincipal)
	}
	// One permission ruleList per "to" clause, mirroring the "from" handling.
	for _, to := range r.To {
		merged := basePermission.copy()
		if o := to.Operation; o != nil {
			merged.insertFront(destPortGenerator{}, attrDestPort, o.Ports, o.NotPorts)
			merged.insertFront(pathGenerator{}, pathMatcher, o.Paths, o.NotPaths)
			merged.insertFront(methodGenerator{}, methodHeader, o.Methods, o.NotMethods)
			merged.insertFront(hostGenerator{}, hostHeader, o.Hosts, o.NotHosts)
		}
		m.permissions = append(m.permissions, merged)
	}
	if len(r.To) == 0 {
		m.permissions = append(m.permissions, basePermission)
	}
	return &m, nil
}
// MigrateTrustDomain replaces the trust domain in source principal based on the trust domain aliases information.
func (m *Model) MigrateTrustDomain(tdBundle trustdomain.Bundle) {
	for _, principalList := range m.principals {
		for _, rl := range principalList.rules {
			// Only source.principal rules carry SPIFFE identities with a trust domain.
			if rl.key != attrSrcPrincipal {
				continue
			}
			if len(rl.values) > 0 {
				rl.values = tdBundle.ReplaceTrustDomainAliases(rl.values)
			}
			if len(rl.notValues) > 0 {
				rl.notValues = tdBundle.ReplaceTrustDomainAliases(rl.notValues)
			}
		}
	}
}
// Generate generates the Envoy RBAC config from the model.
// A policy must contain at least one permission and one principal; otherwise
// an error is returned.
func (m Model) Generate(forTCP bool, useAuthenticated bool, action rbacpb.RBAC_Action) (*rbacpb.Policy, error) {
	perms := make([]*rbacpb.Permission, 0, len(m.permissions))
	for _, rl := range m.permissions {
		p, err := generatePermission(rl, forTCP, action)
		if err != nil {
			return nil, err
		}
		perms = append(perms, p)
	}
	if len(perms) == 0 {
		return nil, fmt.Errorf("must have at least 1 permission")
	}
	prins := make([]*rbacpb.Principal, 0, len(m.principals))
	for _, rl := range m.principals {
		p, err := generatePrincipal(rl, forTCP, useAuthenticated, action)
		if err != nil {
			return nil, err
		}
		prins = append(prins, p)
	}
	if len(prins) == 0 {
		return nil, fmt.Errorf("must have at least 1 principal")
	}
	return &rbacpb.Policy{Permissions: perms, Principals: prins}, nil
}
// generatePermission AND-s together the permissions produced by every rule in
// the list; an empty list degenerates to match-any.
func generatePermission(rl ruleList, forTCP bool, action rbacpb.RBAC_Action) (*rbacpb.Permission, error) {
	var conjuncts []*rbacpb.Permission
	for _, r := range rl.rules {
		generated, err := r.permission(forTCP, action)
		if err != nil {
			return nil, err
		}
		conjuncts = append(conjuncts, generated...)
	}
	if len(conjuncts) == 0 {
		conjuncts = []*rbacpb.Permission{permissionAny()}
	}
	return permissionAnd(conjuncts), nil
}
// generatePrincipal AND-s together the principals produced by every rule in
// the list; an empty list degenerates to match-any.
func generatePrincipal(rl ruleList, forTCP bool, useAuthenticated bool, action rbacpb.RBAC_Action) (*rbacpb.Principal, error) {
	var conjuncts []*rbacpb.Principal
	for _, r := range rl.rules {
		generated, err := r.principal(forTCP, useAuthenticated, action)
		if err != nil {
			return nil, err
		}
		conjuncts = append(conjuncts, generated...)
	}
	if len(conjuncts) == 0 {
		conjuncts = []*rbacpb.Principal{principalAny()}
	}
	return principalAnd(conjuncts), nil
}
// permission translates this rule into Envoy permissions: the values become a
// single OR group and the notValues become a negated OR group.
func (r rule) permission(forTCP bool, action rbacpb.RBAC_Action) ([]*rbacpb.Permission, error) {
	var out []*rbacpb.Permission
	match := make([]*rbacpb.Permission, 0, len(r.values))
	for _, v := range r.values {
		p, genErr := r.g.permission(r.key, v, forTCP)
		if err := r.checkError(action, genErr); err != nil {
			return nil, err
		}
		if p != nil {
			match = append(match, p)
		}
	}
	if len(match) > 0 {
		out = append(out, permissionOr(match))
	}
	exclude := make([]*rbacpb.Permission, 0, len(r.notValues))
	for _, v := range r.notValues {
		p, genErr := r.g.permission(r.key, v, forTCP)
		if err := r.checkError(action, genErr); err != nil {
			return nil, err
		}
		if p != nil {
			exclude = append(exclude, p)
		}
	}
	if len(exclude) > 0 {
		out = append(out, permissionNot(permissionOr(exclude)))
	}
	return out, nil
}
// principal translates this rule into Envoy principals: the values become a
// single OR group and the notValues become a negated OR group.
func (r rule) principal(forTCP bool, useAuthenticated bool, action rbacpb.RBAC_Action) ([]*rbacpb.Principal, error) {
	var out []*rbacpb.Principal
	match := make([]*rbacpb.Principal, 0, len(r.values))
	for _, v := range r.values {
		p, genErr := r.g.principal(r.key, v, forTCP, useAuthenticated)
		if err := r.checkError(action, genErr); err != nil {
			return nil, err
		}
		if p != nil {
			match = append(match, p)
		}
	}
	if len(match) > 0 {
		out = append(out, principalOr(match))
	}
	exclude := make([]*rbacpb.Principal, 0, len(r.notValues))
	for _, v := range r.notValues {
		p, genErr := r.g.principal(r.key, v, forTCP, useAuthenticated)
		if err := r.checkError(action, genErr); err != nil {
			return nil, err
		}
		if p != nil {
			exclude = append(exclude, p)
		}
	}
	if len(exclude) > 0 {
		out = append(out, principalNot(principalOr(exclude)))
	}
	return out, nil
}
// checkError decides whether a generation error should abort policy generation.
func (r rule) checkError(action rbacpb.RBAC_Action, err error) error {
	if action != rbacpb.RBAC_ALLOW {
		// Ignore the error for a deny or audit policy. This drops only the current rule and continues with
		// the next one, yielding a wider deny or audit policy (i.e. more likely to deny or audit a request).
		return nil
	}
	// Return the error as-is for an allow policy. This makes all rules in the current permission ignored,
	// effectively resulting in a smaller allow policy (i.e. less likely to allow a request).
	return err
}
// copy returns a shallow copy of the rule list; the *rule elements are shared.
func (p *ruleList) copy() ruleList {
	dup := make([]*rule, len(p.rules))
	copy(dup, p.rules)
	return ruleList{rules: dup}
}
// insertFront prepends a new rule to the list; it is a no-op when there is
// nothing to match (both values and notValues are empty).
func (p *ruleList) insertFront(g generator, key string, values, notValues []string) {
	if len(values)+len(notValues) == 0 {
		return
	}
	newRule := &rule{
		key:       key,
		values:    values,
		notValues: notValues,
		g:         g,
	}
	p.rules = append([]*rule{newRule}, p.rules...)
}
// appendLast appends a new rule to the list; it is a no-op when there is
// nothing to match (both values and notValues are empty).
func (p *ruleList) appendLast(g generator, key string, values, notValues []string) {
	if len(values)+len(notValues) == 0 {
		return
	}
	newRule := &rule{
		key:       key,
		values:    values,
		notValues: notValues,
		g:         g,
	}
	p.rules = append(p.rules, newRule)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)
// permissionAny returns a permission that matches every request.
func permissionAny() *rbacpb.Permission {
	anyRule := &rbacpb.Permission_Any{Any: true}
	return &rbacpb.Permission{Rule: anyRule}
}
// permissionAnd returns a permission that matches only when every given
// permission matches.
func permissionAnd(permission []*rbacpb.Permission) *rbacpb.Permission {
	set := &rbacpb.Permission_Set{Rules: permission}
	return &rbacpb.Permission{
		Rule: &rbacpb.Permission_AndRules{AndRules: set},
	}
}
// permissionOr returns a permission that matches when any of the given
// permissions matches.
func permissionOr(permission []*rbacpb.Permission) *rbacpb.Permission {
	set := &rbacpb.Permission_Set{Rules: permission}
	return &rbacpb.Permission{
		Rule: &rbacpb.Permission_OrRules{OrRules: set},
	}
}
// permissionNot returns the negation of the given permission.
func permissionNot(permission *rbacpb.Permission) *rbacpb.Permission {
	negated := &rbacpb.Permission_NotRule{NotRule: permission}
	return &rbacpb.Permission{Rule: negated}
}
// permissionDestinationIP returns a permission matching the destination IP
// against the given CIDR range.
func permissionDestinationIP(cidr *core.CidrRange) *rbacpb.Permission {
	ipRule := &rbacpb.Permission_DestinationIp{DestinationIp: cidr}
	return &rbacpb.Permission{Rule: ipRule}
}
// permissionDestinationPort returns a permission matching the destination port.
func permissionDestinationPort(port uint32) *rbacpb.Permission {
	portRule := &rbacpb.Permission_DestinationPort{DestinationPort: port}
	return &rbacpb.Permission{Rule: portRule}
}
// permissionRequestedServerName returns a permission matching the requested
// server name (SNI).
func permissionRequestedServerName(name *matcher.StringMatcher) *rbacpb.Permission {
	sniRule := &rbacpb.Permission_RequestedServerName{RequestedServerName: name}
	return &rbacpb.Permission{Rule: sniRule}
}
// permissionMetadata returns a permission matching dynamic metadata.
func permissionMetadata(metadata *matcher.MetadataMatcher) *rbacpb.Permission {
	mdRule := &rbacpb.Permission_Metadata{Metadata: metadata}
	return &rbacpb.Permission{Rule: mdRule}
}
// permissionHeader returns a permission matching a request header.
func permissionHeader(header *routepb.HeaderMatcher) *rbacpb.Permission {
	headerRule := &rbacpb.Permission_Header{Header: header}
	return &rbacpb.Permission{Rule: headerRule}
}
// permissionPath returns a permission matching the URL path.
func permissionPath(path *matcher.PathMatcher) *rbacpb.Permission {
	pathRule := &rbacpb.Permission_UrlPath{UrlPath: path}
	return &rbacpb.Permission{Rule: pathRule}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
rbacpb "github.com/envoyproxy/go-control-plane/envoy/config/rbac/v3"
routepb "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
matcher "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
)
// principalAny returns a principal that matches every client.
func principalAny() *rbacpb.Principal {
	anyID := &rbacpb.Principal_Any{Any: true}
	return &rbacpb.Principal{Identifier: anyID}
}
// principalOr returns a principal that matches when any of the given
// principals matches.
func principalOr(principals []*rbacpb.Principal) *rbacpb.Principal {
	set := &rbacpb.Principal_Set{Ids: principals}
	return &rbacpb.Principal{
		Identifier: &rbacpb.Principal_OrIds{OrIds: set},
	}
}
// principalAnd returns a principal that matches only when every given
// principal matches.
func principalAnd(principals []*rbacpb.Principal) *rbacpb.Principal {
	set := &rbacpb.Principal_Set{Ids: principals}
	return &rbacpb.Principal{
		Identifier: &rbacpb.Principal_AndIds{AndIds: set},
	}
}
// principalNot returns the negation of the given principal.
func principalNot(principal *rbacpb.Principal) *rbacpb.Principal {
	negated := &rbacpb.Principal_NotId{NotId: principal}
	return &rbacpb.Principal{Identifier: negated}
}
// principalAuthenticated returns a principal matching the peer identity.
// With useAuthenticated it matches the TLS-authenticated principal name;
// otherwise it matches the "io.istio.peer_principal" filter-state value.
func principalAuthenticated(name *matcher.StringMatcher, useAuthenticated bool) *rbacpb.Principal {
	if !useAuthenticated {
		fsMatcher := &matcher.FilterStateMatcher{
			Key: "io.istio.peer_principal",
			Matcher: &matcher.FilterStateMatcher_StringMatch{
				StringMatch: name,
			},
		}
		return &rbacpb.Principal{
			Identifier: &rbacpb.Principal_FilterState{FilterState: fsMatcher},
		}
	}
	return &rbacpb.Principal{
		Identifier: &rbacpb.Principal_Authenticated_{
			Authenticated: &rbacpb.Principal_Authenticated{PrincipalName: name},
		},
	}
}
// principalDirectRemoteIP returns a principal matching the directly connected
// peer address against the given CIDR range.
func principalDirectRemoteIP(cidr *core.CidrRange) *rbacpb.Principal {
	ipID := &rbacpb.Principal_DirectRemoteIp{DirectRemoteIp: cidr}
	return &rbacpb.Principal{Identifier: ipID}
}
// principalRemoteIP returns a principal matching the original client address
// against the given CIDR range.
func principalRemoteIP(cidr *core.CidrRange) *rbacpb.Principal {
	ipID := &rbacpb.Principal_RemoteIp{RemoteIp: cidr}
	return &rbacpb.Principal{Identifier: ipID}
}
// principalMetadata returns a principal matching dynamic metadata.
func principalMetadata(metadata *matcher.MetadataMatcher) *rbacpb.Principal {
	mdID := &rbacpb.Principal_Metadata{Metadata: metadata}
	return &rbacpb.Principal{Identifier: mdID}
}
// principalHeader returns a principal matching a request header.
func principalHeader(header *routepb.HeaderMatcher) *rbacpb.Principal {
	headerID := &rbacpb.Principal_Header{Header: header}
	return &rbacpb.Principal{Identifier: headerID}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"fmt"
"strconv"
"strings"
matcherpb "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
"istio.io/istio/pilot/pkg/security/authz/matcher"
"istio.io/istio/pilot/pkg/xds/filters"
)
// convertToPort converts a port string to a uint32 in the range [0, 65535].
// It returns an error for non-numeric input, negative values, and values
// above 65535.
func convertToPort(v string) (uint32, error) {
	// bitSize 16 makes strconv enforce the [0, 65535] port range, so an
	// out-of-range value carries a real range error instead of the previous
	// "invalid port %s: <nil>" message produced by the manual p > 65535 check.
	p, err := strconv.ParseUint(v, 10, 16)
	if err != nil {
		return 0, fmt.Errorf("invalid port %s: %v", v, err)
	}
	return uint32(p), nil
}
// extractNameInBrackets returns the text inside a single "[<NAME>]" wrapper,
// or an error if the string is not bracketed on both ends.
func extractNameInBrackets(s string) (string, error) {
	if inner, found := strings.CutPrefix(s, "["); found {
		if name, closed := strings.CutSuffix(inner, "]"); closed {
			return name, nil
		}
	}
	return "", fmt.Errorf("expecting format [<NAME>], but found %s", s)
}
// extractNameInNestedBrackets parses a sequence of bracketed segments such as
// "[a][b][c]" and returns the inner names ["a", "b", "c"].
// If at any point the remainder does not start with a well-formed "[...]"
// segment, the WHOLE input is re-parsed as a single "[<NAME>]" via
// extractNameInBrackets (so e.g. "[a[b]" yields ["a[b"]), and its error, if
// any, is propagated.
func extractNameInNestedBrackets(s string) ([]string, error) {
	var claims []string
	// findEndBracket returns the index of the ']' closing the segment that
	// starts at begin, or -1 when the segment is malformed (no '[' at begin,
	// an unexpected nested '[', or no closing ']').
	findEndBracket := func(begin int) int {
		if begin >= len(s) || s[begin] != '[' {
			return -1
		}
		for i := begin + 1; i < len(s); i++ {
			if s[i] == '[' {
				return -1
			}
			if s[i] == ']' {
				return i
			}
		}
		return -1
	}
	for begin := 0; begin < len(s); {
		end := findEndBracket(begin)
		if end == -1 {
			// Malformed as a nested sequence: fall back to treating the whole
			// string as one bracketed name.
			ret, err := extractNameInBrackets(s)
			if err != nil {
				return nil, err
			}
			return []string{ret}, nil
		}
		claims = append(claims, s[begin+1:end])
		begin = end + 1
	}
	return claims, nil
}
// MetadataStringMatcherForJWTClaim builds a metadata matcher that matches the
// given string matcher against a single top-level claim in the Envoy JWT
// filter's payload metadata.
func MetadataStringMatcherForJWTClaim(claim string, m *matcherpb.StringMatcher) *matcherpb.MetadataMatcher {
	// Path: <jwt payload key> -> <claim name>.
	payloadSegment := &matcherpb.MetadataMatcher_PathSegment{
		Segment: &matcherpb.MetadataMatcher_PathSegment_Key{Key: filters.EnvoyJwtFilterPayload},
	}
	claimSegment := &matcherpb.MetadataMatcher_PathSegment{
		Segment: &matcherpb.MetadataMatcher_PathSegment_Key{Key: claim},
	}
	value := &matcherpb.ValueMatcher{
		MatchPattern: &matcherpb.ValueMatcher_StringMatch{StringMatch: m},
	}
	return &matcherpb.MetadataMatcher{
		Filter: filters.EnvoyJwtFilterName,
		Path:   []*matcherpb.MetadataMatcher_PathSegment{payloadSegment, claimSegment},
		Value:  value,
	}
}
// MetadataMatcherForJWTClaims is a convenient method for generating metadata matcher for JWT claims.
// The claims are looked up under the Envoy JWT filter payload when the
// extended JWT support is enabled, and under the legacy authn filter otherwise.
func MetadataMatcherForJWTClaims(claims []string, value *matcherpb.StringMatcher, useExtendedJwt bool) *matcherpb.MetadataMatcher {
	if !useExtendedJwt {
		path := append([]string{attrRequestClaims}, claims...)
		return matcher.MetadataListMatcher(filters.AuthnFilterName, path, value, false)
	}
	path := append([]string{filters.EnvoyJwtFilterPayload}, claims...)
	return matcher.MetadataListMatcher(filters.EnvoyJwtFilterName, path, value, true)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"strings"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"google.golang.org/protobuf/types/known/durationpb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/credentials"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
)
const (
	// SDSClusterName is the name of the cluster for SDS connections
	SDSClusterName = "sds-grpc"
	// SDSDefaultResourceName is the default name in sdsconfig, used for fetching normal key/cert.
	SDSDefaultResourceName = "default"
	// SDSRootResourceName is the sdsconfig name for root CA, used for fetching root cert.
	SDSRootResourceName = "ROOTCA"
	// K8sSAJwtFileName is the token volume mount file name for k8s jwt token.
	K8sSAJwtFileName = "/var/run/secrets/kubernetes.io/serviceaccount/token"
	// K8sSATrustworthyJwtFileName is the token volume mount file name for k8s trustworthy jwt token.
	K8sSATrustworthyJwtFileName = "/var/run/secrets/tokens/istio-token"
	// K8sSAJwtTokenHeaderKey is the request header key for k8s jwt token.
	// Binary header name must has suffix "-bin", according to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md.
	K8sSAJwtTokenHeaderKey = "istio_sds_credentials_header-bin"
	// SdsCaSuffix is the suffix of the sds resource name for root CA.
	SdsCaSuffix = credentials.SdsCaSuffix
	// EnvoyJwtFilterName is the name of the Envoy JWT filter. This should be the same as the name defined
	// in https://github.com/envoyproxy/envoy/blob/v1.9.1/source/extensions/filters/http/well_known_names.h#L48
	EnvoyJwtFilterName = "envoy.filters.http.jwt_authn"
)
// SDSAdsConfig is the shared ADS-based SDS config source (v3 API) used for
// credentialName secrets delivered over the aggregated discovery stream.
var SDSAdsConfig = &core.ConfigSource{
	ConfigSourceSpecifier: &core.ConfigSource_Ads{
		Ads: &core.AggregatedConfigSource{},
	},
	// We intentionally do *not* set InitialFetchTimeout to 0s here, as this is used for
	// credentialName SDS which may refer to secrets which do not exist. We do not want to block the
	// entire listener/cluster in these cases.
	ResourceApiVersion: core.ApiVersion_V3,
}
// ConstructSdsSecretConfigForCredential constructs SDS secret configuration used
// from certificates referenced by credentialName in DestinationRule or Gateway.
// Currently this is served by a local SDS server, but in the future replaced by
// Istiod SDS server.
func ConstructSdsSecretConfigForCredential(name string, credentialSocketExist bool) *tls.SdsSecretConfig {
	switch {
	case name == "":
		return nil
	case name == credentials.BuiltinGatewaySecretTypeURI:
		return ConstructSdsSecretConfig(SDSDefaultResourceName)
	case name == credentials.BuiltinGatewaySecretTypeURI+SdsCaSuffix:
		return ConstructSdsSecretConfig(SDSRootResourceName)
	case credentialSocketExist && strings.HasPrefix(name, security.SDSExternalCredentialPrefix):
		// The credential socket exists and the name carries the external
		// credential prefix: serve via SDSExternalClusterName.
		return ConstructSdsSecretConfigForCredentialSocket(name)
	}
	return &tls.SdsSecretConfig{
		Name:      credentials.ToResourceName(name),
		SdsConfig: SDSAdsConfig,
	}
}
// ConstructSdsSecretConfigForCredentialSocket constructs SDS Secret Configuration based on CredentialNameSocketPath
// if CredentialNameSocketPath exists, use a static cluster 'sds-external'
func ConstructSdsSecretConfigForCredentialSocket(name string) *tls.SdsSecretConfig {
	externalService := &core.GrpcService{
		TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
			EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: security.SDSExternalClusterName},
		},
	}
	apiSource := &core.ApiConfigSource{
		ApiType:                   core.ApiConfigSource_GRPC,
		SetNodeOnFirstMessageOnly: true,
		TransportApiVersion:       core.ApiVersion_V3,
		GrpcServices:              []*core.GrpcService{externalService},
	}
	return &tls.SdsSecretConfig{
		Name: name,
		SdsConfig: &core.ConfigSource{
			ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{ApiConfigSource: apiSource},
			ResourceApiVersion:    core.ApiVersion_V3,
		},
	}
}
// Preconfigured SDS configs to avoid excessive memory allocations
var (
	// defaultSDSConfig serves the workload key/cert ("default" resource) over
	// the local sds-grpc cluster; InitialFetchTimeout of 0s disables the
	// fetch timeout since the local agent always has this secret.
	defaultSDSConfig = &tls.SdsSecretConfig{
		Name: SDSDefaultResourceName,
		SdsConfig: &core.ConfigSource{
			ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
				ApiConfigSource: &core.ApiConfigSource{
					ApiType:                   core.ApiConfigSource_GRPC,
					SetNodeOnFirstMessageOnly: true,
					TransportApiVersion:       core.ApiVersion_V3,
					GrpcServices: []*core.GrpcService{
						{
							TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
								EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
							},
						},
					},
				},
			},
			ResourceApiVersion:  core.ApiVersion_V3,
			InitialFetchTimeout: durationpb.New(time.Second * 0),
		},
	}
	// rootSDSConfig serves the root CA cert ("ROOTCA" resource) over the same
	// local sds-grpc cluster.
	rootSDSConfig = &tls.SdsSecretConfig{
		Name: SDSRootResourceName,
		SdsConfig: &core.ConfigSource{
			ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
				ApiConfigSource: &core.ApiConfigSource{
					ApiType:                   core.ApiConfigSource_GRPC,
					SetNodeOnFirstMessageOnly: true,
					TransportApiVersion:       core.ApiVersion_V3,
					GrpcServices: []*core.GrpcService{
						{
							TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
								EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
							},
						},
					},
				},
			},
			ResourceApiVersion:  core.ApiVersion_V3,
			InitialFetchTimeout: durationpb.New(time.Second * 0),
		},
	}
)
// ConstructSdsSecretConfig constructs SDS Secret Configuration for workload proxy.
// The two well-known resource names reuse the preallocated configs; any other
// name gets a freshly built config pointing at the local sds-grpc cluster.
func ConstructSdsSecretConfig(name string) *tls.SdsSecretConfig {
	switch name {
	case "":
		return nil
	case SDSDefaultResourceName:
		return defaultSDSConfig
	case SDSRootResourceName:
		return rootSDSConfig
	}
	grpcService := &core.GrpcService{
		TargetSpecifier: &core.GrpcService_EnvoyGrpc_{
			EnvoyGrpc: &core.GrpcService_EnvoyGrpc{ClusterName: SDSClusterName},
		},
	}
	return &tls.SdsSecretConfig{
		Name: name,
		SdsConfig: &core.ConfigSource{
			ConfigSourceSpecifier: &core.ConfigSource_ApiConfigSource{
				ApiConfigSource: &core.ApiConfigSource{
					SetNodeOnFirstMessageOnly: true,
					ApiType:                   core.ApiConfigSource_GRPC,
					TransportApiVersion:       core.ApiVersion_V3,
					GrpcServices:              []*core.GrpcService{grpcService},
				},
			},
			ResourceApiVersion: core.ApiVersion_V3,
		},
	}
}
// AppendURIPrefixToTrustDomain wraps each trust domain alias with the SPIFFE
// URI prefix and a trailing "/" (e.g. "td" -> "spiffe://td/").
func AppendURIPrefixToTrustDomain(trustDomainAliases []string) []string {
	out := make([]string, len(trustDomainAliases))
	for i, alias := range trustDomainAliases {
		out[i] = spiffe.URIPrefix + alias + "/"
	}
	return out
}
// ApplyToCommonTLSContext completes the commonTlsContext by wiring up the
// workload certificate (and, when validateClient is set, the client
// certificate validation context) via SDS.
// subjectAltNames become exact-match SAN entries; trustDomainAliases add
// prefix-match entries over their SPIFFE URI forms.
func ApplyToCommonTLSContext(tlsContext *tls.CommonTlsContext, proxy *model.Proxy,
	subjectAltNames []string, trustDomainAliases []string, validateClient bool,
) {
	// These are certs being mounted from within the pod. Rather than reading directly in Envoy,
	// which does not support rotation, we will serve them over SDS by reading the files.
	// We should check if these certs have values, if yes we should use them or otherwise fall back to defaults.
	res := security.SdsCertificateConfig{
		CertificatePath:   proxy.Metadata.TLSServerCertChain,
		PrivateKeyPath:    proxy.Metadata.TLSServerKey,
		CaCertificatePath: proxy.Metadata.TLSServerRootCert,
	}
	// TODO: if subjectAltName ends with *, create a prefix match as well.
	// TODO: if user explicitly specifies SANs - should we alter his explicit config by adding all spifee aliases?
	matchSAN := util.StringToExactMatch(subjectAltNames)
	if len(trustDomainAliases) > 0 {
		matchSAN = append(matchSAN, util.StringToPrefixMatch(AppendURIPrefixToTrustDomain(trustDomainAliases))...)
	}
	// configure server listeners with SDS.
	if validateClient {
		tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext:         &tls.CertificateValidationContext{MatchSubjectAltNames: matchSAN},
				ValidationContextSdsSecretConfig: ConstructSdsSecretConfig(model.GetOrDefault(res.GetRootResourceName(), SDSRootResourceName)),
			},
		}
	}
	tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
		ConstructSdsSecretConfig(model.GetOrDefault(res.GetResourceName(), SDSDefaultResourceName)),
	}
}
// ApplyCustomSDSToClientCommonTLSContext applies the customized sds to CommonTlsContext
// Used for building upstream TLS context for egress gateway's TLS/mTLS origination
func ApplyCustomSDSToClientCommonTLSContext(tlsContext *tls.CommonTlsContext,
	tlsOpts *networking.ClientTLSSettings, credentialSocketExist bool,
) {
	// MUTUAL mode also needs a client key/cert fetched from the agent via SDS.
	if tlsOpts.Mode == networking.ClientTLSSettings_MUTUAL {
		tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
			ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName, credentialSocketExist),
		}
	}
	// If the InsecureSkipVerify is true, there is no need to configure CA Cert and SAN.
	if tlsOpts.GetInsecureSkipVerify().GetValue() {
		return
	}
	// Server cert validation: CA cert via SDS at the gateway agent, SANs as
	// exact matches.
	caConfig := ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName+SdsCaSuffix, credentialSocketExist)
	tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
		CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
			DefaultValidationContext: &tls.CertificateValidationContext{
				MatchSubjectAltNames: util.StringToExactMatch(tlsOpts.SubjectAltNames),
			},
			ValidationContextSdsSecretConfig: caConfig,
		},
	}
}
// ApplyCredentialSDSToServerCommonTLSContext applies the credentialName sds (Gateway/DestinationRule) to CommonTlsContext
// Used for building both gateway/sidecar TLS context
func ApplyCredentialSDSToServerCommonTLSContext(tlsContext *tls.CommonTlsContext,
	tlsOpts *networking.ServerTLSSettings, credentialSocketExist bool,
) {
	// create SDS config for gateway/sidecar to fetch key/cert from agent.
	tlsContext.TlsCertificateSdsSecretConfigs = []*tls.SdsSecretConfig{
		ConstructSdsSecretConfigForCredential(tlsOpts.CredentialName, credentialSocketExist),
	}
	// If tls mode is MUTUAL/OPTIONAL_MUTUAL, create SDS config for gateway/sidecar to fetch certificate validation context
	// at gateway agent. Otherwise, use the static certificate validation context config.
	mutual := tlsOpts.Mode == networking.ServerTLSSettings_MUTUAL ||
		tlsOpts.Mode == networking.ServerTLSSettings_OPTIONAL_MUTUAL
	switch {
	case mutual:
		tlsContext.ValidationContextType = &tls.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &tls.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext: &tls.CertificateValidationContext{
					MatchSubjectAltNames:  util.StringToExactMatch(tlsOpts.SubjectAltNames),
					VerifyCertificateSpki: tlsOpts.VerifyCertificateSpki,
					VerifyCertificateHash: tlsOpts.VerifyCertificateHash,
				},
				ValidationContextSdsSecretConfig: ConstructSdsSecretConfigForCredential(
					tlsOpts.CredentialName+SdsCaSuffix, credentialSocketExist),
			},
		}
	case len(tlsOpts.SubjectAltNames) > 0:
		tlsContext.ValidationContextType = &tls.CommonTlsContext_ValidationContext{
			ValidationContext: &tls.CertificateValidationContext{
				MatchSubjectAltNames: util.StringToExactMatch(tlsOpts.SubjectAltNames),
			},
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trustdomain
import (
"fmt"
"strings"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/constants"
istiolog "istio.io/istio/pkg/log"
)
// authzLog is the logging scope used for authorization policy processing.
var authzLog = istiolog.RegisterScope("authorization", "Istio Authorization Policy")
// Bundle carries the local trust domain together with its aliases, used to
// rewrite SPIFFE principals during trust-domain migration.
type Bundle struct {
	// Contain the local trust domain and its aliases.
	// The trust domain corresponds to the trust root of a system.
	// Refer to [SPIFFE-ID](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain)
	// The trust domain aliases represent the aliases of `trust_domain`.
	// For example, if we have
	// trustDomain: td1, trustDomainAliases: ["td2", "td3"]
	// Any service with the identity `td1/ns/foo/sa/a-service-account`, `td2/ns/foo/sa/a-service-account`,
	// or `td3/ns/foo/sa/a-service-account` will be treated the same in the Istio mesh.
	TrustDomains []string
}
// NewBundle returns a new trust domain bundle.
func NewBundle(trustDomain string, trustDomainAliases []string) Bundle {
	// Put the new trust domain at the beginning of the list to avoid changing existing tests.
	domains := make([]string, 0, len(trustDomainAliases)+1)
	domains = append(domains, trustDomain)
	domains = append(domains, trustDomainAliases...)
	return Bundle{TrustDomains: domains}
}
// ReplaceTrustDomainAliases checks the existing principals and returns a list of new principals
// with the current trust domain and its aliases.
// For example, for a user "bar" in namespace "foo".
// If the local trust domain is "td2" and its alias is "td1" (migrating from td1 to td2),
// replaceTrustDomainAliases returns ["td2/ns/foo/sa/bar", "td1/ns/foo/sa/bar]].
func (t Bundle) ReplaceTrustDomainAliases(principals []string) []string {
	result := make([]string, 0, len(principals))
	for _, principal := range principals {
		// A wildcard trust domain is not enforced; keep the principal as-is.
		if !isTrustDomainBeingEnforced(principal) {
			result = append(result, principal)
			continue
		}
		td, err := getTrustDomainFromSpiffeIdentity(principal)
		if err != nil {
			authzLog.Errorf("unexpected incorrect Spiffe format: %s", principal)
			result = append(result, principal)
			continue
		}
		// Only generate configuration if the extracted trust domain from the policy is part of the trust domain list,
		// or if the extracted/existing trust domain is "cluster.local", which is a pointer to the local trust domain
		// and its aliases.
		if stringMatch(td, t.TrustDomains) || td == constants.DefaultClusterLocalDomain {
			// Generate configuration for trust domain and trust domain aliases.
			result = append(result, t.replaceTrustDomains(principal, td)...)
			continue
		}
		msg := fmt.Sprintf("Trust domain %s from principal %s does not match the current trust "+
			"domain or its aliases", td, principal)
		// when SkipValidateTrustDomain is being used the message isn't very meaningful so we'll log it at a lower level
		// logging it at this level may help users who are looking to disable skipping validation understand if it's safe
		if features.SkipValidateTrustDomain {
			authzLog.Debug(msg)
		} else {
			authzLog.Warn(msg)
		}
		// If the trust domain from the existing doesn't match with the new trust domain aliases or "cluster.local",
		// keep the policy as it is.
		result = append(result, principal)
	}
	return result
}
// replaceTrustDomains replace the given principal's trust domain with the trust domains from the
// trustDomains list and return the new principals.
func (t Bundle) replaceTrustDomains(principal, trustDomainFromPrincipal string) []string {
	out := []string{}
	for _, td := range t.TrustDomains {
		// If the trust domain has a prefix * (e.g. *local from *local/ns/foo/sa/bar), keep the principal
		// as-is for the matched trust domain. For others, replace the trust domain with the new trust domain
		// or alias.
		candidate := principal
		if !suffixMatch(td, trustDomainFromPrincipal) {
			replaced, err := replaceTrustDomainInPrincipal(td, principal)
			if err != nil {
				authzLog.Errorf("Failed to replace trust domain with %s from principal %s: %v", td, principal, err)
				continue
			}
			candidate = replaced
		}
		// Check to make sure we don't generate duplicated principals. This happens when trust domain
		// has a * prefix. For example, "*-td" can match with "old-td" and "new-td", but we only want
		// to keep the principal as-is in the generated config, .i.e. *-td.
		if !isKeyInList(candidate, out) {
			out = append(out, candidate)
		}
	}
	return out
}
// replaceTrustDomainInPrincipal returns a new SPIFFE identity with the new trust domain.
// The trust domain corresponds to the trust root of a system.
// Refer to
// [SPIFFE-ID](https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain)
// In Istio authorization, an identity is presented in the format:
// <trust-domain>/ns/<some-namespace>/sa/<some-service-account>
func replaceTrustDomainInPrincipal(trustDomain string, principal string) (string, error) {
	// A valid SPIFFE identity in authorization has no SPIFFE:// prefix.
	// It is presented as <trust-domain>/ns/<some-namespace>/sa/<some-service-account>
	parts := strings.Split(principal, "/")
	if len(parts) != 5 {
		return "", fmt.Errorf("wrong SPIFFE format: %s", principal)
	}
	// Swap the leading trust-domain segment and reassemble.
	parts[0] = trustDomain
	return strings.Join(parts, "/"), nil
}
// isTrustDomainBeingEnforced checks whether the trust domain is being checked in the filter or not.
// For example, in the principal "*/ns/foo/sa/bar", the trust domain is * and it matches to any trust domain,
// so it won't be checked in the filter.
func isTrustDomainBeingEnforced(principal string) bool {
	parts := strings.Split(principal, "/")
	// A malformed principal (e.g. "sa/bar") carries no trust domain at all,
	// so there is nothing for the filter to enforce.
	if len(parts) != 5 {
		return false
	}
	// Exactly "*" (as opposed to "*-something" or "") disables enforcement.
	return parts[0] != "*"
}
// getTrustDomainFromSpiffeIdentity gets the trust domain from the given principal and expects
// principal to have the right SPIFFE format.
func getTrustDomainFromSpiffeIdentity(principal string) (string, error) {
	// A valid SPIFFE identity in authorization has no SPIFFE:// prefix.
	// It is presented as <trust-domain>/ns/<some-namespace>/sa/<some-service-account>
	parts := strings.Split(principal, "/")
	if len(parts) != 5 {
		return "", fmt.Errorf("wrong SPIFFE format: %s", principal)
	}
	// The trust domain is the leading segment.
	return parts[0], nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trustdomain
import "strings"
// stringMatch checks if a string is in a list, it supports four types of string matches:
// 1. Exact match.
// 2. Wild character match. "*" matches any string.
// 3. Prefix match. For example, "book*" matches "bookstore", "bookshop", etc.
// 4. Suffix match. For example, "*/review" matches "/bookstore/review", "/products/review", etc.
// This is an extensive version of model.stringMatch(). The pattern can be in the string or the list.
func stringMatch(a string, list []string) bool {
	for _, candidate := range list {
		switch {
		case candidate == "*", a == candidate:
			return true
		case prefixMatch(a, candidate), prefixMatch(candidate, a):
			return true
		case suffixMatch(a, candidate), suffixMatch(candidate, a):
			return true
		}
	}
	return false
}
// prefixMatch checks if pattern is a prefix match and if string a has the given prefix.
func prefixMatch(a string, pattern string) bool {
	// Only patterns ending in "*" are prefix patterns; everything else never matches here.
	if len(pattern) == 0 || pattern[len(pattern)-1] != '*' {
		return false
	}
	return strings.HasPrefix(a, pattern[:len(pattern)-1])
}
// suffixMatch checks if pattern is a suffix match and if string a has the given suffix.
func suffixMatch(a string, pattern string) bool {
	// Only patterns starting with "*" are suffix patterns; everything else never matches here.
	if len(pattern) == 0 || pattern[0] != '*' {
		return false
	}
	return strings.HasSuffix(a, pattern[1:])
}
// isKeyInList reports whether key occurs in list. A naive linear scan is fine
// here: the lists involved are always very short.
func isKeyInList(key string, list []string) bool {
	for _, entry := range list {
		if entry == key {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"sync"
"time"
"istio.io/istio/pkg/log"
)
// Component is a unit of server functionality. It receives a stop channel that
// is closed when the server is shutting down, and returns an error on failure.
type Component func(stop <-chan struct{}) error

// Instance is a server that is composed a number of Component tasks.
type Instance interface {
	// Start this Server. Any components that were already added
	// will be run immediately. If any error is returned,
	// Start will terminate and return the error immediately.
	//
	// Once all startup components have been run, starts a polling
	// loop to continue monitoring for new components and returns nil.
	Start(stop <-chan struct{}) error

	// RunComponent adds the given component to the server's run queue.
	RunComponent(name string, t Component)

	// RunComponentAsync runs the given component asynchronously.
	RunComponentAsync(name string, t Component)

	// RunComponentAsyncAndWait runs the given component asynchronously. When
	// the server Instance is shutting down, it will wait for the component
	// to complete before exiting.
	// Note: this is best effort; a process can die at any time.
	RunComponentAsyncAndWait(name string, t Component)

	// Wait for this server Instance to shutdown.
	Wait()
}
// Compile-time assertion that instance implements Instance.
var _ Instance = &instance{}

// New creates a new server Instance.
func New() Instance {
	return &instance{
		done:       make(chan struct{}),
		components: make(chan task, 1000), // should be enough?
	}
}
// instance is the default Instance implementation: components are queued on a
// buffered channel and executed sequentially by Start.
type instance struct {
	// components holds queued tasks awaiting execution.
	components chan task
	// done is closed once the instance has shut down.
	done chan struct{}

	// requiredTerminations keeps track of tasks that should block instance exit
	// if they are not stopped. This allows important cleanup tasks to be completed.
	// Note: this is still best effort; a process can die at any time.
	requiredTerminations sync.WaitGroup
}
// Start synchronously runs every component queued before startup, in order.
// If any of them fails, the instance shuts down and the error is returned
// immediately. Otherwise a background goroutine keeps running components added
// later, until stop is closed.
func (i *instance) Start(stop <-chan struct{}) error {
	shutdown := func() {
		close(i.done)
	}

	// First, drain all startup tasks and immediately return if any fail.
	for startupDone := false; !startupDone; {
		select {
		case next := <-i.components:
			t0 := time.Now()
			if err := next.task(stop); err != nil {
				// Startup error: terminate and return the error.
				shutdown()
				return err
			}
			runtime := time.Since(t0)
			log := log.WithLabels("name", next.name, "runtime", runtime)
			log.Debugf("started task")
			if runtime > time.Second {
				log.Warnf("slow startup task")
			}
		default:
			// We've drained all of the initial tasks.
			// Break out of the loop and run asynchronously.
			startupDone = true
		}
	}

	// Start the run loop to continue tasks added after the instance is started.
	go func() {
		for {
			select {
			case <-stop:
				// Wait for any tasks that are required for termination.
				i.requiredTerminations.Wait()
				// Indicate that this instance is now terminated.
				shutdown()
				return
			case next := <-i.components:
				t0 := time.Now()
				if err := next.task(stop); err != nil {
					// Post-start failures are logged, not fatal.
					logComponentError(next.name, err)
				}
				runtime := time.Since(t0)
				log := log.WithLabels("name", next.name, "runtime", runtime)
				log.Debugf("started post-start task")
				if runtime > time.Second {
					log.Warnf("slow post-start task")
				}
			}
		}
	}()
	return nil
}
// task pairs a Component with a human-readable name used for logging.
type task struct {
	name string
	task Component
}
// RunComponent queues the given component for execution. If the server has
// already shut down, the component is dropped with a warning instead of queued.
func (i *instance) RunComponent(name string, t Component) {
	select {
	case <-i.done:
		// Server already terminated; running new work would never be observed.
		log.Warnf("attempting to run a new component %q after the server was shutdown", name)
	default:
		i.components <- task{name: name, task: t}
	}
}
// RunComponentAsync queues the given component like RunComponent, but its body
// executes in its own goroutine; failures are logged rather than returned.
func (i *instance) RunComponentAsync(name string, comp Component) {
	i.RunComponent(name, func(stop <-chan struct{}) error {
		go func() {
			if err := comp(stop); err != nil {
				logComponentError(name, err)
			}
		}()
		return nil
	})
}
// RunComponentAsyncAndWait queues the given component to run in its own
// goroutine, and registers it with requiredTerminations so shutdown waits for
// it to finish. Failures are logged rather than returned.
func (i *instance) RunComponentAsyncAndWait(name string, comp Component) {
	i.RunComponent(name, func(stop <-chan struct{}) error {
		i.requiredTerminations.Add(1)
		go func() {
			// Release the termination gate once the component returns,
			// regardless of outcome.
			defer i.requiredTerminations.Done()
			if err := comp(stop); err != nil {
				logComponentError(name, err)
			}
		}()
		return nil
	})
}
// Wait blocks until this server Instance has fully shut down (i.done closed).
func (i *instance) Wait() {
	<-i.done
}
// logComponentError records a component failure; used for components whose
// errors cannot be returned to the caller (async and post-start tasks).
func logComponentError(name string, err error) {
	log.Errorf("failure in server component %q: %v", name, err)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aggregate
import (
"net/netip"
"sync"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi/security"
)
// The aggregate controller does not implement serviceregistry.Instance since it may be comprised of various
// providers and clusters.
var (
	_ model.ServiceDiscovery    = &Controller{}
	_ model.AggregateController = &Controller{}
)

// Controller aggregates data across different registries and monitors for changes
type Controller struct {
	// meshHolder provides access to the current mesh config.
	meshHolder mesh.Holder

	// The lock is used to protect the registries and controller's running status.
	storeLock  sync.RWMutex
	registries []*registryEntry
	// indicates whether the controller has run.
	// if true, all the registries added later should be run manually.
	running bool

	// handlers are notified of events from every registry.
	handlers model.ControllerHandlers
	// handlersByCluster holds handlers that only receive events for a specific cluster.
	handlersByCluster map[cluster.ID]*model.ControllerHandlers
	model.NetworkGatewaysHandler
}
// Waypoint aggregates the waypoint addresses for the given scope across all
// registries. Returns nil when ambient controllers are disabled.
func (c *Controller) Waypoint(scope model.WaypointScope) []netip.Addr {
	if !features.EnableAmbientControllers {
		return nil
	}
	var addrs []netip.Addr
	for _, registry := range c.GetRegistries() {
		addrs = append(addrs, registry.Waypoint(scope)...)
	}
	return addrs
}
// WorkloadsForWaypoint aggregates the workloads captured by the given waypoint
// scope across all registries. Returns nil when ambient controllers are disabled.
func (c *Controller) WorkloadsForWaypoint(scope model.WaypointScope) []*model.WorkloadInfo {
	if !features.EnableAmbientControllers {
		return nil
	}
	var workloads []*model.WorkloadInfo
	for _, registry := range c.GetRegistries() {
		workloads = append(workloads, registry.WorkloadsForWaypoint(scope)...)
	}
	return workloads
}
// AdditionalPodSubscriptions unions the extra pod subscriptions reported by
// each registry. Returns nil when ambient controllers are disabled.
func (c *Controller) AdditionalPodSubscriptions(proxy *model.Proxy, addr, cur sets.String) sets.String {
	if !features.EnableAmbientControllers {
		return nil
	}
	out := sets.New[string]()
	for _, registry := range c.GetRegistries() {
		out = out.Merge(registry.AdditionalPodSubscriptions(proxy, addr, cur))
	}
	return out
}
// Policies concatenates the authorization policies from every registry.
// Returns nil when ambient controllers are disabled.
func (c *Controller) Policies(requested sets.Set[model.ConfigKey]) []*security.Authorization {
	if !features.EnableAmbientControllers {
		return nil
	}
	var out []*security.Authorization
	for _, registry := range c.GetRegistries() {
		out = append(out, registry.Policies(requested)...)
	}
	return out
}
// AddressInformation aggregates address info from every registry and returns
// the combined addresses plus the set of keys reported as removed. Returns an
// empty (non-nil) address slice when ambient controllers are disabled.
func (c *Controller) AddressInformation(addresses sets.String) ([]*model.AddressInfo, sets.String) {
	infos := []*model.AddressInfo{}
	if !features.EnableAmbientControllers {
		return infos, nil
	}
	deleted := sets.String{}
	for _, registry := range c.GetRegistries() {
		addrs, removed := registry.AddressInformation(addresses)
		infos = append(infos, addrs...)
		deleted.Merge(removed)
	}
	// We may have 'removed' it in one registry but found it in another
	for _, info := range infos {
		// TODO(@hzxuzhonghu) This is not right for workload, we may search workload by ip, but the resource name is uid.
		if deleted.Contains(info.ResourceName()) {
			deleted.Delete(info.ResourceName())
		}
	}
	return infos, deleted
}
// registryEntry couples a registry with its (optional) dedicated stop channel.
type registryEntry struct {
	serviceregistry.Instance
	// stop if not nil is the per-registry stop chan. If null, the server stop chan should be used to Run the registry.
	stop <-chan struct{}
}

// Options configures a new aggregate Controller.
type Options struct {
	// MeshHolder provides access to the current mesh config.
	MeshHolder mesh.Holder
}
// NewController creates a new Aggregate controller
func NewController(opt Options) *Controller {
	c := &Controller{
		registries:        make([]*registryEntry, 0),
		meshHolder:        opt.MeshHolder,
		running:           false,
		handlersByCluster: map[cluster.ID]*model.ControllerHandlers{},
	}
	return c
}
// addRegistry appends the registry and wires its events into the aggregate
// handlers. Callers must hold storeLock (AddRegistry and AddRegistryAndRun do).
func (c *Controller) addRegistry(registry serviceregistry.Instance, stop <-chan struct{}) {
	c.registries = append(c.registries, &registryEntry{Instance: registry, stop: stop})

	// Observe the registry for events.
	registry.AppendNetworkGatewayHandler(c.NotifyGatewayHandlers)
	registry.AppendServiceHandler(c.handlers.NotifyServiceHandlers)
	// Fan service events out to the per-cluster handlers as well; the handler
	// snapshot is taken at event time, not registration time.
	registry.AppendServiceHandler(func(prev, curr *model.Service, event model.Event) {
		for _, handlers := range c.getClusterHandlers() {
			handlers.NotifyServiceHandlers(prev, curr, event)
		}
	})
}
// getClusterHandlers returns a snapshot of the per-cluster handler sets.
// A read lock is sufficient: this only reads handlersByCluster, which is
// mutated exclusively under the write lock (AppendServiceHandlerForCluster,
// UnRegisterHandlersForCluster). Using RLock avoids blocking concurrent
// readers during event fan-out.
func (c *Controller) getClusterHandlers() []*model.ControllerHandlers {
	c.storeLock.RLock()
	defer c.storeLock.RUnlock()
	return maps.Values(c.handlersByCluster)
}
// AddRegistry adds registries into the aggregated controller.
// If the aggregated controller is already Running, the given registry will never be started.
func (c *Controller) AddRegistry(registry serviceregistry.Instance) {
	c.storeLock.Lock()
	defer c.storeLock.Unlock()
	// nil stop: the registry will use the server-wide stop channel when Run.
	c.addRegistry(registry, nil)
}
// AddRegistryAndRun adds registries into the aggregated controller and makes sure it is Run.
// If the aggregated controller is running, the given registry is Run immediately.
// Otherwise, the given registry is Run when the aggregate controller is Run, using the given stop.
func (c *Controller) AddRegistryAndRun(registry serviceregistry.Instance, stop <-chan struct{}) {
	if stop == nil {
		log.Warnf("nil stop channel passed to AddRegistryAndRun for registry %s/%s", registry.Provider(), registry.Cluster())
	}
	c.storeLock.Lock()
	defer c.storeLock.Unlock()
	c.addRegistry(registry, stop)
	if c.running {
		// Controller already started: the registry won't be picked up by Run,
		// so start it here.
		go registry.Run(stop)
	}
}
// DeleteRegistry deletes specified registry from the aggregated controller
func (c *Controller) DeleteRegistry(clusterID cluster.ID, providerID provider.ID) {
	c.storeLock.Lock()
	defer c.storeLock.Unlock()

	if len(c.registries) == 0 {
		log.Warnf("Registry list is empty, nothing to delete")
		return
	}
	index, ok := c.getRegistryIndex(clusterID, providerID)
	if !ok {
		log.Warnf("Registry %s/%s is not found in the registries list, nothing to delete", providerID, clusterID)
		return
	}
	// Shift the tail down over the deleted entry, then clear the now-unused
	// last slot. The previous code nil-ed out the deleted index *before*
	// shifting, so the shift overwrote the nil and the backing array's tail
	// slot kept a stale reference to the removed registry, delaying its GC.
	copy(c.registries[index:], c.registries[index+1:])
	c.registries[len(c.registries)-1] = nil
	c.registries = c.registries[:len(c.registries)-1]
	log.Infof("%s registry for the cluster %s has been deleted.", providerID, clusterID)
}
// GetRegistries returns a copy of all registries
func (c *Controller) GetRegistries() []serviceregistry.Instance {
	c.storeLock.RLock()
	defer c.storeLock.RUnlock()

	// Shallow copy so callers can iterate without holding the lock; the
	// registry entries themselves are shared.
	out := make([]serviceregistry.Instance, 0, len(c.registries))
	for _, entry := range c.registries {
		out = append(out, entry)
	}
	return out
}
// getRegistryIndex returns the position of the registry matching both cluster
// and provider, and whether one was found. Callers must hold storeLock.
func (c *Controller) getRegistryIndex(clusterID cluster.ID, provider provider.ID) (int, bool) {
	for idx, entry := range c.registries {
		if entry.Provider() == provider && entry.Cluster().Equals(clusterID) {
			return idx, true
		}
	}
	return 0, false
}
// Services lists services from all platforms
func (c *Controller) Services() []*model.Service {
	// smap is a map of hostname (string) to service index, used to identify services that
	// are installed in multiple clusters.
	smap := make(map[host.Name]int)
	index := 0
	services := make([]*model.Service, 0)
	// Locking Registries list while walking it to prevent inconsistent results
	for _, r := range c.GetRegistries() {
		svcs := r.Services()
		if r.Provider() != provider.Kubernetes {
			// Non-Kubernetes services are taken as-is; only Kubernetes services
			// are deduplicated/merged across clusters below.
			index += len(svcs)
			services = append(services, svcs...)
		} else {
			for _, s := range svcs {
				previous, ok := smap[s.Hostname]
				if !ok {
					// First time we see a service. The result will have a single service per hostname
					// The first cluster will be listed first, so the services in the primary cluster
					// will be used for default settings. If a service appears in multiple clusters,
					// the order is less clear.
					smap[s.Hostname] = index
					index++
					services = append(services, s)
				} else {
					// We must deepcopy before merge, and after merging, the ClusterVips length will be >= 2.
					// This is an optimization to prevent deepcopy multi-times
					if services[previous].ClusterVIPs.Len() < 2 {
						// Deep copy before merging, otherwise there is a case
						// a service in remote cluster can be deleted, but the ClusterIP left.
						services[previous] = services[previous].DeepCopy()
					}
					// If it is seen second time, that means it is from a different cluster, update cluster VIPs.
					mergeService(services[previous], s, r)
				}
			}
		}
	}
	return services
}
// GetService retrieves a service by hostname if exists
func (c *Controller) GetService(hostname host.Name) *model.Service {
	var merged *model.Service
	for _, registry := range c.GetRegistries() {
		svc := registry.GetService(hostname)
		if svc == nil {
			continue
		}
		// A non-Kubernetes registry match wins outright.
		if registry.Provider() != provider.Kubernetes {
			return svc
		}
		if merged == nil {
			// First Kubernetes hit: deep copy so the merge below cannot mutate
			// the registry's own copy.
			merged = svc.DeepCopy()
		} else {
			// If we are seeing the service for the second time, it means it is available in multiple clusters.
			mergeService(merged, svc, registry)
		}
	}
	return merged
}
// mergeService only merges two clusters' k8s services
func mergeService(dst, src *model.Service, srcRegistry serviceregistry.Instance) {
	srcCluster := srcRegistry.Cluster()
	if !src.Ports.Equals(dst.Ports) {
		log.Debugf("service %s defined from cluster %s is different from others", src.Hostname, srcRegistry.Cluster())
	}
	// Prefer the k8s HostVIPs where possible
	if len(dst.ClusterVIPs.GetAddressesFor(srcCluster)) == 0 {
		dst.ClusterVIPs.SetAddressesFor(srcCluster, src.ClusterVIPs.GetAddressesFor(srcCluster))
	}
}
// NetworkGateways merges the service-based cross-network gateways from each registry.
func (c *Controller) NetworkGateways() []model.NetworkGateway {
	var merged []model.NetworkGateway
	for _, registry := range c.GetRegistries() {
		merged = append(merged, registry.NetworkGateways()...)
	}
	return merged
}
// MCSServices concatenates the multi-cluster service info from every registry.
func (c *Controller) MCSServices() []model.MCSServiceInfo {
	var merged []model.MCSServiceInfo
	for _, registry := range c.GetRegistries() {
		merged = append(merged, registry.MCSServices()...)
	}
	return merged
}
// nodeClusterID returns the cluster ID from the proxy metadata, or "" when unset.
func nodeClusterID(node *model.Proxy) cluster.ID {
	if md := node.Metadata; md != nil && md.ClusterID != "" {
		return md.ClusterID
	}
	return ""
}
// Skip the service registry when there won't be a match
// because the proxy is in a different cluster.
func skipSearchingRegistryForProxy(nodeClusterID cluster.ID, r serviceregistry.Instance) bool {
	// Always search non-kube (usually serviceentry) registries, and check every
	// registry when the proxy did not report a cluster ID.
	if nodeClusterID == "" || r.Provider() != provider.Kubernetes {
		return false
	}
	return !r.Cluster().Equals(nodeClusterID)
}
// GetProxyServiceTargets lists service instances co-located with a given proxy
func (c *Controller) GetProxyServiceTargets(node *model.Proxy) []model.ServiceTarget {
	out := make([]model.ServiceTarget, 0)
	proxyCluster := nodeClusterID(node)
	for _, registry := range c.GetRegistries() {
		// Skip registries that can never match the proxy's cluster.
		if skipSearchingRegistryForProxy(proxyCluster, registry) {
			log.Debugf("GetProxyServiceTargets(): not searching registry %v: proxy %v CLUSTER_ID is %v",
				registry.Cluster(), node.ID, proxyCluster)
			continue
		}
		if targets := registry.GetProxyServiceTargets(node); len(targets) > 0 {
			out = append(out, targets...)
		}
	}
	return out
}
// GetProxyWorkloadLabels returns the workload labels for the proxy from the
// first matching registry, or nil when none is found.
func (c *Controller) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Instance {
	proxyCluster := nodeClusterID(proxy)
	for _, registry := range c.GetRegistries() {
		// If proxy clusterID unset, we may find incorrect workload label.
		// This can not happen in k8s env.
		if proxyCluster != "" && proxyCluster != registry.Cluster() {
			continue
		}
		if lbls := registry.GetProxyWorkloadLabels(proxy); lbls != nil {
			return lbls
		}
	}
	return nil
}
// Run starts all the controllers
func (c *Controller) Run(stop <-chan struct{}) {
	c.storeLock.Lock()
	for _, r := range c.registries {
		// prefer the per-registry stop channel
		registryStop := stop
		if s := r.stop; s != nil {
			registryStop = s
		}
		go r.Run(registryStop)
	}
	// Mark running under the lock so AddRegistryAndRun can tell whether it
	// needs to start the registry itself.
	c.running = true
	c.storeLock.Unlock()

	// Block until the server-wide stop fires.
	<-stop
	log.Info("Registry Aggregator terminated")
}
// HasSynced returns true when all registries have synced
func (c *Controller) HasSynced() bool {
	for _, registry := range c.GetRegistries() {
		if registry.HasSynced() {
			continue
		}
		log.Debugf("registry %s is syncing", registry.Cluster())
		return false
	}
	return true
}
// AppendServiceHandler registers a handler that receives service events from
// every registry (see addRegistry wiring).
func (c *Controller) AppendServiceHandler(f model.ServiceHandler) {
	c.handlers.AppendServiceHandler(f)
}
// AppendWorkloadHandler is intentionally a no-op on the aggregate controller.
func (c *Controller) AppendWorkloadHandler(f func(*model.WorkloadInstance, model.Event)) {
	// Currently, it is not used.
	// Note: take care when you want to enable it, it will register the handlers to all registries
	// c.handlers.AppendWorkloadHandler(f)
}
// AppendServiceHandlerForCluster registers a service handler that only
// receives events for the given cluster, creating the per-cluster handler set
// on first use.
func (c *Controller) AppendServiceHandlerForCluster(id cluster.ID, f model.ServiceHandler) {
	c.storeLock.Lock()
	defer c.storeLock.Unlock()
	handler, exists := c.handlersByCluster[id]
	if !exists {
		handler = &model.ControllerHandlers{}
		c.handlersByCluster[id] = handler
	}
	handler.AppendServiceHandler(f)
}
// UnRegisterHandlersForCluster drops all per-cluster handlers registered for
// the given cluster ID.
func (c *Controller) UnRegisterHandlersForCluster(id cluster.ID) {
	c.storeLock.Lock()
	defer c.storeLock.Unlock()
	delete(c.handlersByCluster, id)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceregistry
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
)
// Instance of a service registry. A single service registry combines the capabilities of service discovery
// and the controller for managing asynchronous events.
type Instance interface {
	model.Controller
	model.ServiceDiscovery

	// Provider backing this service registry (i.e. Kubernetes etc.)
	Provider() provider.ID

	// Cluster for which the service registry applies. Only needed for multicluster systems.
	Cluster() cluster.ID
}

// Compile-time assertion that Simple implements Instance.
var _ Instance = &Simple{}

// DiscoveryController groups the controller and discovery capabilities an
// underlying implementation must provide to back a Simple registry.
type DiscoveryController interface {
	model.Controller
	model.ServiceDiscovery
}

// Simple Instance implementation, where fields are set individually.
type Simple struct {
	// ProviderID identifies the backing platform.
	ProviderID provider.ID
	// ClusterID identifies the cluster this registry serves.
	ClusterID cluster.ID
	DiscoveryController
}

// Provider returns the configured provider ID.
func (r Simple) Provider() provider.ID {
	return r.ProviderID
}

// Cluster returns the configured cluster ID.
func (r Simple) Cluster() cluster.ID {
	return r.ClusterID
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"net/netip"
"strings"
"sync"
"google.golang.org/protobuf/proto"
v1 "k8s.io/api/core/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
k8sbeta "sigs.k8s.io/gateway-api/apis/v1beta1"
"istio.io/api/networking/v1alpha3"
apiv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
kubeutil "istio.io/istio/pkg/kube"
kubelabels "istio.io/istio/pkg/kube/labels"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi"
)
// AmbientIndex answers address lookups for the ambient mesh: workloads,
// services and waypoints, keyed by uid, network/IP or namespace/hostname.
type AmbientIndex interface {
	// Lookup returns the addresses matching a single key.
	Lookup(key string) []*model.AddressInfo
	// All returns every known address.
	All() []*model.AddressInfo
	// WorkloadsForWaypoint returns the workloads captured by the given waypoint scope.
	WorkloadsForWaypoint(scope model.WaypointScope) []*model.WorkloadInfo
	// Waypoint returns the waypoint addresses for the given scope.
	Waypoint(scope model.WaypointScope) []netip.Addr
	// CalculateUpdatedWorkloads recomputes workload state for the given pods,
	// workload entries and service-entry endpoints; the returned keys identify
	// what changed (presumably to drive pushes — confirm with callers).
	CalculateUpdatedWorkloads(pods map[string]*v1.Pod,
		workloadEntries map[networkAddress]*apiv1alpha3.WorkloadEntry,
		seEndpoints map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry],
		c *Controller) map[model.ConfigKey]struct{}
	// HandleSelectedNamespace processes the pods and services of a namespace
	// selected for ambient.
	HandleSelectedNamespace(ns string, pods []*v1.Pod, services []*v1.Service, c *Controller)
}
// AmbientIndexImpl maintains an index of ambient WorkloadInfo objects by various keys.
// These are intentionally pre-computed based on events such that lookups are efficient.
type AmbientIndexImpl struct {
	// mu guards every map below; reads take RLock, event processing takes Lock.
	mu sync.RWMutex
	// byService indexes by Service namespaced hostname. A given Service can map to
	// many workloads associated, indexed by workload uid.
	byService map[string]map[string]*model.WorkloadInfo
	// byPod indexes by network/podIP address.
	// NOTE: prefer byUID to iterate over all workloads.
	byPod map[networkAddress]*model.WorkloadInfo
	// byWorkloadEntry indexes by WorkloadEntry IP address.
	// NOTE: avoid using this index for anything other than Lookup().
	// this map is incomplete and lacks workloads without an address
	// (i.e. multi network workloads proxying remote service)
	byWorkloadEntry map[networkAddress]*model.WorkloadInfo
	// byUID indexes all workloads by their uid
	byUID map[string]*model.WorkloadInfo
	// serviceByAddr are indexed by the network/clusterIP
	serviceByAddr map[networkAddress]*model.ServiceInfo
	// serviceByNamespacedHostname are indexed by the namespace/hostname
	serviceByNamespacedHostname map[string]*model.ServiceInfo
	// TODO(nmittler): Add serviceByHostname to support on-demand for DNS.

	// Map of Scope -> address
	waypoints map[model.WaypointScope]*workloadapi.GatewayAddress

	// map of service entry name/namespace to the service entry.
	// used on pod updates to add VIPs to pods from service entries.
	// also used on service entry updates to cleanup any old VIPs from pods/workloads maps.
	servicesMap map[types.NamespacedName]*apiv1alpha3.ServiceEntry
}
// workloadToAddressInfo wraps a Workload in the AddressInfo envelope used by
// address lookups.
func workloadToAddressInfo(w *workloadapi.Workload) *model.AddressInfo {
	addr := &workloadapi.Address{
		Type: &workloadapi.Address_Workload{Workload: w},
	}
	return &model.AddressInfo{Address: addr}
}
// serviceToAddressInfo wraps a Service in the AddressInfo envelope used by
// address lookups.
func serviceToAddressInfo(s *workloadapi.Service) *model.AddressInfo {
	addr := &workloadapi.Address{
		Type: &workloadapi.Address_Service{Service: s},
	}
	return &model.AddressInfo{Address: addr}
}
// name format: <cluster>/<group>/<kind>/<namespace>/<name></section-name>
// Pods have an empty group, hence the "//" following the cluster.
func (c *Controller) generatePodUID(p *v1.Pod) string {
	return c.clusterID.String() + "//Pod/" + p.Namespace + "/" + p.Name
}
// Lookup finds the list of AddressInfos for a given key.
// network/IP -> return associated pod Workload or the Service and its corresponding Workloads
// namespace/hostname -> return the Service and its corresponding Workloads
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) Lookup(key string) []*model.AddressInfo {
	a.mu.RLock()
	defer a.mu.RUnlock()

	// uid is primary key, attempt lookup first
	if wl, f := a.byUID[key]; f {
		return []*model.AddressInfo{workloadToAddressInfo(wl.Workload)}
	}

	// All remaining key forms are "<left>/<right>".
	network, ip, found := strings.Cut(key, "/")
	if !found {
		log.Warnf(`key (%v) did not contain the expected "/" character`, key)
		return nil
	}
	res := make([]*model.AddressInfo, 0)
	if _, err := netip.ParseAddr(ip); err != nil {
		// this must be namespace/hostname format
		// lookup Service and any Workloads for that Service for each of the network addresses
		if svc, f := a.serviceByNamespacedHostname[key]; f {
			res = append(res, serviceToAddressInfo(svc.Service))
			for _, wl := range a.byService[key] {
				res = append(res, workloadToAddressInfo(wl.Workload))
			}
		}
		return res
	}

	networkAddr := networkAddress{network: network, ip: ip}
	// First look at pod...
	if p, f := a.byPod[networkAddr]; f {
		return []*model.AddressInfo{workloadToAddressInfo(p.Workload)}
	}
	// Next, look at WorkloadEntries
	if w, f := a.byWorkloadEntry[networkAddr]; f {
		return []*model.AddressInfo{workloadToAddressInfo(w.Workload)}
	}
	// Fallback to service. Note: these IP ranges should be non-overlapping
	// When a Service lookup is performed, but it and its workloads are returned
	if s, exists := a.serviceByAddr[networkAddr]; exists {
		res = append(res, serviceToAddressInfo(s.Service))
		for _, wl := range a.byService[s.ResourceName()] {
			res = append(res, workloadToAddressInfo(wl.Workload))
		}
	}
	return res
}
// dropWorkloadFromService removes the workload uid from the byService index
// for the given namespaced hostname. No-op if the service is not indexed.
func (a *AmbientIndexImpl) dropWorkloadFromService(namespacedHostname string, workloadUID string) {
	if workloads, ok := a.byService[namespacedHostname]; ok {
		delete(workloads, workloadUID)
	}
}
// insertWorkloadToService adds the workload to the byService index under the
// given namespaced hostname, creating the inner map on first use.
func (a *AmbientIndexImpl) insertWorkloadToService(namespacedHostname string, workload *model.WorkloadInfo) {
	workloads, ok := a.byService[namespacedHostname]
	if !ok {
		workloads = map[string]*model.WorkloadInfo{}
		a.byService[namespacedHostname] = workloads
	}
	workloads[workload.Uid] = workload
}
// updateWaypoint applies a waypoint address add/delete for the given scope to
// all indexed workloads, returning the config keys that changed.
func (a *AmbientIndexImpl) updateWaypoint(sa model.WaypointScope, addr *workloadapi.GatewayAddress, isDelete bool) map[model.ConfigKey]struct{} {
	updates := sets.New[model.ConfigKey]()
	// Delegates the per-workload update to updateWaypointForWorkload (defined elsewhere).
	a.updateWaypointForWorkload(a.byUID, sa, addr, isDelete, updates)
	return updates
}
// All return all known addresses. Result is un-ordered
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) All() []*model.AddressInfo {
	a.mu.RLock()
	defer a.mu.RUnlock()
	res := make([]*model.AddressInfo, 0, len(a.byUID)+len(a.serviceByNamespacedHostname))
	for _, wl := range a.byUID {
		netAddrs := networkAddressFromWorkload(wl)
		// Workload without an (optional) address is possible and should still be included
		if len(netAddrs) == 0 {
			res = append(res, workloadToAddressInfo(wl.Workload))
			continue
		}

		// We need to determine whether we encounter, while iterating over the Workloads, a WorkloadEntry that has
		// a network address similar to another Workload (Pod).
		// In such cases we don't include the WorkloadEntry in the result and warn the users about it.
		// WorkloadEntry will have a single network address
		netAddr := netAddrs[0]
		p := a.byPod[netAddr]
		we := a.byWorkloadEntry[netAddr]
		// Skip when this workload's address is claimed by both a Pod and a
		// WorkloadEntry and this workload is not the Pod — i.e. a colliding
		// WorkloadEntry (assumes Pods win collisions — confirm with index writers).
		if p != nil && we != nil && wl.GetUid() != p.GetUid() {
			log.Warnf("Skipping WorkloadEntry %s as in Ambient it can't have the same address of another workload on the same network", wl.GetName())
			continue
		}
		res = append(res, workloadToAddressInfo(wl.Workload))
	}
	for _, s := range a.serviceByNamespacedHostname {
		res = append(res, serviceToAddressInfo(s.Service))
	}
	return res
}
// WorkloadsForWaypoint returns all workload information matching the scope.
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) WorkloadsForWaypoint(scope model.WaypointScope) []*model.WorkloadInfo {
	a.mu.RLock()
	defer a.mu.RUnlock()
	// TODO: try to precompute
	all := model.SortWorkloadsByCreationTime(maps.Values(a.byUID))
	var matched []*model.WorkloadInfo
	for _, wl := range all {
		if a.matchesScope(scope, wl) {
			matched = append(matched, wl)
		}
	}
	return matched
}
// WorkloadsForWaypoint returns all workload information matching the scope.
// Delegates to the ambient index, which handles its own locking.
func (c *Controller) WorkloadsForWaypoint(scope model.WaypointScope) []*model.WorkloadInfo {
	return c.ambientIndex.WorkloadsForWaypoint(scope)
}
// Waypoint returns the addresses of the waypoints matching the scope.
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) Waypoint(scope model.WaypointScope) []netip.Addr {
a.mu.RLock()
defer a.mu.RUnlock()
// TODO need to handle case where waypoints are dualstack/have multiple addresses
if addr, f := a.waypoints[scope]; f {
switch address := addr.Destination.(type) {
case *workloadapi.GatewayAddress_Address:
if ip, ok := netip.AddrFromSlice(address.Address.GetAddress()); ok {
return []netip.Addr{ip}
}
case *workloadapi.GatewayAddress_Hostname:
// TODO
}
}
// Now look for namespace-wide
scope.ServiceAccount = ""
if addr, f := a.waypoints[scope]; f {
switch address := addr.Destination.(type) {
case *workloadapi.GatewayAddress_Address:
if ip, ok := netip.AddrFromSlice(address.Address.GetAddress()); ok {
return []netip.Addr{ip}
}
case *workloadapi.GatewayAddress_Hostname:
// TODO
}
}
return nil
}
// Waypoint finds all waypoint IP addresses for a given scope. Performs first a Namespace+ServiceAccount
// then falls back to any Namespace wide waypoints
func (c *Controller) Waypoint(scope model.WaypointScope) []netip.Addr {
	// Delegates to the ambient index, which handles its own locking.
	return c.ambientIndex.Waypoint(scope)
}
// matchesScope reports whether workload w is served by a waypoint with the
// given scope. Waypoints themselves never match. A namespace-wide scope is
// overridden by a more specific service-account scoped waypoint, if one exists.
func (a *AmbientIndexImpl) matchesScope(scope model.WaypointScope, w *model.WorkloadInfo) bool {
	if w.Namespace != scope.Namespace {
		return false
	}
	// Waypoints never attach to other waypoints.
	if w.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel {
		return false
	}
	if scope.ServiceAccount != "" {
		// Service-account scoped waypoint: require an exact SA match.
		return w.ServiceAccount == scope.ServiceAccount
	}
	// Namespace-wide waypoint: applies unless an SA-scoped waypoint exists
	// for this workload's service account (SA scope takes precedence).
	_, saScoped := a.waypoints[model.WaypointScope{Namespace: scope.Namespace, ServiceAccount: w.ServiceAccount}]
	return !saScoped
}
// constructService converts a Kubernetes Service into the ambient ServiceInfo
// model: its ports (service port -> numeric target port) and one
// NetworkAddress per VIP (ClusterIP plus LoadBalancer ingress IPs).
func (c *Controller) constructService(svc *v1.Service) *model.ServiceInfo {
	ports := make([]*workloadapi.Port, 0, len(svc.Spec.Ports))
	for _, p := range svc.Spec.Ports {
		ports = append(ports, &workloadapi.Port{
			ServicePort: uint32(p.Port),
			TargetPort:  uint32(p.TargetPort.IntVal),
		})
	}
	// TODO this is only checking one controller - we may be missing service vips for instances in another cluster
	vips := getVIPs(svc)
	addrs := make([]*workloadapi.NetworkAddress, 0, len(vips))
	for _, vip := range vips {
		// BUGFIX: previously used netip.MustParseAddr, which panics the whole
		// controller on a malformed VIP. Parse defensively and skip bad values.
		ip, err := netip.ParseAddr(vip)
		if err != nil {
			log.Warnf("skipping invalid VIP %q for service %s/%s: %v", vip, svc.Namespace, svc.Name, err)
			continue
		}
		addrs = append(addrs, &workloadapi.NetworkAddress{
			Network: c.Network(vip, make(labels.Instance, 0)).String(),
			Address: ip.AsSlice(),
		})
	}
	return &model.ServiceInfo{
		Service: &workloadapi.Service{
			Name:      svc.Name,
			Namespace: svc.Namespace,
			Hostname:  c.hostname(svc),
			Addresses: addrs,
			Ports:     ports,
		},
	}
}
// hostname returns the cluster-internal DNS hostname for the service,
// built from its name, namespace, and the configured domain suffix.
func (c *Controller) hostname(svc *v1.Service) string {
	return string(kube.ServiceHostname(svc.Name, svc.Namespace, c.opts.DomainSuffix))
}
// namespacedHostname returns the "<namespace>/<hostname>" index key for the service.
func (c *Controller) namespacedHostname(svc *v1.Service) string {
	return namespacedHostname(svc.Namespace, c.hostname(svc))
}
// namespacedHostname builds the "<namespace>/<hostname>" key format shared
// by the service indexes.
func namespacedHostname(namespace, hostname string) string {
	key := make([]byte, 0, len(namespace)+1+len(hostname))
	key = append(key, namespace...)
	key = append(key, '/')
	key = append(key, hostname...)
	return string(key)
}
// NOTE: Mutex is locked prior to being called.
//
// extractWorkload converts a Pod into the ambient WorkloadInfo model,
// resolving its waypoint (SA-scoped first, then namespace-wide) and
// attaching the authorization and peer-authentication policies that select
// it. Returns nil for pods that are nil, not running, or on host network.
func (a *AmbientIndexImpl) extractWorkload(p *v1.Pod, c *Controller) *model.WorkloadInfo {
	if p == nil || !IsPodRunning(p) || p.Spec.HostNetwork {
		return nil
	}
	var waypoint *workloadapi.GatewayAddress
	if p.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel {
		// Waypoints do not have waypoints
	} else {
		// First check for a waypoint for our SA explicit
		found := false
		if waypoint, found = a.waypoints[model.WaypointScope{Namespace: p.Namespace, ServiceAccount: p.Spec.ServiceAccountName}]; !found {
			// if there are none, check namespace wide waypoints
			waypoint = a.waypoints[model.WaypointScope{Namespace: p.Namespace}]
		}
	}
	// Collect both selector-based AuthorizationPolicies and the policies
	// synthesized from PeerAuthentications selecting this pod.
	policies := c.selectorAuthorizationPolicies(p.Namespace, p.Labels)
	policies = append(policies, c.convertedSelectorPeerAuthentications(p.Namespace, p.Labels)...)
	wl := a.constructWorkload(p, waypoint, policies, c)
	if wl == nil {
		return nil
	}
	return &model.WorkloadInfo{
		Workload:     wl,
		Labels:       p.Labels,
		Source:       model.WorkloadSourcePod,
		CreationTime: p.CreationTimestamp.Time,
	}
}
// setupIndex constructs the ambient index and registers all event handlers
// (pods, services, Kubernetes Gateways, and — on the config cluster only —
// WorkloadEntries and ServiceEntries) that keep it up to date. Each handler
// takes the index mutex, applies the change, and pushes an XDS update when
// anything changed.
func (c *Controller) setupIndex() *AmbientIndexImpl {
	idx := AmbientIndexImpl{
		byService:                   map[string]map[string]*model.WorkloadInfo{},
		byPod:                       map[networkAddress]*model.WorkloadInfo{},
		byWorkloadEntry:             map[networkAddress]*model.WorkloadInfo{},
		byUID:                       map[string]*model.WorkloadInfo{},
		waypoints:                   map[model.WaypointScope]*workloadapi.GatewayAddress{},
		serviceByAddr:               map[networkAddress]*model.ServiceInfo{},
		serviceByNamespacedHostname: map[string]*model.ServiceInfo{},
		servicesMap:                 map[types.NamespacedName]*apiv1alpha3.ServiceEntry{},
	}
	podHandler := func(old, pod *v1.Pod, ev model.Event) error {
		log.Debugf("ambient podHandler pod %s/%s, event %v", pod.Namespace, pod.Name, ev)
		idx.mu.Lock()
		defer idx.mu.Unlock()
		updates := idx.handlePod(old, pod, ev, c)
		if len(updates) > 0 {
			c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
				ConfigsUpdated: updates,
				Reason:         model.NewReasonStats(model.AmbientUpdate),
			})
		}
		return nil
	}
	registerHandlers[*v1.Pod](c, c.podsClient, "", podHandler, nil)
	// We only handle WLE and SE from config cluster, otherwise we could get duplicate workload from remote clusters.
	if c.configCluster {
		// Handle WorkloadEntries.
		c.configController.RegisterEventHandler(gvk.WorkloadEntry, func(oldCfg config.Config, newCfg config.Config, ev model.Event) {
			// Convert the generic config.Config pair into typed WorkloadEntries;
			// the old entry only exists (and matters) for updates.
			var oldWkEntrySpec *v1alpha3.WorkloadEntry
			if ev == model.EventUpdate {
				oldWkEntrySpec = serviceentry.ConvertWorkloadEntry(oldCfg)
			}
			var oldWkEntry *apiv1alpha3.WorkloadEntry
			if oldWkEntrySpec != nil {
				oldWkEntry = &apiv1alpha3.WorkloadEntry{
					ObjectMeta: oldCfg.ToObjectMeta(),
					Spec:       *oldWkEntrySpec.DeepCopy(),
				}
			}
			newWkEntrySpec := serviceentry.ConvertWorkloadEntry(newCfg)
			var newWkEntry *apiv1alpha3.WorkloadEntry
			if newWkEntrySpec != nil {
				newWkEntry = &apiv1alpha3.WorkloadEntry{
					ObjectMeta: newCfg.ToObjectMeta(),
					Spec:       *newWkEntrySpec.DeepCopy(),
				}
			}
			idx.mu.Lock()
			defer idx.mu.Unlock()
			updates := idx.handleWorkloadEntry(oldWkEntry, newWkEntry, ev == model.EventDelete, c)
			if len(updates) > 0 {
				c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
					Full:           false,
					ConfigsUpdated: updates,
					Reason:         model.NewReasonStats(model.AmbientUpdate),
				})
			}
		})
		// Handle ServiceEntries.
		c.configController.RegisterEventHandler(gvk.ServiceEntry, func(_ config.Config, newCfg config.Config, ev model.Event) {
			newSvcEntrySpec := serviceentry.ConvertServiceEntry(newCfg)
			var newSvcEntry *apiv1alpha3.ServiceEntry
			if newSvcEntrySpec != nil {
				newSvcEntry = &apiv1alpha3.ServiceEntry{
					ObjectMeta: newCfg.ToObjectMeta(),
					Spec:       *newSvcEntrySpec.DeepCopy(),
				}
			}
			idx.mu.Lock()
			defer idx.mu.Unlock()
			updates := idx.handleServiceEntry(newSvcEntry, ev, c)
			if len(updates) > 0 {
				c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
					Full:           false,
					ConfigsUpdated: updates,
					Reason:         model.NewReasonStats(model.AmbientUpdate),
				})
			}
		})
	}
	c.configController.RegisterEventHandler(gvk.AuthorizationPolicy, c.AuthorizationPolicyHandler)
	c.configController.RegisterEventHandler(gvk.PeerAuthentication, c.PeerAuthenticationHandler)
	serviceHandler := func(old, svc *v1.Service, ev model.Event) error {
		log.Debugf("ambient serviceHandler service %s/%s, event %v", svc.Namespace, svc.Name, ev)
		var updates sets.Set[model.ConfigKey]
		idx.mu.Lock()
		defer idx.mu.Unlock()
		switch ev {
		case model.EventAdd:
			updates = idx.handleService(svc, ev, c)
		case model.EventUpdate:
			// Updates are modeled as delete(old) + add(new).
			// TODO(hzxuzhonghu): handle svc update within `handleService`, so that we donot need to check event type here.
			updates = idx.handleService(old, model.EventDelete, c)
			updates2 := idx.handleService(svc, ev, c)
			if updates == nil {
				updates = updates2
			} else {
				// BUGFIX: previously this called updates.Union(updates2) and
				// discarded the result (Union returns a new set rather than
				// mutating the receiver), silently dropping the add-side
				// updates. Merge mutates `updates` in place.
				updates.Merge(updates2)
			}
		case model.EventDelete:
			updates = idx.handleService(svc, ev, c)
		}
		if len(updates) > 0 {
			c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
				ConfigsUpdated: updates,
				Reason:         model.NewReasonStats(model.AmbientUpdate),
			})
		}
		return nil
	}
	registerHandlers[*v1.Service](c, c.services, "", serviceHandler, nil)
	kubeGatewayHandler := func(old, newGateway *k8sbeta.Gateway, ev model.Event) error {
		log.Debugf("ambient kubeGatewayHandler gateway %s/%s, event %v", newGateway.Namespace, newGateway.Name, ev)
		idx.handleKubeGateway(old, newGateway, ev, c)
		return nil
	}
	// initNetworkManager initializes the gatewayResourceClient, it should not be re-initialized in setupIndex
	registerHandlers[*k8sbeta.Gateway](c, c.gatewayResourceClient, "", kubeGatewayHandler, nil)
	return &idx
}
// NOTE: Mutex is locked prior to being called.
//
// handlePod processes a pod add/update/delete and returns the set of
// Address config keys that changed as a result (possibly empty).
func (a *AmbientIndexImpl) handlePod(old, p *v1.Pod, ev model.Event, c *Controller) sets.Set[model.ConfigKey] {
	if old != nil {
		// Skip no-op updates: only labels, annotations, pod phase, and
		// readiness affect the generated workload.
		if maps.Equal(old.Labels, p.Labels) &&
			maps.Equal(old.Annotations, p.Annotations) &&
			old.Status.Phase == p.Status.Phase &&
			IsPodReady(old) == IsPodReady(p) {
			return nil
		}
	}
	updates := sets.New[model.ConfigKey]()
	var wl *model.WorkloadInfo
	if ev != model.EventDelete {
		wl = a.extractWorkload(p, c)
	}
	uid := c.generatePodUID(p)
	oldWl := a.byUID[uid]
	if wl == nil {
		// Delete event, or the pod is no longer eligible (not running, host
		// network, ...): drop any previously indexed state.
		a.updateWorkloadIndexes(oldWl, nil, updates)
		return updates
	}
	// No semantic change in the generated workload: nothing to push.
	if oldWl != nil && proto.Equal(wl.Workload, oldWl.Workload) {
		log.Debugf("%v: no change, skipping", wl.ResourceName())
		return updates
	}
	a.updateWorkloadIndexes(oldWl, wl, updates)
	return updates
}
// updateWorkloadIndexes, given an old and new instance, updates the various indexes for workloads.
// Any changes are reported in `updates`. Passing newWl == nil removes the
// workload entirely; oldWl == nil inserts it fresh.
func (a *AmbientIndexImpl) updateWorkloadIndexes(oldWl *model.WorkloadInfo, newWl *model.WorkloadInfo, updates sets.Set[model.ConfigKey]) {
	if newWl == nil {
		if oldWl == nil {
			// No change needed
			return
		}
		updates.Insert(model.ConfigKey{Kind: kind.Address, Name: oldWl.ResourceName()})
		// Remove from the per-source address index (pods vs workload entries).
		for _, addr := range networkAddressFromWorkload(oldWl) {
			if oldWl.Source == model.WorkloadSourcePod {
				delete(a.byPod, addr)
			} else {
				delete(a.byWorkloadEntry, addr)
			}
		}
		delete(a.byUID, oldWl.Uid)
		// If we already knew about this workload, we need to make sure we drop all service references as well
		for namespacedHostname := range oldWl.Services {
			a.dropWorkloadFromService(namespacedHostname, oldWl.ResourceName())
		}
		return
	}
	updates.Insert(model.ConfigKey{Kind: kind.Address, Name: newWl.ResourceName()})
	for _, networkAddr := range networkAddressFromWorkload(newWl) {
		if newWl.Source == model.WorkloadSourcePod {
			a.byPod[networkAddr] = newWl
		} else {
			a.byWorkloadEntry[networkAddr] = newWl
		}
	}
	a.byUID[newWl.Uid] = newWl
	if oldWl != nil {
		// For updates, we will drop the service and then add the new ones back. This could be optimized
		for namespacedHostname := range oldWl.Services {
			a.dropWorkloadFromService(namespacedHostname, oldWl.ResourceName())
		}
	}
	// Update the service indexes as well, as needed
	for namespacedHostname := range newWl.Services {
		a.insertWorkloadToService(namespacedHostname, newWl)
	}
}
// networkAddressFromWorkload builds the (network, ip) index keys for each
// raw address attached to the workload.
func networkAddressFromWorkload(wl *model.WorkloadInfo) []networkAddress {
	networkAddrs := make([]networkAddress, 0, len(wl.Addresses))
	for _, addr := range wl.Addresses {
		// NOTE(review): the AddrFromSlice error is ignored; malformed bytes
		// would yield the zero Addr ("invalid IP") as the key — presumably
		// addresses are validated upstream, verify.
		ip, _ := netip.AddrFromSlice(addr)
		networkAddrs = append(networkAddrs, networkAddress{network: wl.Network, ip: ip.String()})
	}
	return networkAddrs
}
// toInternalNetworkAddresses converts workloadapi addresses into the internal
// (network, ip) index keys, silently dropping entries whose raw bytes are not
// a valid IP.
func toInternalNetworkAddresses(nwAddrs []*workloadapi.NetworkAddress) []networkAddress {
	out := make([]networkAddress, 0, len(nwAddrs))
	for _, nwAddr := range nwAddrs {
		ip, ok := netip.AddrFromSlice(nwAddr.Address)
		if !ok {
			continue
		}
		out = append(out, networkAddress{
			network: nwAddr.Network,
			ip:      ip.String(),
		})
	}
	return out
}
// NOTE: Mutex is locked prior to being called.
//
// handleService processes a Service add or delete (updates are delivered by
// the caller as delete(old)+add(new)), refreshing the workloads it selects
// and the service indexes. Returns the changed Address config keys.
func (a *AmbientIndexImpl) handleService(svc *v1.Service, ev model.Event, c *Controller) sets.Set[model.ConfigKey] {
	updates := sets.New[model.ConfigKey]()
	si := c.constructService(svc)
	networkAddrs := toInternalNetworkAddresses(si.GetAddresses())
	// Re-extract every selected pod so its Services mapping reflects this event.
	pods := c.getPodsInService(svc)
	for _, p := range pods {
		// Can be nil if it's not ready, hostNetwork, etc
		uid := c.generatePodUID(p)
		oldWl := a.byUID[uid]
		wl := a.extractWorkload(p, c)
		a.updateWorkloadIndexes(oldWl, wl, updates)
	}
	// Optionally, Kubernetes Services may also select WorkloadEntries.
	if features.EnableK8SServiceSelectWorkloadEntries {
		workloadEntries := c.getSelectedWorkloadEntries(svc.GetNamespace(), svc.Spec.Selector)
		for _, w := range workloadEntries {
			uid := c.generateWorkloadEntryUID(w.Namespace, w.Name)
			oldWl := a.byUID[uid]
			wl := a.extractWorkloadEntry(w, c)
			a.updateWorkloadIndexes(oldWl, wl, updates)
		}
	}
	namespacedName := si.ResourceName()
	if ev == model.EventDelete {
		for _, networkAddr := range networkAddrs {
			delete(a.serviceByAddr, networkAddr)
		}
		delete(a.serviceByNamespacedHostname, si.ResourceName())
		// Cleanup byService fully here. We don't use DeleteCleanupLast so we can distinguish between an empty service and missing service.
		delete(a.byService, namespacedName)
		updates.Insert(model.ConfigKey{Kind: kind.Address, Name: namespacedName})
	} else {
		for _, networkAddr := range networkAddrs {
			a.serviceByAddr[networkAddr] = si
		}
		a.serviceByNamespacedHostname[namespacedName] = si
		updates.Insert(model.ConfigKey{Kind: kind.Address, Name: namespacedName})
	}
	return updates
}
// handleKubeGateway indexes waypoint addresses from Kubernetes Gateway
// resources of the waypoint gateway class and pushes XDS updates for the
// workloads whose waypoint changed.
func (a *AmbientIndexImpl) handleKubeGateway(_, gateway *k8sbeta.Gateway, event model.Event, c *Controller) {
	// gateway.Status.Addresses should only be populated once the Waypoint's deployment has at least 1 ready pod, it should never be removed after going ready
	// ignore Kubernetes Gateways which aren't waypoints
	// TODO: should this be WaypointGatewayClass or matches a label?
	if gateway.Spec.GatewayClassName == constants.WaypointGatewayClassName && len(gateway.Status.Addresses) > 0 {
		// Scope is namespace-wide unless the waypoint SA annotation is set.
		scope := model.WaypointScope{Namespace: gateway.Namespace, ServiceAccount: gateway.Annotations[constants.WaypointServiceAccount]}
		// Default HBONE mTLS port; an HBONE listener on the gateway overrides it.
		waypointPort := uint32(15008)
		for _, l := range gateway.Spec.Listeners {
			if l.Protocol == k8sbeta.ProtocolType(protocol.HBONE) {
				waypointPort = uint32(l.Port)
			}
		}
		ip, err := netip.ParseAddr(gateway.Status.Addresses[0].Value)
		if err != nil {
			// This should be a transient error when upgrading, when the Kube Gateway status is updated it should write an IP address
			log.Errorf("Unable to parse IP address in status of %v/%v/%v", gvk.KubernetesGateway, gateway.Namespace, gateway.Name)
			return
		}
		addr := &workloadapi.GatewayAddress{
			Destination: &workloadapi.GatewayAddress_Address{
				Address: &workloadapi.NetworkAddress{
					Network: c.Network(ip.String(), make(labels.Instance, 0)).String(),
					Address: ip.AsSlice(),
				},
			},
			HboneMtlsPort: waypointPort,
		}
		updates := sets.New[model.ConfigKey]()
		a.mu.Lock()
		defer a.mu.Unlock()
		if event == model.EventDelete {
			delete(a.waypoints, scope)
			updates.Merge(a.updateWaypoint(scope, addr, true))
		} else if !proto.Equal(a.waypoints[scope], addr) {
			// Only re-index and push when the address actually changed.
			a.waypoints[scope] = addr
			updates.Merge(a.updateWaypoint(scope, addr, false))
		}
		if len(updates) > 0 {
			log.Debug("Waypoint ready: Pushing Updates")
			c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
				ConfigsUpdated: updates,
				Reason:         model.NewReasonStats(model.AmbientUpdate),
			})
		}
	}
	// NOTE(review): delete events are only processed while Status.Addresses is
	// still populated and parseable; a delete arriving without addresses is
	// silently ignored — confirm that is the intended lifecycle.
}
// getPodsInService lists the pods selected by the service's label selector
// within the service's namespace.
func (c *Controller) getPodsInService(svc *v1.Service) []*v1.Pod {
	if svc.Spec.Selector == nil {
		// services with nil selectors match nothing, not everything.
		return nil
	}
	return c.podsClient.List(svc.Namespace, klabels.ValidatedSetSelector(svc.Spec.Selector))
}
// AddressInformation returns all AddressInfo's in the cluster.
// This may be scoped to specific subsets by specifying a non-empty addresses field.
// The second return value is the set of requested addresses with no match.
func (c *Controller) AddressInformation(addresses sets.String) ([]*model.AddressInfo, sets.String) {
	if len(addresses) == 0 {
		// Full update
		return c.ambientIndex.All(), nil
	}
	var found []*model.AddressInfo
	missing := sets.New[string]()
	seen := sets.String{}
	for requested := range addresses {
		matches := c.ambientIndex.Lookup(requested)
		if len(matches) == 0 {
			missing.Insert(requested)
			continue
		}
		// Deduplicate by resource name across lookups.
		for _, info := range matches {
			name := info.ResourceName()
			if seen.Contains(name) {
				continue
			}
			seen.Insert(name)
			found = append(found, info)
		}
	}
	return found, missing
}
// constructWorkload builds the workloadapi.Workload for a pod: its addresses,
// the Kubernetes services selecting it (TCP ports only) plus any selecting
// ServiceEntries, health derived from pod readiness, trust domain, and
// tunnel/waypoint configuration.
//
// NOTE: Mutex is locked prior to being called.
func (a *AmbientIndexImpl) constructWorkload(pod *v1.Pod, waypoint *workloadapi.GatewayAddress, policies []string,
	c *Controller,
) *workloadapi.Workload {
	workloadServices := map[string]*workloadapi.PortList{}
	allServices := c.services.List(pod.Namespace, klabels.Everything())
	if services := getPodServices(allServices, pod); len(services) > 0 {
		for _, svc := range services {
			// Build the ports for the service.
			ports := &workloadapi.PortList{}
			for _, port := range svc.Spec.Ports {
				// Only TCP ports are represented in ambient.
				if port.Protocol != v1.ProtocolTCP {
					continue
				}
				targetPort, err := FindPort(pod, &port)
				if err != nil {
					log.Debug(err)
					continue
				}
				ports.Ports = append(ports.Ports, &workloadapi.Port{
					ServicePort: uint32(port.Port),
					TargetPort:  uint32(targetPort),
				})
			}
			workloadServices[c.namespacedHostname(svc)] = ports
		}
	}
	addresses := make([][]byte, 0, len(pod.Status.PodIPs))
	for _, podIP := range pod.Status.PodIPs {
		addresses = append(addresses, parseIP(podIP.IP))
	}
	// ServiceEntries may also select this pod by labels.
	for nsName, ports := range a.getWorkloadServicesFromServiceEntries(nil, pod.GetNamespace(), pod.Labels) {
		workloadServices[nsName] = ports
	}
	wl := &workloadapi.Workload{
		Uid:                   c.generatePodUID(pod),
		Name:                  pod.Name,
		Addresses:             addresses,
		Hostname:              pod.Spec.Hostname,
		Network:               c.Network(pod.Status.PodIP, pod.Labels).String(),
		Namespace:             pod.Namespace,
		ServiceAccount:        pod.Spec.ServiceAccountName,
		Node:                  pod.Spec.NodeName,
		Services:              workloadServices,
		AuthorizationPolicies: policies,
		Status:                workloadapi.WorkloadStatus_HEALTHY,
		ClusterId:             c.Cluster().String(),
		Waypoint:              waypoint,
	}
	// Health follows pod readiness.
	if !IsPodReady(pod) {
		wl.Status = workloadapi.WorkloadStatus_UNHEALTHY
	}
	// Only record a trust domain when it differs from the default.
	if td := spiffe.GetTrustDomain(); td != "cluster.local" {
		wl.TrustDomain = td
	}
	wl.WorkloadName, wl.WorkloadType = workloadNameAndType(pod)
	wl.CanonicalName, wl.CanonicalRevision = kubelabels.CanonicalService(pod.Labels, wl.WorkloadName)
	if pod.Annotations[constants.AmbientRedirection] == constants.AmbientRedirectionEnabled {
		// Configured for override
		wl.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
	}
	// Otherwise supports tunnel directly
	if model.SupportsTunnel(pod.Labels, model.TunnelHTTP) {
		wl.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
		wl.NativeTunnel = true
	}
	return wl
}
// parseIP converts a textual IP address into its raw byte representation
// (4 bytes for IPv4, 16 for IPv6). Returns nil for invalid input.
func parseIP(ip string) []byte {
	if addr, err := netip.ParseAddr(ip); err == nil {
		return addr.AsSlice()
	}
	return nil
}
// internal object used for indexing in ambientindex maps
type networkAddress struct {
	network string // network the address belongs to
	ip      string // textual IP address
}

// String renders the index key as "<network>/<ip>".
func (n *networkAddress) String() string {
	return n.network + "/" + n.ip
}
// getVIPs collects the virtual IPs for a service: its ClusterIP (when set and
// not "None", i.e. not a headless service) plus any LoadBalancer ingress IPs.
func getVIPs(svc *v1.Service) []string {
	vips := make([]string, 0)
	if cip := svc.Spec.ClusterIP; cip != "" && cip != v1.ClusterIPNone {
		vips = append(vips, cip)
	}
	// IPs are strictly optional for loadbalancers - they may just have a hostname.
	for _, ingress := range svc.Status.LoadBalancer.Ingress {
		if ingress.IP != "" {
			vips = append(vips, ingress.IP)
		}
	}
	return vips
}
// AdditionalPodSubscriptions computes extra addresses the proxy should be
// subscribed to: pods that belong to services already subscribed to, plus
// (as an optimization) all workloads on the proxy's own node.
func (c *Controller) AdditionalPodSubscriptions(
	proxy *model.Proxy,
	allAddresses sets.String,
	currentSubs sets.String,
) sets.String {
	subs := sets.New[string]()
	// First, we want to handle VIP subscriptions. Example:
	// Client subscribes to VIP1. Pod1, part of VIP1, is sent.
	// The client wouldn't be explicitly subscribed to Pod1, so it would normally ignore it.
	// Since it is a part of VIP1 which we are subscribe to, add it to the subscriptions
	for address := range allAddresses {
		for _, workload := range model.ExtractWorkloadsFromAddresses(c.ambientIndex.Lookup(address)) {
			// We may have gotten an update for Pod, but are subscribed to a Service.
			// We need to force a subscription on the Pod as well
			for svcHost := range workload.Services {
				if currentSubs.Contains(svcHost) {
					subs.Insert(workload.ResourceName())
					break
				}
			}
		}
	}
	// Next, as an optimization, we will send all node-local endpoints
	if nodeName := proxy.Metadata.NodeName; nodeName != "" {
		for _, workload := range model.ExtractWorkloadsFromAddresses(c.ambientIndex.All()) {
			if workload.Node != nodeName {
				continue
			}
			name := workload.ResourceName()
			if !currentSubs.Contains(name) {
				subs.Insert(name)
			}
		}
	}
	return subs
}
// syncAllWorkloadsForAmbient refreshes all ambient workloads.
// It is a no-op when ambient is disabled (no index) or when no discovery
// namespace filter is configured.
func (c *Controller) syncAllWorkloadsForAmbient() {
	if c.ambientIndex == nil {
		return
	}
	if c.opts.DiscoveryNamespacesFilter == nil {
		return
	}
	for _, ns := range c.opts.DiscoveryNamespacesFilter.GetMembers().UnsortedList() {
		pods := c.podsClient.List(ns, klabels.Everything())
		services := c.services.List(ns, klabels.Everything())
		c.ambientIndex.HandleSelectedNamespace(ns, pods, services, c)
	}
}
// workloadNameAndType maps a pod to the name and type of its controlling
// workload (Deployment/Job/CronJob), falling back to the pod itself for
// any other (or no) controller kind.
func workloadNameAndType(pod *v1.Pod) (string, workloadapi.WorkloadType) {
	objMeta, typeMeta := kubeutil.GetDeployMetaFromPod(pod)
	switch typeMeta.Kind {
	case "Deployment":
		return objMeta.Name, workloadapi.WorkloadType_DEPLOYMENT
	case "Job":
		return objMeta.Name, workloadapi.WorkloadType_JOB
	case "CronJob":
		return objMeta.Name, workloadapi.WorkloadType_CRONJOB
	default:
		return pod.Name, workloadapi.WorkloadType_POD
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"fmt"
"net/netip"
"google.golang.org/protobuf/proto"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"istio.io/api/networking/v1alpha3"
apiv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
kubelabels "istio.io/istio/pkg/kube/labels"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi"
)
// handleServiceEntry processes a ServiceEntry add/update/delete: it refreshes
// the workloads the entry selects (pods and WorkloadEntries), indexes its
// inlined endpoints, and maintains the per-host service indexes. Returns the
// changed Address config keys.
//
// NOTE (from call sites): the index mutex is locked prior to being called.
func (a *AmbientIndexImpl) handleServiceEntry(svcEntry *apiv1alpha3.ServiceEntry, event model.Event, c *Controller) sets.Set[model.ConfigKey] {
	if len(svcEntry.Spec.Hosts) == 0 {
		log.Warnf("ServiceEntry %s/%s is invalid as it has no hosts", svcEntry.GetNamespace(), svcEntry.GetName())
		return sets.New[model.ConfigKey]()
	}
	// We will accrue updates as we update our internal state
	updates := sets.New[model.ConfigKey]()
	if event != model.EventAdd {
		a.cleanupOldWorkloadEntriesInlinedOnServiceEntry(svcEntry, updates, c)
	}
	serviceEntryNamespacedName := types.NamespacedName{
		Name:      svcEntry.GetName(),
		Namespace: svcEntry.GetNamespace(),
	}
	// Update indexes
	if event == model.EventDelete {
		// servicesMap is used when cleaning up old WEs inlined on a SE (i.e., `ServiceEntry.endpoints`)
		//
		// prefer this style to better enable us for future support for auto-allocated VIPs on ServiceEntries.
		// this is necessary because not all information is available in the ServiceEntry spec
		delete(a.servicesMap, serviceEntryNamespacedName)
	} else {
		// servicesMap is used when constructing workloads so it must be up to date
		a.servicesMap[serviceEntryNamespacedName] = svcEntry
	}
	// Re-extract every pod selected by this entry's workload selector so its
	// Services mapping reflects this event. An empty selector matches nothing.
	sel := klabels.ValidatedSetSelector(klabels.Set(svcEntry.Spec.WorkloadSelector.GetLabels()))
	var pods []*v1.Pod
	if !sel.Empty() {
		pods = c.podsClient.List(svcEntry.GetNamespace(), sel)
	}
	// wls collects (by UID) every workload this ServiceEntry selects; it
	// becomes the byService entry for each host below.
	wls := make(map[string]*model.WorkloadInfo, len(pods))
	for _, pod := range pods {
		newWl := a.extractWorkload(pod, c)
		if newWl != nil {
			// Update the pod, since it now has new VIP info
			networkAddrs := networkAddressFromWorkload(newWl)
			for _, networkAddr := range networkAddrs {
				a.byPod[networkAddr] = newWl
			}
			a.byUID[c.generatePodUID(pod)] = newWl
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: newWl.ResourceName()})
			wls[newWl.Uid] = newWl
		}
	}
	workloadEntries := c.getSelectedWorkloadEntries(svcEntry.GetNamespace(), svcEntry.Spec.GetWorkloadSelector().GetLabels())
	for _, w := range workloadEntries {
		wl := a.extractWorkloadEntry(w, c)
		// Can be nil if the WorkloadEntry IP has not been mapped yet
		//
		// Note: this is a defensive check that mimics the logic for
		// pods above. WorkloadEntries are mapped by their IP address
		// in the following cases:
		// 1. WorkloadEntry add/update
		// 2. AuthorizationPolicy add/update
		// 3. Namespace Ambient label add/update
		if wl != nil {
			// Update the WorkloadEntry, since it now has new VIP info
			for _, networkAddr := range networkAddressFromWorkload(wl) {
				a.byWorkloadEntry[networkAddr] = wl
			}
			a.byUID[c.generateWorkloadEntryUID(wl.GetNamespace(), wl.GetName())] = wl
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: wl.ResourceName()})
			wls[wl.Uid] = wl
		}
	}
	// Endpoints inlined on the ServiceEntry become synthetic workloads keyed
	// by a ServiceEntry-derived UID; deletes tear them down via a nil newWl.
	for _, we := range svcEntry.Spec.Endpoints {
		uid := c.generateServiceEntryUID(svcEntry.GetNamespace(), svcEntry.GetName(), we.GetAddress())
		oldWl := a.byUID[uid]
		var wl *model.WorkloadInfo
		if event != model.EventDelete {
			wl = a.extractWorkloadEntrySpec(we, svcEntry.GetNamespace(), svcEntry.GetName(), svcEntry, c)
		}
		a.updateWorkloadIndexes(oldWl, wl, updates)
	}
	vips := getVIPsFromServiceEntry(svcEntry)
	var addrs []*workloadapi.NetworkAddress
	for _, vip := range vips {
		addrs = append(addrs, &workloadapi.NetworkAddress{
			Network: c.network.String(),
			Address: parseIP(vip),
		})
	}
	var allPorts []*workloadapi.Port
	for _, port := range svcEntry.Spec.Ports {
		allPorts = append(allPorts, &workloadapi.Port{
			ServicePort: port.Number,
			TargetPort:  port.TargetPort,
		})
	}
	var allSubjAltNames []string
	allSubjAltNames = append(allSubjAltNames, svcEntry.Spec.SubjectAltNames...)
	// for each host make a service info
	var serviceInfos []*model.ServiceInfo
	for _, host := range svcEntry.Spec.Hosts {
		serviceInfos = append(serviceInfos, &model.ServiceInfo{
			Service: &workloadapi.Service{
				Name:            svcEntry.GetName(),
				Namespace:       svcEntry.GetNamespace(),
				Hostname:        host,
				Addresses:       addrs,
				Ports:           allPorts,
				SubjectAltNames: allSubjAltNames,
			},
		})
	}
	// we already validated that there is at least one host at the beginning of this function, so this is safe
	sampleSi := serviceInfos[0]
	networkAddrs := toInternalNetworkAddresses(sampleSi.GetAddresses())
	// We send an update for each *workload* IP address previously in the service; they may have changed
	for _, si := range serviceInfos {
		for _, wl := range a.byService[si.ResourceName()] {
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: wl.ResourceName()})
		}
	}
	// Update indexes
	if event == model.EventDelete {
		for _, networkAddr := range networkAddrs {
			delete(a.serviceByAddr, networkAddr)
		}
		for _, si := range serviceInfos {
			delete(a.byService, si.ResourceName())
			delete(a.serviceByNamespacedHostname, si.ResourceName())
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: si.ResourceName()})
		}
	} else {
		for _, networkAddr := range networkAddrs {
			// in ambient, we only allow a network address to map to a single host. we dedup by just mapping the first one
			a.serviceByAddr[networkAddr] = sampleSi
		}
		for _, si := range serviceInfos {
			a.byService[si.ResourceName()] = wls
			a.serviceByNamespacedHostname[si.ResourceName()] = si
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: si.ResourceName()})
		}
	}
	// Fetch updates again, in case it changed from adding new workloads
	for _, si := range serviceInfos {
		for _, wl := range a.byService[si.ResourceName()] {
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: wl.ResourceName()})
		}
	}
	return updates
}
// getWorkloadEntriesInPolicy returns the WorkloadEntries a policy's selector
// matches. Policies in the mesh root namespace select across all namespaces.
func (c *Controller) getWorkloadEntriesInPolicy(ns string, sel map[string]string) []*apiv1alpha3.WorkloadEntry {
	lookupNS := ns
	if lookupNS == c.meshWatcher.Mesh().GetRootNamespace() {
		lookupNS = metav1.NamespaceAll
	}
	return c.getSelectedWorkloadEntries(lookupNS, sel)
}
// getServiceEntryEndpointsInPolicy returns, per ServiceEntry, the inlined
// endpoints a policy's selector matches. Policies in the mesh root namespace
// select across all namespaces.
func (c *Controller) getServiceEntryEndpointsInPolicy(ns string, sel map[string]string) map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry] {
	lookupNS := ns
	if lookupNS == c.meshWatcher.Mesh().GetRootNamespace() {
		lookupNS = metav1.NamespaceAll
	}
	return c.getSelectedServiceEntries(lookupNS, sel)
}
// NOTE: Mutex is locked prior to being called.
//
// extractWorkloadEntry converts a standalone (non-inlined) WorkloadEntry into
// WorkloadInfo, stamping the entry's own creation time. Returns nil when the
// entry is nil or cannot be represented.
func (a *AmbientIndexImpl) extractWorkloadEntry(w *apiv1alpha3.WorkloadEntry, c *Controller) *model.WorkloadInfo {
	if w == nil {
		return nil
	}
	info := a.extractWorkloadEntrySpec(&w.Spec, w.Namespace, w.Name, nil, c)
	if info == nil {
		return nil
	}
	info.CreationTime = w.CreationTimestamp.Time
	return info
}
// extractWorkloadEntrySpec converts a WorkloadEntry spec into WorkloadInfo,
// resolving its waypoint (SA-scoped first, then namespace-wide) and the
// authorization policies selecting it. parentServiceEntry is non-nil when the
// entry is inlined in a ServiceEntry's endpoints; in that case the workload
// source and creation time come from the ServiceEntry.
//
// NOTE: Mutex is locked prior to being called.
func (a *AmbientIndexImpl) extractWorkloadEntrySpec(w *v1alpha3.WorkloadEntry, ns, name string,
	parentServiceEntry *apiv1alpha3.ServiceEntry, c *Controller,
) *model.WorkloadInfo {
	if w == nil {
		return nil
	}
	var waypoint *workloadapi.GatewayAddress
	if w.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel {
		// Waypoints do not have waypoints
	} else {
		// First check for a waypoint for our SA explicit
		// TODO: this is not robust against temporary waypoint downtime. We also need the users intent (Gateway).
		found := false
		if waypoint, found = a.waypoints[model.WaypointScope{Namespace: ns, ServiceAccount: w.ServiceAccount}]; !found {
			// if there are none, check namespace wide waypoints
			waypoint = a.waypoints[model.WaypointScope{Namespace: ns}]
		}
	}
	// NOTE(review): unlike extractWorkload (pods), converted selector
	// PeerAuthentications are not appended here — confirm intentional.
	policies := c.selectorAuthorizationPolicies(ns, w.Labels)
	wl := a.constructWorkloadFromWorkloadEntry(w, ns, name, parentServiceEntry, waypoint, policies, c)
	if wl == nil {
		return nil
	}
	source := model.WorkloadSourceWorkloadEntry
	if parentServiceEntry != nil {
		source = model.WorkloadSourceServiceEntry
	}
	wli := &model.WorkloadInfo{
		Workload: wl,
		Labels:   w.Labels,
		Source:   source,
	}
	if parentServiceEntry != nil {
		wli.CreationTime = parentServiceEntry.CreationTimestamp.Time
	}
	return wli
}
// NOTE: Mutex is locked prior to being called.
//
// handleWorkloadEntry processes a WorkloadEntry add/update/delete and returns
// the changed Address config keys (nil for no-op updates).
func (a *AmbientIndexImpl) handleWorkloadEntry(oldWorkloadEntry, w *apiv1alpha3.WorkloadEntry, isDelete bool, c *Controller) sets.Set[model.ConfigKey] {
	if oldWorkloadEntry != nil {
		// compare only labels, annotations, and spec; which are what we care about
		if proto.Equal(&oldWorkloadEntry.Spec, &w.Spec) &&
			maps.Equal(oldWorkloadEntry.Annotations, w.Annotations) &&
			maps.Equal(oldWorkloadEntry.Labels, w.Labels) {
			return nil
		}
	}
	updates := sets.New[model.ConfigKey]()
	var wl *model.WorkloadInfo
	if !isDelete {
		wl = a.extractWorkloadEntry(w, c)
	}
	wlNetwork := c.Network(w.Spec.Address, w.Spec.Labels).String()
	// Entries without an address (not yet mapped) have no network index key.
	var networkAddr *networkAddress
	if addr := w.Spec.Address; addr != "" {
		networkAddr = &networkAddress{network: wlNetwork, ip: addr}
	}
	uid := c.generateWorkloadEntryUID(w.GetNamespace(), w.GetName())
	oldWl := a.byUID[uid]
	if wl == nil {
		// This is an explicit delete event, or there is no longer a Workload to create (VM NotReady, etc)
		if networkAddr != nil {
			delete(a.byWorkloadEntry, *networkAddr)
		}
		delete(a.byUID, uid)
		if oldWl != nil {
			// If we already knew about this workload, we need to make sure we drop all service references as well
			for namespacedHostname := range oldWl.Services {
				a.dropWorkloadFromService(namespacedHostname, oldWl.ResourceName())
			}
			log.Debugf("%v: workload removed, pushing", oldWl.ResourceName())
			return map[model.ConfigKey]struct{}{
				// TODO: namespace for network?
				{Kind: kind.Address, Name: oldWl.ResourceName()}: {},
			}
		}
		// It was a 'delete' for a resource we didn't know yet, no need to send an event
		return updates
	}
	if oldWl != nil && proto.Equal(wl.Workload, oldWl.Workload) {
		log.Debugf("%v: no change, skipping", wl.ResourceName())
		return updates
	}
	if networkAddr != nil {
		a.byWorkloadEntry[*networkAddr] = wl
	}
	a.byUID[uid] = wl
	if oldWl != nil {
		// For updates, we will drop the services and then add the new ones back. This could be optimized
		// NOTE(review): this drop uses wl.ResourceName() where
		// updateWorkloadIndexes uses the OLD workload's name; both come from
		// the same uid lookup here so they should be identical — confirm.
		for namespacedHostname := range oldWl.Services {
			a.dropWorkloadFromService(namespacedHostname, wl.ResourceName())
		}
	}
	// Update the service indexes as well, as needed
	for namespacedHostname := range wl.Services {
		a.insertWorkloadToService(namespacedHostname, wl)
	}
	log.Debugf("%v: workload updated, pushing", wl.ResourceName())
	updates.Insert(model.ConfigKey{Kind: kind.Address, Name: wl.ResourceName()})
	return updates
}
// constructWorkloadFromWorkloadEntry builds the ztunnel workloadapi.Workload for a
// WorkloadEntry spec. The entry may be standalone (parentServiceEntry == nil) or
// inlined in a ServiceEntry's `endpoints` (parentServiceEntry != nil); the two cases
// differ in how the UID and the attached services are derived.
//
// Returns nil when workloadEntry is nil or when its address is a DNS name
// (DNS resolution is not implemented in ztunnel — see below).
func (a *AmbientIndexImpl) constructWorkloadFromWorkloadEntry(workloadEntry *v1alpha3.WorkloadEntry, workloadEntryNamespace, workloadEntryName string,
	parentServiceEntry *apiv1alpha3.ServiceEntry, waypoint *workloadapi.GatewayAddress, policies []string, c *Controller,
) *workloadapi.Workload {
	if workloadEntry == nil {
		return nil
	}
	// Map of namespaced hostname -> ports, accumulated from every source that can
	// attach a service to this workload.
	workloadServices := map[string]*workloadapi.PortList{}
	// Kubernetes Services selecting this WorkloadEntry by labels (feature-gated).
	services := getWorkloadEntryServices(c.services.List(workloadEntryNamespace, klabels.Everything()), workloadEntry)
	if features.EnableK8SServiceSelectWorkloadEntries && len(services) > 0 {
		for _, svc := range services {
			ports := &workloadapi.PortList{}
			for _, port := range svc.Spec.Ports {
				// Only TCP service ports are relevant for ztunnel.
				if port.Protocol != v1.ProtocolTCP {
					continue
				}
				targetPort, err := findPortForWorkloadEntry(workloadEntry, &port)
				if err != nil {
					log.Errorf("error looking up port for WorkloadEntry %s/%s", workloadEntryNamespace, workloadEntryName)
					continue
				}
				ports.Ports = append(ports.Ports, &workloadapi.Port{
					ServicePort: uint32(port.Port),
					TargetPort:  targetPort,
				})
			}
			workloadServices[c.namespacedHostname(svc)] = ports
		}
	}
	// for constructing a workload from a standalone workload entry, which can be selected by many service entries
	if parentServiceEntry == nil {
		for nsName, ports := range a.getWorkloadServicesFromServiceEntries(workloadEntry, workloadEntryNamespace, workloadEntry.Labels) {
			workloadServices[nsName] = ports
		}
	}
	// for constructing workloads with a single parent (inlined on a SE)
	if parentServiceEntry != nil {
		ports := getPortsForServiceEntry(parentServiceEntry, workloadEntry)
		for _, host := range parentServiceEntry.Spec.Hosts {
			workloadServices[namespacedHostname(parentServiceEntry.GetNamespace(), host)] = ports
		}
	}
	var addrBytes []byte
	if workloadEntry.Address != "" {
		// this can fail if the address is DNS, e.g. "external.external-1-15569.svc.cluster.local"
		addr, err := netip.ParseAddr(workloadEntry.Address)
		if err != nil {
			log.Errorf("skipping ambient workload generation for workload entry %s/%s."+
				"client DNS address resolution is not implemented in ztunnel yet: requested address: %v",
				workloadEntryNamespace, workloadEntryName, workloadEntry.Address)
			return nil
		}
		addrBytes = addr.AsSlice()
	}
	// Standalone entries get a WorkloadEntry UID; inlined entries get a
	// ServiceEntry UID keyed by the endpoint address.
	uid := c.generateWorkloadEntryUID(workloadEntryNamespace, workloadEntryName)
	if parentServiceEntry != nil {
		uid = c.generateServiceEntryUID(parentServiceEntry.GetNamespace(), parentServiceEntry.GetName(), workloadEntry.Address)
	}
	// The entry's explicit network (if any) overrides the inferred one.
	network := c.Network(workloadEntry.Address, workloadEntry.Labels).String()
	if workloadEntry.Network != "" {
		network = workloadEntry.Network
	}
	var addresses [][]byte
	if addrBytes != nil {
		addresses = [][]byte{addrBytes}
	}
	wl := &workloadapi.Workload{
		Uid:                   uid,
		Name:                  workloadEntryName,
		Namespace:             workloadEntryNamespace,
		Addresses:             addresses,
		Network:               network,
		ServiceAccount:        workloadEntry.ServiceAccount,
		Services:              workloadServices,
		AuthorizationPolicies: policies,
		Waypoint:              waypoint,
		ClusterId:             c.Cluster().String(),
	}
	// Only carry a non-default trust domain.
	if td := spiffe.GetTrustDomain(); td != "cluster.local" {
		wl.TrustDomain = td
	}
	wl.WorkloadName, wl.WorkloadType = workloadEntryName, workloadapi.WorkloadType_POD // XXX(shashankram): HACK to impersonate pod
	wl.CanonicalName, wl.CanonicalRevision = kubelabels.CanonicalService(workloadEntry.Labels, wl.WorkloadName)
	isMeshExternal := parentServiceEntry != nil && parentServiceEntry.Spec.Location == v1alpha3.ServiceEntry_MESH_EXTERNAL
	// TODO(ambient): For VMs we use Labels instead of an Annotations since we don't
	// have access to the top level WorkloadEntry object. Maybe this is fine?
	if workloadEntry.Labels[constants.AmbientRedirection] == constants.AmbientRedirectionEnabled && !isMeshExternal {
		// Configured for override
		wl.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
	}
	// Otherwise supports tunnel directly
	if model.SupportsTunnel(workloadEntry.Labels, model.TunnelHTTP) {
		wl.TunnelProtocol = workloadapi.TunnelProtocol_HBONE
		wl.NativeTunnel = true
	}
	return wl
}
// updateWaypointForWorkload updates the Waypoint configuration for every workload
// (Pod/WorkloadEntry) in byWorkload that falls inside the given scope, recording a
// config key in updates for each workload whose waypoint actually changed.
// On delete, a workload whose waypoint matched addr falls back to the
// namespace-scoped waypoint (if the deleted scope was service-account scoped).
func (a *AmbientIndexImpl) updateWaypointForWorkload(byWorkload map[string]*model.WorkloadInfo, scope model.WaypointScope,
	addr *workloadapi.GatewayAddress, isDelete bool, updates sets.Set[model.ConfigKey],
) {
	for _, workload := range byWorkload {
		// Waypoints themselves never get a waypoint attached.
		if workload.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel {
			continue
		}
		if workload.Namespace != scope.Namespace {
			continue
		}
		if scope.ServiceAccount != "" && workload.ServiceAccount != scope.ServiceAccount {
			continue
		}
		if isDelete {
			// Only clear workloads that were pointing at the removed waypoint.
			if workload.Waypoint == nil || !proto.Equal(workload.Waypoint, addr) {
				continue
			}
			// Fall back to the namespace-wide waypoint, if one exists.
			var fallback *workloadapi.GatewayAddress
			if scope.ServiceAccount != "" {
				fallback = a.waypoints[model.WaypointScope{Namespace: workload.Namespace}]
			}
			workload.Waypoint = fallback
			// If there was a change, also update the VIPs and record for a push
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: workload.ResourceName()})
			continue
		}
		// If the workload has no waypoint, or the waypoint is for a SA, update it
		if workload.Waypoint == nil || (scope.ServiceAccount != "" && !proto.Equal(workload.Waypoint, addr)) {
			workload.Waypoint = addr
			// If there was a change, also update the VIPs and record for a push
			updates.Insert(model.ConfigKey{Kind: kind.Address, Name: workload.ResourceName()})
		}
	}
}
// getWorkloadEntryServices returns the subset of services whose selector matches
// the labels of the given WorkloadEntry.
func getWorkloadEntryServices(services []*v1.Service, workloadEntry *v1alpha3.WorkloadEntry) []*v1.Service {
	var matched []*v1.Service
	for _, svc := range services {
		sel := svc.Spec.Selector
		if sel == nil {
			// services with nil selectors match nothing, not everything.
			continue
		}
		if !labels.Instance(sel).SubsetOf(workloadEntry.Labels) {
			continue
		}
		matched = append(matched, svc)
	}
	return matched
}
// findPortForWorkloadEntry resolves the target port for a service port on a
// WorkloadEntry: a port with a matching name on the entry wins, then an integer
// targetPort on the service port, and finally the service port number itself.
func findPortForWorkloadEntry(workloadEntry *v1alpha3.WorkloadEntry, svcPort *v1.ServicePort) (uint32, error) {
	switch {
	case workloadEntry == nil:
		return 0, fmt.Errorf("invalid input, got nil WorkloadEntry")
	case svcPort == nil:
		return 0, fmt.Errorf("invalid input, got nil ServicePort")
	}
	// Named port on the entry takes precedence.
	if portVal, ok := workloadEntry.Ports[svcPort.Name]; ok {
		return portVal, nil
	}
	if svcPort.TargetPort.Type == intstr.Int {
		return uint32(svcPort.TargetPort.IntValue()), nil
	}
	return uint32(svcPort.Port), nil
}
// getSelectedWorkloadEntries returns the WorkloadEntries in ns whose labels are a
// superset of selector. Returns nil on non-config clusters or for empty selectors.
func (c *Controller) getSelectedWorkloadEntries(ns string, selector map[string]string) []*apiv1alpha3.WorkloadEntry {
	// skip WLE for non config clusters
	if !c.configCluster {
		return nil
	}
	// k8s services and service entry workloadSelector with empty selectors match nothing, not everything.
	if len(selector) == 0 {
		return nil
	}
	sel := labels.Instance(selector)
	var selected []*apiv1alpha3.WorkloadEntry
	for _, entry := range c.getControllerWorkloadEntries(ns) {
		if sel.SubsetOf(entry.Spec.Labels) {
			selected = append(selected, entry)
		}
	}
	return selected
}
// getControllerWorkloadEntries lists the WorkloadEntry configs in ns from the
// config controller and converts each into a typed apiv1alpha3.WorkloadEntry,
// skipping configs whose spec cannot be converted.
//
// Returns nil when no convertible entries exist.
func (c *Controller) getControllerWorkloadEntries(ns string) []*apiv1alpha3.WorkloadEntry {
	var allWorkloadEntries []*apiv1alpha3.WorkloadEntry
	allUnstructuredWorkloadEntries := c.configController.List(gvk.WorkloadEntry, ns)
	for _, w := range allUnstructuredWorkloadEntries {
		conv := serviceentry.ConvertWorkloadEntry(w)
		if conv == nil {
			continue
		}
		// Named `we` (not `c`) to avoid shadowing the receiver.
		we := &apiv1alpha3.WorkloadEntry{
			ObjectMeta: w.ToObjectMeta(),
			Spec:       *conv.DeepCopy(),
		}
		allWorkloadEntries = append(allWorkloadEntries, we)
	}
	return allWorkloadEntries
}
// getSelectedServiceEntries maps each ServiceEntry in ns to the set of its inlined
// endpoints (WorkloadEntries) whose labels are a superset of selector.
// Returns nil on non-config clusters or for empty selectors.
func (c *Controller) getSelectedServiceEntries(ns string, selector map[string]string) map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry] {
	// skip WLE for non config clusters
	if !c.configCluster {
		return nil
	}
	// k8s services and service entry workloadSelector with empty selectors match nothing, not everything.
	if len(selector) == 0 {
		return nil
	}
	sel := labels.Instance(selector)
	seEndpoints := map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry]{}
	for _, se := range c.getControllerServiceEntries(ns) {
		for _, endpoint := range se.Spec.Endpoints {
			if !sel.SubsetOf(endpoint.Labels) {
				continue
			}
			if _, ok := seEndpoints[se]; !ok {
				seEndpoints[se] = sets.New[*v1alpha3.WorkloadEntry]()
			}
			seEndpoints[se].Insert(endpoint)
		}
	}
	return seEndpoints
}
// getControllerServiceEntries lists the ServiceEntry configs in ns from the
// config controller and converts each into a typed apiv1alpha3.ServiceEntry,
// skipping configs whose spec cannot be converted.
//
// Returns nil when no convertible entries exist.
func (c *Controller) getControllerServiceEntries(ns string) []*apiv1alpha3.ServiceEntry {
	var allServiceEntries []*apiv1alpha3.ServiceEntry
	allUnstructuredServiceEntries := c.configController.List(gvk.ServiceEntry, ns)
	for _, se := range allUnstructuredServiceEntries {
		conv := serviceentry.ConvertServiceEntry(se)
		if conv == nil {
			continue
		}
		// Named `entry` (not `c`) to avoid shadowing the receiver.
		entry := &apiv1alpha3.ServiceEntry{
			ObjectMeta: se.ToObjectMeta(),
			Spec:       *conv.DeepCopy(),
		}
		allServiceEntries = append(allServiceEntries, entry)
	}
	return allServiceEntries
}
// generateWorkloadEntryUID builds a UID of the form
// <cluster>/networking.istio.io/WorkloadEntry/<namespace>/<name>.
// If the WorkloadEntry is inlined in a ServiceEntry, a section name is needed;
// callers should use generateServiceEntryUID instead.
func (c *Controller) generateWorkloadEntryUID(wkEntryNamespace, wkEntryName string) string {
	return strings.Join([]string{c.clusterID.String(), "networking.istio.io", "WorkloadEntry", wkEntryNamespace, wkEntryName}, "/")
}
// generateServiceEntryUID builds a UID of the form
// <cluster>/networking.istio.io/ServiceEntry/<namespace>/<name>/<address>.
// The section name is the WorkloadEntry address, which must be stable across
// ServiceEntry updates (WE addresses are assumed unique).
func (c *Controller) generateServiceEntryUID(svcEntryNamespace, svcEntryName, addr string) string {
	return strings.Join([]string{c.clusterID.String(), "networking.istio.io", "ServiceEntry", svcEntryNamespace, svcEntryName, addr}, "/")
}
// cleanupOldWorkloadEntriesInlinedOnServiceEntry drops index state for any
// WorkloadEntries that were generated from the previous version of this
// ServiceEntry's `endpoints`, recording a push for each removed workload.
// It must run before a.servicesMap is updated, to account for VIP auto allocation
// (not all information is available in the ServiceEntry spec alone).
func (a *AmbientIndexImpl) cleanupOldWorkloadEntriesInlinedOnServiceEntry(svcEntry *apiv1alpha3.ServiceEntry,
	updates sets.Set[model.ConfigKey], c *Controller,
) {
	key := types.NamespacedName{
		Name:      svcEntry.GetName(),
		Namespace: svcEntry.GetNamespace(),
	}
	oldServiceEntry, found := a.servicesMap[key]
	if !found {
		return
	}
	for _, oldEndpoint := range oldServiceEntry.Spec.Endpoints {
		oldUID := c.generateServiceEntryUID(key.Namespace, key.Name, oldEndpoint.Address)
		workload, ok := a.byUID[oldUID]
		if !ok {
			continue
		}
		updates.Insert(model.ConfigKey{Kind: kind.Address, Name: workload.ResourceName()})
		for _, na := range networkAddressFromWorkload(workload) {
			delete(a.byWorkloadEntry, na)
		}
		delete(a.byUID, oldUID)
	}
}
// getWorkloadServicesFromServiceEntries returns the services (namespaced hostname
// -> ports) contributed by selector-based ServiceEntries that select the given
// workload labels in the workload's namespace.
func (a *AmbientIndexImpl) getWorkloadServicesFromServiceEntries(workloadEntry *v1alpha3.WorkloadEntry,
	workloadNamespace string, workloadLabels map[string]string,
) map[string]*workloadapi.PortList {
	result := map[string]*workloadapi.PortList{}
	for _, se := range a.servicesMap {
		switch {
		case se.GetNamespace() != workloadNamespace:
			// service entry can only select workloads in the same namespace
			continue
		case se.Spec.WorkloadSelector == nil:
			// nothing to do. we construct the ztunnel config if `endpoints` are provided in the service entry handler
			continue
		case se.Spec.Endpoints != nil:
			// it is an error to provide both `endpoints` and `workloadSelector` in a service entry
			continue
		}
		if !labels.Instance(se.Spec.WorkloadSelector.Labels).SubsetOf(workloadLabels) {
			continue
		}
		ports := getPortsForServiceEntry(se, workloadEntry)
		for _, host := range se.Spec.Hosts {
			result[namespacedHostname(se.GetNamespace(), host)] = ports
		}
	}
	return result
}
// getVIPsFromServiceEntry returns a copy of the addresses explicitly configured on
// the ServiceEntry. In the future this may also include cluster VIPs from
// svc.ClusterVIPs and auto-allocated VIPs.
func getVIPsFromServiceEntry(svc *apiv1alpha3.ServiceEntry) []string {
	// append to a nil slice preserves nil when there are no addresses.
	return append([]string(nil), svc.Spec.Addresses...)
}
// getPortsForServiceEntry converts the ServiceEntry's ports into a workloadapi
// PortList, applying any by-name target-port overrides from the WorkloadEntry.
// Returns nil when the ServiceEntry defines no ports.
func getPortsForServiceEntry(svcEntry *apiv1alpha3.ServiceEntry, we *v1alpha3.WorkloadEntry) *workloadapi.PortList {
	if len(svcEntry.Spec.Ports) == 0 {
		// preserve the nil PortList for port-less entries
		return nil
	}
	portList := &workloadapi.PortList{Ports: make([]*workloadapi.Port, 0, len(svcEntry.Spec.Ports))}
	for _, port := range svcEntry.Spec.Ports {
		target := port.GetTargetPort()
		// take WE override port if necessary
		if we != nil {
			if override, ok := we.Ports[port.Name]; ok {
				target = override
			}
		}
		portList.Ports = append(portList.Ports, &workloadapi.Port{
			ServicePort: port.GetNumber(),
			TargetPort:  target,
		})
	}
	return portList
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"fmt"
"net/netip"
"strconv"
"strings"
"google.golang.org/protobuf/proto"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/api/security/v1beta1"
apiv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi/security"
)
const (
	// staticStrictPolicyName names the synthetic DENY Authorization policy that is
	// sent whenever any PeerAuthentication exists (see Policies below).
	staticStrictPolicyName = "istio_converted_static_strict" // use '_' character since those are illegal in k8s names
)
// Policies returns the ztunnel Authorization resources derived from every
// AuthorizationPolicy and PeerAuthentication in the cluster, filtered to the
// requested config keys when requested is non-empty. When any PeerAuthentication
// exists, the synthetic static STRICT policy is appended so it is available to
// ztunnel. Returns nil on non-config clusters.
func (c *Controller) Policies(requested sets.Set[model.ConfigKey]) []*security.Authorization {
	if !c.configCluster {
		return nil
	}
	authzPolicies := c.configController.List(gvk.AuthorizationPolicy, metav1.NamespaceAll)
	peerAuthenticationPolicies := c.configController.List(gvk.PeerAuthentication, metav1.NamespaceAll)
	cfgs := make([]config.Config, 0, len(authzPolicies)+len(peerAuthenticationPolicies))
	cfgs = append(cfgs, authzPolicies...)
	cfgs = append(cfgs, peerAuthenticationPolicies...)

	// Capacity hint: either everything, or just what was requested.
	capacity := len(cfgs)
	if len(requested) > 0 {
		capacity = len(requested)
	}
	res := make([]*security.Authorization, 0, capacity)
	for _, cfg := range cfgs {
		k := model.ConfigKey{
			Kind:      kind.MustFromGVK(cfg.GroupVersionKind),
			Name:      cfg.Name,
			Namespace: cfg.Namespace,
		}
		if k.Kind == kind.PeerAuthentication {
			// PeerAuthentications are synthetic, so prepend our special prefix to the name.
			k.Name = model.GetAmbientPolicyConfigName(k)
		}
		if len(requested) > 0 && !requested.Contains(k) {
			continue
		}
		var pol *security.Authorization
		switch cfg.GroupVersionKind {
		case gvk.AuthorizationPolicy:
			pol = convertAuthorizationPolicy(c.meshWatcher.Mesh().GetRootNamespace(), cfg)
		case gvk.PeerAuthentication:
			pol = convertPeerAuthentication(c.meshWatcher.Mesh().GetRootNamespace(), cfg)
		default:
			log.Errorf("unknown config type %v", cfg.GroupVersionKind)
			continue
		}
		if pol != nil {
			res = append(res, pol)
		}
	}
	// If there are any PeerAuthentications in our cache, send our static STRICT policy.
	if len(peerAuthenticationPolicies) > 0 {
		res = append(res, &security.Authorization{
			Name:      staticStrictPolicyName,
			Namespace: c.meshWatcher.Mesh().GetRootNamespace(),
			Scope:     security.Scope_WORKLOAD_SELECTOR,
			Action:    security.Action_DENY,
			Groups: []*security.Group{{
				Rules: []*security.Rules{{
					Matches: []*security.Match{{
						NotPrincipals: []*security.StringMatch{{
							MatchType: &security.StringMatch_Presence{},
						}},
					}},
				}},
			}},
		})
	}
	return res
}
// convertedSelectorPeerAuthentications returns a list of keys corresponding to one or both of:
// [static STRICT policy, port-level STRICT policy] based on the effective PeerAuthentication policy
// for a workload in namespace ns with labels lbls.
//
// Resolution: the oldest applicable mesh-wide, namespace-wide, and workload-selector
// policies are selected, then processed in mesh -> namespace -> workload order so
// UNSET modes inherit from the broader scope. Port-level overrides on the workload
// policy determine whether the converted per-port policy must also be referenced.
func (c *Controller) convertedSelectorPeerAuthentications(ns string, lbls map[string]string) []string {
	var meshCfg, namespaceCfg, workloadCfg *config.Config
	rootNamespace := c.meshWatcher.Mesh().GetRootNamespace()
	// matches reports whether a policy's selector selects the workload labels.
	matches := func(c config.Config) bool {
		sel := c.Spec.(*v1beta1.PeerAuthentication).Selector
		if sel == nil {
			return false
		}
		return labels.Instance(sel.MatchLabels).SubsetOf(lbls)
	}
	configs := c.configController.List(gvk.PeerAuthentication, rootNamespace)
	configs = append(configs, c.configController.List(gvk.PeerAuthentication, ns)...)
	for i := range configs {
		cfg := configs[i]
		spec, ok := cfg.Spec.(*v1beta1.PeerAuthentication)
		if !ok || spec == nil {
			continue
		}
		if spec.Selector == nil || len(spec.Selector.MatchLabels) == 0 {
			// Namespace-level or mesh-level policy; in a conflict the oldest wins.
			if cfg.Namespace == rootNamespace {
				if meshCfg == nil || cfg.CreationTimestamp.Before(meshCfg.CreationTimestamp) {
					log.Debugf("Switch selected mesh policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
					meshCfg = &cfg
				}
			} else {
				if namespaceCfg == nil || cfg.CreationTimestamp.Before(namespaceCfg.CreationTimestamp) {
					log.Debugf("Switch selected namespace policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
					namespaceCfg = &cfg
				}
			}
		} else if cfg.Namespace != rootNamespace {
			// Workload-level policy, aka the one with selector and not in root namespace.
			if !matches(cfg) {
				continue
			}
			if workloadCfg == nil || cfg.CreationTimestamp.Before(workloadCfg.CreationTimestamp) {
				log.Debugf("Switch selected workload policy to %s.%s (%v)", cfg.Name, cfg.Namespace, cfg.CreationTimestamp)
				workloadCfg = &cfg
			}
		}
	}
	// Whether it comes from a mesh-wide, namespace-wide, or workload-specific policy,
	// if the effective policy is STRICT then reference our static STRICT policy.
	var isEffectiveStrictPolicy bool
	// Only 1 per-port workload policy can be effective at a time. In the case of a
	// conflict the oldest policy wins.
	var effectivePortLevelPolicyKey string
	// Process in mesh, namespace, workload order to resolve inheritance (UNSET).
	if meshCfg != nil {
		meshSpec, ok := meshCfg.Spec.(*v1beta1.PeerAuthentication)
		if ok && !isMtlsModeUnset(meshSpec.Mtls) {
			isEffectiveStrictPolicy = isMtlsModeStrict(meshSpec.Mtls)
		}
	}
	if namespaceCfg != nil {
		namespaceSpec, ok := namespaceCfg.Spec.(*v1beta1.PeerAuthentication)
		if ok && !isMtlsModeUnset(namespaceSpec.Mtls) {
			isEffectiveStrictPolicy = isMtlsModeStrict(namespaceSpec.Mtls)
		}
	}
	if workloadCfg == nil {
		return c.effectivePeerAuthenticationKeys(isEffectiveStrictPolicy, "")
	}
	workloadSpec, ok := workloadCfg.Spec.(*v1beta1.PeerAuthentication)
	if !ok {
		// no workload policy to calculate; go ahead and return the calculated keys
		return c.effectivePeerAuthenticationKeys(isEffectiveStrictPolicy, "")
	}
	// Regardless of port-level overrides, a STRICT workload policy requires our static STRICT policy...
	if isMtlsModeStrict(workloadSpec.Mtls) {
		isEffectiveStrictPolicy = true
	}
	// ...and a PERMISSIVE or DISABLE workload policy suppresses it.
	if isMtlsModePermissive(workloadSpec.Mtls) || isMtlsModeDisable(workloadSpec.Mtls) {
		isEffectiveStrictPolicy = false
	}
	if workloadSpec.PortLevelMtls != nil {
		// anyPortMode reports whether any port-level override satisfies pred.
		// (The original code scanned with four near-identical loops, one of which
		// used `continue` where its siblings used `break`.)
		anyPortMode := func(pred func(*v1beta1.PeerAuthentication_MutualTLS) bool) bool {
			for _, portMtls := range workloadSpec.PortLevelMtls {
				if pred(portMtls) {
					return true
				}
			}
			return false
		}
		// Key referencing the converted per-port form of this workload policy.
		workloadPolicyKey := workloadCfg.Namespace + "/" + model.GetAmbientPolicyConfigName(model.ConfigKey{
			Name:      workloadCfg.Name,
			Kind:      kind.MustFromGVK(workloadCfg.GroupVersionKind),
			Namespace: workloadCfg.Namespace,
		})
		switch workloadSpec.GetMtls().GetMode() {
		case v1beta1.PeerAuthentication_MutualTLS_STRICT:
			if anyPortMode(func(m *v1beta1.PeerAuthentication_MutualTLS) bool {
				return isMtlsModePermissive(m) || isMtlsModeDisable(m)
			}) {
				// A non-strict port exists; reference this workload policy so the
				// port-level exceptions are visible.
				effectivePortLevelPolicyKey = workloadPolicyKey
				isEffectiveStrictPolicy = false // don't send our static STRICT policy since the converted form of this policy will include the default STRICT mode
			}
		case v1beta1.PeerAuthentication_MutualTLS_PERMISSIVE, v1beta1.PeerAuthentication_MutualTLS_DISABLE:
			// There's a STRICT port mode, so we need to reference this policy in the workload.
			if anyPortMode(isMtlsModeStrict) {
				effectivePortLevelPolicyKey = workloadPolicyKey
			}
		default: // Unset; inherit from the namespace/mesh resolution above.
			if isEffectiveStrictPolicy {
				// Strict mesh or namespace policy.
				if anyPortMode(isMtlsModePermissive) {
					// A permissive port exists; reference this workload policy to see
					// the port-level exceptions.
					effectivePortLevelPolicyKey = workloadPolicyKey
				}
			} else {
				// Permissive mesh or namespace policy.
				isEffectiveStrictPolicy = false // any ports that aren't specified will be PERMISSIVE so this workload isn't effectively under a STRICT policy
				// There's a STRICT port mode, so we need to reference this policy in the workload.
				if anyPortMode(isMtlsModeStrict) {
					effectivePortLevelPolicyKey = workloadPolicyKey
				}
			}
		}
	}
	return c.effectivePeerAuthenticationKeys(isEffectiveStrictPolicy, effectivePortLevelPolicyKey)
}
// effectivePeerAuthenticationKeys returns the sorted set of policy keys
// ("namespace/name") a workload should reference after PeerAuthentication
// resolution:
//   - isEffectiveStrictPolicy: include the static STRICT policy in the root namespace.
//   - effectiveWorkloadPolicyKey: include this converted per-port workload policy key, if non-empty.
//
// (Parameter renamed from the original `isEffectiveStringPolicy` typo.)
func (c *Controller) effectivePeerAuthenticationKeys(isEffectiveStrictPolicy bool, effectiveWorkloadPolicyKey string) []string {
	res := sets.New[string]()
	rootNamespace := c.meshWatcher.Mesh().GetRootNamespace()
	if isEffectiveStrictPolicy {
		res.Insert(fmt.Sprintf("%s/%s", rootNamespace, staticStrictPolicyName))
	}
	if effectiveWorkloadPolicyKey != "" {
		res.Insert(effectiveWorkloadPolicyKey)
	}
	return sets.SortedList(res)
}
// selectorAuthorizationPolicies returns the sorted "namespace/name" keys of
// AuthorizationPolicies (root namespace plus ns) whose selector matches lbls.
func (c *Controller) selectorAuthorizationPolicies(ns string, lbls map[string]string) []string {
	matched := sets.New[string]()
	// selects reports whether the policy has a selector matching the labels.
	selects := func(cfg config.Config) bool {
		sel := cfg.Spec.(*v1beta1.AuthorizationPolicy).GetSelector()
		return sel != nil && labels.Instance(sel.MatchLabels).SubsetOf(lbls)
	}
	rootPolicies := c.configController.List(gvk.AuthorizationPolicy, c.meshWatcher.Mesh().GetRootNamespace())
	for _, p := range rootPolicies {
		if selects(p) {
			matched.Insert(p.Namespace + "/" + p.Name)
		}
	}
	localPolicies := c.configController.List(gvk.AuthorizationPolicy, ns)
	for _, p := range localPolicies {
		if selects(p) {
			matched.Insert(p.Namespace + "/" + p.Name)
		}
	}
	return sets.SortedList(matched)
}
// PeerAuthenticationHandler recomputes and pushes ambient workloads affected by a
// PeerAuthentication event.
//
// We can't use the same optimizations as we do for AuthorizationPolicy because we dynamically send
// synthetic authorization policies to the proxy based on the effective mTLS mode. In other words, even
// if the selector of the PeerAuthentication doesn't change (indeed even if the PA has no selector),
// any number of workloads may be affected by its spec changing
func (c *Controller) PeerAuthenticationHandler(old config.Config, obj config.Config, ev model.Event) {
	// getSelector extracts the policy's selector match labels (nil if spec or selector is absent).
	getSelector := func(c config.Config) map[string]string {
		if c.Spec == nil {
			return nil
		}
		pol := c.Spec.(*v1beta1.PeerAuthentication)
		return pol.Selector.GetMatchLabels()
	}
	// portMtlsEqual reports whether two port-level mTLS maps are equivalent,
	// checking both directions so added and removed ports are detected.
	portMtlsEqual := func(m1, m2 map[uint32]*v1beta1.PeerAuthentication_MutualTLS) bool {
		diffDetected := false
		// Loop through all of the old PA ports
		for port, m := range m1 {
			newPortlevelMtls, ok := m2[port]
			if !ok {
				diffDetected = true // port not present in the new version of the resource; something changed
				break
			}
			if !proto.Equal(newPortlevelMtls, m) {
				diffDetected = true // port level mTLS settings changed
				break
			}
		}
		if !diffDetected {
			for port, m := range m2 {
				oldPortlevelMtls, ok := m1[port]
				if !ok {
					diffDetected = true // port not present in the old version of the resource; something changed
					break
				}
				if !proto.Equal(oldPortlevelMtls, m) {
					diffDetected = true // port level mTLS settings changed
					break
				}
			}
		}
		return !diffDetected
	}
	// Normal flow for PeerAuthentication (initRegistryEventHandlers) will trigger XDS push, so we don't need to push those. But we do need
	// to update any relevant workloads and push them.
	sel := getSelector(obj)
	oldSel := getSelector(old)
	oldPa, oldPaOk := old.Spec.(*v1beta1.PeerAuthentication)
	newPa := obj.Spec.(*v1beta1.PeerAuthentication)
	if oldPaOk && ev == model.EventUpdate {
		if sel == nil && oldSel == nil {
			// global or namespace level policy change
			if oldPa.GetMtls().GetMode() == newPa.GetMtls().GetMode() {
				// No change in mTLS mode, no workloads to push
				return
			}
		}
		// DISABLE and PERMISSIVE are treated as equivalent for push purposes.
		mtlsUnchanged := oldPa.GetMtls().GetMode() == newPa.GetMtls().GetMode()
		mtlsUnchanged = mtlsUnchanged || (isMtlsModeDisable(oldPa.GetMtls()) && isMtlsModePermissive(newPa.GetMtls()))
		mtlsUnchanged = mtlsUnchanged || (isMtlsModePermissive(oldPa.GetMtls()) && isMtlsModeDisable(newPa.GetMtls()))
		portLevelMtlsUnchanged := portMtlsEqual(oldPa.GetPortLevelMtls(), newPa.GetPortLevelMtls())
		if maps.Equal(sel, oldSel) && mtlsUnchanged && portLevelMtlsUnchanged {
			// Update event, but nothing we care about changed. No workloads to push.
			return
		}
	}
	if (newPa.Mtls == nil || newPa.GetMtls().GetMode() == v1beta1.PeerAuthentication_MutualTLS_UNSET) && newPa.GetPortLevelMtls() == nil {
		// Nothing to do, no workloads to push
		return
	}
	// Collect pods selected by the new and (if any) old selector, keyed by pod IP
	// to de-duplicate.
	pods := map[string]*v1.Pod{}
	for _, p := range c.getPodsInPolicy(obj.Namespace, sel, false) {
		pods[p.Status.PodIP] = p
	}
	if oldSel != nil {
		for _, p := range c.getPodsInPolicy(obj.Namespace, oldSel, false) {
			pods[p.Status.PodIP] = p
		}
	}
	workloadEntries := map[networkAddress]*apiv1alpha3.WorkloadEntry{}
	// 2. only process workload entries in config cluster
	if c.configCluster {
		for _, w := range c.getWorkloadEntriesInPolicy(obj.Namespace, sel) {
			// The entry's explicit network (if any) overrides the inferred one.
			network := c.Network(w.Spec.Address, w.Spec.Labels).String()
			if w.Spec.Network != "" {
				network = w.Spec.Network
			}
			workloadEntries[networkAddress{
				ip:      w.Spec.Address,
				network: network,
			}] = w
		}
		if oldSel != nil {
			for _, w := range c.getWorkloadEntriesInPolicy(obj.Namespace, oldSel) {
				network := c.Network(w.Spec.Address, w.Spec.Labels).String()
				if w.Spec.Network != "" {
					network = w.Spec.Network
				}
				workloadEntries[networkAddress{
					ip:      w.Spec.Address,
					network: network,
				}] = w
			}
		}
	}
	// Recompute the affected workloads and push only if something changed.
	updates := c.ambientIndex.CalculateUpdatedWorkloads(pods, workloadEntries, nil, c)
	if len(updates) > 0 {
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
			ConfigsUpdated: updates,
			Reason:         model.NewReasonStats(model.AmbientUpdate),
		})
	}
}
// CalculateUpdatedWorkloads re-extracts the given pods, workload entries, and
// ServiceEntry-inlined endpoints, updates the index, and returns the set of
// config keys that changed.
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) CalculateUpdatedWorkloads(pods map[string]*v1.Pod,
	workloadEntries map[networkAddress]*apiv1alpha3.WorkloadEntry, seEndpoints map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry], c *Controller,
) map[model.ConfigKey]struct{} {
	a.mu.Lock()
	defer a.mu.Unlock()

	updates := map[model.ConfigKey]struct{}{}
	for _, p := range pods {
		podUID := c.generatePodUID(p)
		previous := a.byUID[podUID]
		current := a.extractWorkload(p, c)
		a.updateWorkloadIndexes(previous, current, updates)
	}
	for _, entry := range workloadEntries {
		entryUID := c.generateWorkloadEntryUID(entry.Namespace, entry.Name)
		previous := a.byUID[entryUID]
		current := a.extractWorkloadEntry(entry, c)
		a.updateWorkloadIndexes(previous, current, updates)
	}
	for serviceEntry, endpoints := range seEndpoints {
		for endpoint := range endpoints {
			endpointUID := c.generateServiceEntryUID(serviceEntry.Namespace, serviceEntry.Name, endpoint.Address)
			current := a.extractWorkloadEntrySpec(endpoint, serviceEntry.GetNamespace(), serviceEntry.GetName(), serviceEntry, c)
			previous := a.byUID[endpointUID]
			a.updateWorkloadIndexes(previous, current, updates)
		}
	}
	return updates
}
// AuthorizationPolicyHandler recomputes and pushes ambient workloads affected by
// an AuthorizationPolicy event. Only selector changes matter here: policy content
// changes are pushed by the normal config flow.
func (c *Controller) AuthorizationPolicyHandler(old config.Config, obj config.Config, ev model.Event) {
	// getSelector extracts the policy's selector match labels (nil if spec or selector is absent).
	getSelector := func(c config.Config) map[string]string {
		if c.Spec == nil {
			return nil
		}
		pol := c.Spec.(*v1beta1.AuthorizationPolicy)
		return pol.GetSelector().GetMatchLabels()
	}
	// Normal flow for AuthorizationPolicy will trigger XDS push, so we don't need to push those. But we do need
	// to update any relevant workloads and push them.
	sel := getSelector(obj)
	oldSel := getSelector(old)
	switch ev {
	case model.EventUpdate:
		if maps.Equal(sel, oldSel) {
			// Update event, but selector didn't change. No workloads to push.
			return
		}
	default:
		if sel == nil {
			// We only care about selector policies
			return
		}
	}
	// 1. process pods for all cluster
	// Pods selected by the new and (if any) old selector, keyed by pod IP to de-duplicate.
	pods := map[string]*v1.Pod{}
	for _, p := range c.getPodsInPolicy(obj.Namespace, sel, true) {
		pods[p.Status.PodIP] = p
	}
	if oldSel != nil {
		for _, p := range c.getPodsInPolicy(obj.Namespace, oldSel, true) {
			pods[p.Status.PodIP] = p
		}
	}
	workloadEntries := map[networkAddress]*apiv1alpha3.WorkloadEntry{}
	// 2. only process workload entries in config cluster
	if c.configCluster {
		for _, w := range c.getWorkloadEntriesInPolicy(obj.Namespace, sel) {
			// The entry's explicit network (if any) overrides the inferred one.
			network := c.Network(w.Spec.Address, w.Spec.Labels).String()
			if w.Spec.Network != "" {
				network = w.Spec.Network
			}
			workloadEntries[networkAddress{
				ip:      w.Spec.Address,
				network: network,
			}] = w
		}
		if oldSel != nil {
			for _, w := range c.getWorkloadEntriesInPolicy(obj.Namespace, oldSel) {
				network := c.Network(w.Spec.Address, w.Spec.Labels).String()
				if w.Spec.Network != "" {
					network = w.Spec.Network
				}
				workloadEntries[networkAddress{
					ip:      w.Spec.Address,
					network: network,
				}] = w
			}
		}
	}
	// 3. only process service entries in config cluster with endpoints
	seEndpoints := map[*apiv1alpha3.ServiceEntry]sets.Set[*v1alpha3.WorkloadEntry]{}
	if c.configCluster {
		for se, we := range c.getServiceEntryEndpointsInPolicy(obj.Namespace, sel) {
			seEndpoints[se] = we
		}
		if oldSel != nil {
			for se, we := range c.getServiceEntryEndpointsInPolicy(obj.Namespace, oldSel) {
				seEndpoints[se] = we
			}
		}
	}
	// Recompute the affected workloads and push only if something changed.
	updates := c.ambientIndex.CalculateUpdatedWorkloads(pods, workloadEntries, seEndpoints, c)
	if len(updates) > 0 {
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
			ConfigsUpdated: updates,
			Reason:         model.NewReasonStats(model.AmbientUpdate),
		})
	}
}
// getPodsInPolicy lists the pods a policy in ns with selector sel applies to.
// Root-namespace policies apply mesh-wide, so the listing expands to all
// namespaces when the policy has no selector or mesh-wide selectors are allowed.
// meshWideSelectorEnabled indicates whether a mesh-wide policy can have a selector;
// it is only true for AuthorizationPolicy, since PeerAuthentication doesn't support
// mesh-wide selector policies.
func (c *Controller) getPodsInPolicy(ns string, sel map[string]string, meshWideSelectorEnabled bool) []*v1.Pod {
	isRootNamespace := ns == c.meshWatcher.Mesh().GetRootNamespace()
	if isRootNamespace && (sel == nil || meshWideSelectorEnabled) {
		ns = metav1.NamespaceAll
	}
	return c.podsClient.List(ns, klabels.ValidatedSetSelector(sel))
}
// convertPeerAuthentication converts a PeerAuthentication to an L4 authorization policy (i.e. security.Authorization) iff
// 1. the PeerAuthentication has a workload selector
// 2. The PeerAuthentication is NOT in the root namespace
// 3. There is a portLevelMtls policy (technically implied by 1)
// 4. If the top-level mode is PERMISSIVE or DISABLE, there is at least one portLevelMtls policy with mode STRICT
//
// STRICT policies that don't have portLevelMtls will be
// handled when the Workload xDS resource is pushed (a static STRICT-equivalent policy will always be pushed)
func convertPeerAuthentication(rootNamespace string, cfg config.Config) *security.Authorization {
	pa, ok := cfg.Spec.(*v1beta1.PeerAuthentication)
	if !ok {
		return nil
	}
	mode := pa.GetMtls().GetMode()
	scope := security.Scope_WORKLOAD_SELECTOR
	// violates case #1, #2, or #3
	if cfg.Namespace == rootNamespace || pa.Selector == nil || len(pa.PortLevelMtls) == 0 {
		log.Debugf("skipping PeerAuthentication %s/%s for ambient since it isn't a workload policy with port level mTLS", cfg.Namespace, cfg.Name)
		return nil
	}
	action := security.Action_DENY
	var rules []*security.Rules
	if mode == v1beta1.PeerAuthentication_MutualTLS_STRICT {
		// Top-level STRICT: deny any connection that does not present a peer identity.
		rules = append(rules, &security.Rules{
			Matches: []*security.Match{
				{
					NotPrincipals: []*security.StringMatch{
						{
							MatchType: &security.StringMatch_Presence{},
						},
					},
				},
			},
		})
	}
	// If we have a strict policy and all of the ports are strict, it's effectively a strict policy
	// so we can exit early and have the WorkloadRbac xDS server push its static strict policy.
	// Note that this doesn't actually attach the policy to any workload; it just makes it available
	// to ztunnel in case a workload needs it.
	foundNonStrictPortmTLS := false
	for port, mtls := range pa.PortLevelMtls {
		switch portMtlsMode := mtls.GetMode(); {
		case portMtlsMode == v1beta1.PeerAuthentication_MutualTLS_STRICT:
			// Deny unauthenticated connections on this specific port.
			rules = append(rules, &security.Rules{
				Matches: []*security.Match{
					{
						NotPrincipals: []*security.StringMatch{
							{
								MatchType: &security.StringMatch_Presence{},
							},
						},
						DestinationPorts: []uint32{port},
					},
				},
			})
		case portMtlsMode == v1beta1.PeerAuthentication_MutualTLS_PERMISSIVE,
			portMtlsMode == v1beta1.PeerAuthentication_MutualTLS_DISABLE:
			// PERMISSIVE and DISABLE port overrides are handled identically:
			// they only matter when the top-level mode is STRICT.
			if mode == v1beta1.PeerAuthentication_MutualTLS_PERMISSIVE || mode == v1beta1.PeerAuthentication_MutualTLS_DISABLE {
				// we don't care; log and continue
				// port is a uint32, so use %d (%s would render as %!s(uint32=...)).
				log.Debugf("skipping port %d/%s for PeerAuthentication %s/%s for ambient since the parent mTLS mode is %s",
					port, portMtlsMode, cfg.Namespace, cfg.Name, mode)
				continue
			}
			foundNonStrictPortmTLS = true
			// If the top level policy is STRICT, we need to add a rule for the port that exempts it from the deny policy
			rules = append(rules, &security.Rules{
				Matches: []*security.Match{
					{
						NotDestinationPorts: []uint32{port}, // if the incoming connection does not match this port, deny (notice there's no principals requirement)
					},
				},
			})
		default:
			// port is a uint32, so use %d (%s would render as %!s(uint32=...)).
			log.Debugf("skipping port %d for PeerAuthentication %s/%s for ambient since it is %s", port, cfg.Namespace, cfg.Name, portMtlsMode)
			continue
		}
	}
	// If the top level TLS mode is STRICT and all of the port level mTLS modes are STRICT, this is just a strict policy and we'll exit early
	if mode == v1beta1.PeerAuthentication_MutualTLS_STRICT && !foundNonStrictPortmTLS {
		return nil
	}
	if len(rules) == 0 {
		// we never added any rules; return
		return nil
	}
	opol := &security.Authorization{
		Name: model.GetAmbientPolicyConfigName(model.ConfigKey{
			Name:      cfg.Name,
			Kind:      kind.PeerAuthentication,
			Namespace: cfg.Namespace,
		}),
		Namespace: cfg.Namespace,
		Scope:     scope,
		Action:    action,
		Groups:    []*security.Group{{Rules: rules}},
	}
	return opol
}
// convertAuthorizationPolicy translates an AuthorizationPolicy into the ztunnel
// security.Authorization form: it derives the policy's scope from its selector
// and namespace, maps the action, and converts each rule into a rule group.
// Returns nil for actions that are not ALLOW or DENY.
func convertAuthorizationPolicy(rootns string, obj config.Config) *security.Authorization {
	pol := obj.Spec.(*v1beta1.AuthorizationPolicy)

	// No selector means the policy is namespace-scoped; in the root namespace
	// it widens to the whole mesh.
	scope := security.Scope_WORKLOAD_SELECTOR
	if pol.GetSelector() == nil {
		if rootns == obj.Namespace {
			// TODO: TDA
			scope = security.Scope_GLOBAL // TODO: global workload?
		} else {
			scope = security.Scope_NAMESPACE
		}
	}

	var action security.Action
	switch pol.Action {
	case v1beta1.AuthorizationPolicy_ALLOW:
		action = security.Action_ALLOW
	case v1beta1.AuthorizationPolicy_DENY:
		action = security.Action_DENY
	default:
		// Any other action (e.g. AUDIT/CUSTOM) is not supported here.
		return nil
	}

	opol := &security.Authorization{
		Name:      obj.Name,
		Namespace: obj.Namespace,
		Scope:     scope,
		Action:    action,
	}
	for _, rule := range pol.Rules {
		// handleRule returns nil when the rule cannot be enforced at L4; such
		// rules are dropped rather than converted into an empty group.
		if converted := handleRule(action, rule); converted != nil {
			opol.Groups = append(opol.Groups, &security.Group{Rules: converted})
		}
	}
	return opol
}
// anyNonEmpty reports whether at least one of the given slices has elements.
func anyNonEmpty[T any](arr ...[]T) bool {
	for _, slice := range arr {
		if len(slice) != 0 {
			return true
		}
	}
	return false
}
// handleRule converts a single AuthorizationPolicy rule into L4 security.Rules.
// It returns nil when the rule relies on L7-only attributes under an ALLOW
// action: such rules can never match at L4, and dropping the whole rule is the
// only safe translation. (For DENY, an always-matching rule is simply more
// restrictive, so conversion proceeds.)
func handleRule(action security.Action, rule *v1beta1.Rule) []*security.Rules {
	toMatches := make([]*security.Match, 0, len(rule.To))
	for _, to := range rule.To {
		op := to.Operation
		if action == security.Action_ALLOW && anyNonEmpty(op.Hosts, op.NotHosts, op.Methods, op.NotMethods, op.Paths, op.NotPaths) {
			// L7 policies never match for ALLOW
			// For DENY they will always match, so it is more restrictive
			return nil
		}
		toMatches = append(toMatches, &security.Match{
			DestinationPorts:    stringToPort(op.Ports),
			NotDestinationPorts: stringToPort(op.NotPorts),
		})
	}
	fromMatches := make([]*security.Match, 0, len(rule.From))
	for _, from := range rule.From {
		src := from.Source
		if action == security.Action_ALLOW && anyNonEmpty(src.RemoteIpBlocks, src.NotRemoteIpBlocks, src.RequestPrincipals, src.NotRequestPrincipals) {
			// L7 policies never match for ALLOW
			// For DENY they will always match, so it is more restrictive
			return nil
		}
		fromMatches = append(fromMatches, &security.Match{
			SourceIps:     stringToIP(src.IpBlocks),
			NotSourceIps:  stringToIP(src.NotIpBlocks),
			Namespaces:    stringToMatch(src.Namespaces),
			NotNamespaces: stringToMatch(src.NotNamespaces),
			Principals:    stringToMatch(src.Principals),
			NotPrincipals: stringToMatch(src.NotPrincipals),
		})
	}
	// NOTE: the caller distinguishes nil (drop the rule) from an empty slice,
	// so the result stays non-nil from here on.
	rules := []*security.Rules{}
	if len(toMatches) > 0 {
		rules = append(rules, &security.Rules{Matches: toMatches})
	}
	if len(fromMatches) > 0 {
		rules = append(rules, &security.Rules{Matches: fromMatches})
	}
	for _, when := range rule.When {
		if action == security.Action_ALLOW && !l4WhenAttributes.Contains(when.Key) {
			// L7 policies never match for ALLOW
			// For DENY they will always match, so it is more restrictive
			return nil
		}
		// Each condition contributes one match; whenMatch returns nil for every
		// attribute other than when.Key, so only the matching field is populated.
		rules = append(rules, &security.Rules{Matches: []*security.Match{{
			Namespaces:          whenMatch("source.namespace", when, false, stringToMatch),
			Principals:          whenMatch("source.principal", when, false, stringToMatch),
			SourceIps:           whenMatch("source.ip", when, false, stringToIP),
			DestinationPorts:    whenMatch("destination.port", when, false, stringToPort),
			DestinationIps:      whenMatch("destination.ip", when, false, stringToIP),
			NotNamespaces:       whenMatch("source.namespace", when, true, stringToMatch),
			NotPrincipals:       whenMatch("source.principal", when, true, stringToMatch),
			NotSourceIps:        whenMatch("source.ip", when, true, stringToIP),
			NotDestinationPorts: whenMatch("destination.port", when, true, stringToPort),
			NotDestinationIps:   whenMatch("destination.ip", when, true, stringToIP),
		}}})
	}
	return rules
}
// l4WhenAttributes is the set of AuthorizationPolicy `when` condition keys that
// handleRule treats as enforceable at L4; any other key is rejected for ALLOW
// policies (see handleRule).
var l4WhenAttributes = sets.New(
	"source.ip",
	"source.namespace",
	"source.principal",
	"destination.ip",
	"destination.port",
)
// whenMatch applies f to the condition's Values (or NotValues when invert is
// set), but only when the condition's key equals s; otherwise it returns nil.
func whenMatch[T any](s string, when *v1beta1.Condition, invert bool, f func(v []string) []T) []T {
	if when.Key != s {
		return nil
	}
	values := when.Values
	if invert {
		values = when.NotValues
	}
	return f(values)
}
// stringToMatch converts policy string values into StringMatch messages:
// "*" is a presence match, a leading "*" is a suffix match, a trailing "*"
// is a prefix match, and anything else is an exact match.
func stringToMatch(rules []string) []*security.StringMatch {
	out := make([]*security.StringMatch, 0, len(rules))
	for _, rule := range rules {
		var match *security.StringMatch
		if rule == "*" {
			match = &security.StringMatch{MatchType: &security.StringMatch_Presence{}}
		} else if strings.HasPrefix(rule, "*") {
			match = &security.StringMatch{MatchType: &security.StringMatch_Suffix{
				Suffix: strings.TrimPrefix(rule, "*"),
			}}
		} else if strings.HasSuffix(rule, "*") {
			match = &security.StringMatch{MatchType: &security.StringMatch_Prefix{
				Prefix: strings.TrimSuffix(rule, "*"),
			}}
		} else {
			match = &security.StringMatch{MatchType: &security.StringMatch_Exact{
				Exact: rule,
			}}
		}
		out = append(out, match)
	}
	return out
}
// stringToPort parses the given strings as port numbers, silently skipping
// anything that is not a valid port (non-numeric, negative, or > 65535).
func stringToPort(rules []string) []uint32 {
	out := make([]uint32, 0, len(rules))
	for _, s := range rules {
		// bitSize 16 makes ParseUint reject anything above 65535, which is
		// equivalent to the explicit range check it replaces.
		port, err := strconv.ParseUint(s, 10, 16)
		if err != nil {
			continue
		}
		out = append(out, uint32(port))
	}
	return out
}
// stringToIP parses the given strings as IP addresses or CIDR prefixes and
// converts them to security.Address messages. A bare address gets a prefix
// length equal to its full bit length (/32 or /128). Unparseable or empty
// entries are skipped.
func stringToIP(rules []string) []*security.Address {
	out := make([]*security.Address, 0, len(rules))
	for _, rule := range rules {
		if rule == "" {
			continue
		}
		var (
			addr      netip.Addr
			prefixLen uint32
		)
		if strings.Contains(rule, "/") {
			prefix, err := netip.ParsePrefix(rule)
			if err != nil {
				continue
			}
			addr = prefix.Addr()
			prefixLen = uint32(prefix.Bits())
		} else {
			parsed, err := netip.ParseAddr(rule)
			if err != nil {
				continue
			}
			addr = parsed
			prefixLen = uint32(parsed.BitLen())
		}
		out = append(out, &security.Address{
			Address: addr.AsSlice(),
			Length:  prefixLen,
		})
	}
	return out
}
// isMtlsModeUnset reports whether mtls is absent or explicitly UNSET.
// GetMode is a nil-safe proto getter and returns UNSET for a nil receiver.
func isMtlsModeUnset(mtls *v1beta1.PeerAuthentication_MutualTLS) bool {
	return mtls.GetMode() == v1beta1.PeerAuthentication_MutualTLS_UNSET
}

// isMtlsModeStrict reports whether mtls is explicitly STRICT.
func isMtlsModeStrict(mtls *v1beta1.PeerAuthentication_MutualTLS) bool {
	return mtls.GetMode() == v1beta1.PeerAuthentication_MutualTLS_STRICT
}

// isMtlsModeDisable reports whether mtls is explicitly DISABLE.
func isMtlsModeDisable(mtls *v1beta1.PeerAuthentication_MutualTLS) bool {
	return mtls.GetMode() == v1beta1.PeerAuthentication_MutualTLS_DISABLE
}

// isMtlsModePermissive reports whether mtls is explicitly PERMISSIVE.
func isMtlsModePermissive(mtls *v1beta1.PeerAuthentication_MutualTLS) bool {
	return mtls.GetMode() == v1beta1.PeerAuthentication_MutualTLS_PERMISSIVE
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
mcsapi "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
"istio.io/istio/pilot/pkg/model"
serviceRegistryKube "istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/mcs"
)
// autoServiceExportController automatically creates a ServiceExport for every
// (non-cluster-local) Service observed in the cluster, enabling MCS export
// without per-service user action.
type autoServiceExportController struct {
	autoServiceExportOptions

	// client is used to create ServiceExport resources via the dynamic API.
	client kube.Client

	// queue serializes reconciliation of Service add events.
	queue controllers.Queue

	// services watches Service resources; only add events are handled (see
	// newAutoServiceExportController).
	services kclient.Client[*v1.Service]

	// We use this flag to short-circuit the logic and stop the controller
	// if the CRD does not exist (or is deleted)
	mcsSupported bool
}
// autoServiceExportOptions provide options for creating a autoServiceExportController.
type autoServiceExportOptions struct {
	// Client is the Kubernetes client used for watching Services and creating ServiceExports.
	Client kube.Client
	// ClusterID identifies this cluster; used only for log message prefixes here.
	ClusterID cluster.ID
	// DomainSuffix is the cluster-local DNS suffix used to build service hostnames.
	DomainSuffix string
	// ClusterLocal supplies the set of hosts that must not be exported.
	ClusterLocal model.ClusterLocalProvider
}
// newAutoServiceExportController creates a new autoServiceExportController.
func newAutoServiceExportController(opts autoServiceExportOptions) *autoServiceExportController {
	controller := &autoServiceExportController{
		autoServiceExportOptions: opts,
		client:                   opts.Client,
		// Assume the ServiceExport CRD exists until a create attempt proves otherwise.
		mcsSupported: true,
	}
	controller.queue = controllers.NewQueue("auto export",
		controllers.WithReconciler(controller.Reconcile),
		controllers.WithMaxAttempts(5))
	controller.services = kclient.New[*v1.Service](opts.Client)
	// Only handle add. The controller only acts on parts of the service
	// that are immutable (e.g. name). When we create ServiceExport, we bind its
	// lifecycle to the Service so that when the Service is deleted,
	// k8s automatically deletes the ServiceExport.
	controller.services.AddEventHandler(controllers.EventHandler[controllers.Object]{AddFunc: controller.queue.AddObject})
	return controller
}
// Run waits for the Service informer to sync, then processes the work queue
// until stopCh is closed, and finally removes the registered event handlers.
func (c *autoServiceExportController) Run(stopCh <-chan struct{}) {
	kube.WaitForCacheSync("auto service export", stopCh, c.services.HasSynced)
	c.queue.Run(stopCh)
	c.services.ShutdownHandlers()
}
// logPrefix returns a per-cluster prefix for this controller's log messages.
func (c *autoServiceExportController) logPrefix() string {
	return "AutoServiceExport (cluster=" + c.ClusterID.String() + ") "
}
// Reconcile ensures a ServiceExport exists for the named Service, creating one
// if necessary. It is a no-op when MCS is unsupported on the cluster, the
// Service no longer exists, or the Service is configured as cluster-local.
func (c *autoServiceExportController) Reconcile(key types.NamespacedName) error {
	if !c.mcsSupported {
		// Don't create ServiceExport if MCS is not supported on the cluster.
		log.Debugf("%s ignoring added Service, since !mcsSupported", c.logPrefix())
		return nil
	}
	svc := c.services.Get(key.Name, key.Namespace)
	if svc == nil {
		// Service no longer exists, no action needed
		return nil
	}
	if c.isClusterLocalService(svc) {
		// Don't create ServiceExport if the service is configured to be
		// local to the cluster (i.e. non-exported).
		log.Debugf("%s ignoring cluster-local service %s/%s", c.logPrefix(), svc.Namespace, svc.Name)
		return nil
	}
	serviceExport := mcsapi.ServiceExport{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ServiceExport",
			APIVersion: mcs.MCSSchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: svc.Namespace,
			Name:      svc.Name,
			// Bind the lifecycle of the ServiceExport to the Service. We do this by making the Service
			// the "owner" of the ServiceExport resource.
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: v1.SchemeGroupVersion.String(),
					Kind:       gvk.Service.Kind,
					Name:       svc.Name,
					UID:        svc.UID,
				},
			},
		},
	}
	// Convert to unstructured for the dynamic client.
	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&serviceExport)
	if err != nil {
		log.Warnf("%s failed converting ServiceExport %s/%s to Unstructured: %v", c.logPrefix(),
			svc.Namespace, svc.Name, err)
		return err
	}
	if _, err := c.client.Dynamic().Resource(mcs.ServiceExportGVR).Namespace(serviceExport.Namespace).Create(
		context.TODO(), &unstructured.Unstructured{Object: u}, metav1.CreateOptions{}); err != nil {
		switch {
		case errors.IsAlreadyExists(err):
			// The ServiceExport already exists. Nothing to do.
			return nil
		case errors.IsNotFound(err):
			log.Warnf("%s ServiceExport CRD Not found. Shutting down MCS ServiceExport sync. "+
				"Please add the CRD then restart the istiod deployment", c.logPrefix())
			c.mcsSupported = false
			// Do not return the error, so that the queue does not attempt a retry.
			return nil
		default:
			// Any other create failure is returned so the queue retries.
			log.Warnf("%s failed creating ServiceExport %s/%s: %v", c.logPrefix(), svc.Namespace, svc.Name, err)
			return err
		}
	}
	log.Debugf("%s created ServiceExport %s/%s", c.logPrefix(), svc.Namespace, svc.Name)
	return nil
}
// isClusterLocalService reports whether the service's hostname is configured
// as cluster-local, i.e. must not be exported to other clusters.
func (c *autoServiceExportController) isClusterLocalService(svc *v1.Service) bool {
	return c.ClusterLocal.GetClusterLocalHosts().IsClusterLocal(
		serviceRegistryKube.ServiceHostname(svc.Name, svc.Namespace, c.DomainSuffix))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"fmt"
"sort"
"sync"
"time"
"github.com/hashicorp/go-multierror"
"go.uber.org/atomic"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pilot/pkg/serviceregistry/util/workloadinstances"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/visibility"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/namespace"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/monitoring"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/queue"
"istio.io/istio/pkg/slices"
)
// Well-known Kubernetes topology labels used for pod locality, plus mesh-wide defaults.
const (
	// NodeRegionLabel is the well-known label for kubernetes node region in beta
	NodeRegionLabel = v1.LabelFailureDomainBetaRegion
	// NodeZoneLabel is the well-known label for kubernetes node zone in beta
	NodeZoneLabel = v1.LabelFailureDomainBetaZone
	// NodeRegionLabelGA is the well-known label for kubernetes node region in ga
	NodeRegionLabelGA = v1.LabelTopologyRegion
	// NodeZoneLabelGA is the well-known label for kubernetes node zone in ga
	NodeZoneLabelGA = v1.LabelTopologyZone
	// DefaultNetworkGatewayPort is the port used by default for cross-network traffic if not otherwise specified
	// by meshNetworks or "networking.istio.io/gatewayPort"
	DefaultNetworkGatewayPort = 15443
)
// log is the scoped logger for the kubernetes service registry controller.
var log = istiolog.RegisterScope("kube", "kubernetes service registry controller")

// Metrics describing events processed by the k8s registry.
var (
	typeTag  = monitoring.CreateLabel("type")
	eventTag = monitoring.CreateLabel("event")

	// k8sEvents counts informer events by resource type and event kind
	// (incremented via incrementEvent).
	k8sEvents = monitoring.NewSum(
		"pilot_k8s_reg_events",
		"Events from k8s registry.",
	)

	// nolint: gocritic
	// This is deprecated in favor of `pilot_k8s_endpoints_pending_pod`, which is a gauge indicating the number of
	// currently missing pods. This helps distinguish transient errors from permanent ones
	endpointsWithNoPods = monitoring.NewSum(
		"pilot_k8s_endpoints_with_no_pods",
		"Endpoints that does not have any corresponding pods.")

	endpointsPendingPodUpdate = monitoring.NewGauge(
		"pilot_k8s_endpoints_pending_pod",
		"Number of endpoints that do not currently have any corresponding pods.",
	)
)
// incrementEvent bumps the k8s registry event counter for the given resource
// kind and event name; blank values are ignored (no metric is recorded).
func incrementEvent(kind, event string) {
	if kind != "" && event != "" {
		k8sEvents.With(typeTag.Value(kind), eventTag.Value(event)).Increment()
	}
}
// Options stores the configurable attributes of a Controller.
type Options struct {
	// SystemNamespace is the namespace treated as the Istio system namespace;
	// namespace events for it trigger system-namespace handling (see NewController).
	SystemNamespace string

	// MeshServiceController is a mesh-wide service Controller.
	MeshServiceController *aggregate.Controller

	// DomainSuffix is the cluster-local DNS domain suffix used when converting services.
	DomainSuffix string

	// ClusterID identifies the cluster which the controller communicate with.
	ClusterID cluster.ID

	// ClusterAliases are alias names for cluster. When a proxy connects with a cluster ID
	// and if it has a different alias we should use that a cluster ID for proxy.
	ClusterAliases map[string]string

	// Metrics for capturing node-based metrics.
	Metrics model.Metrics

	// XDSUpdater will push changes to the xDS server.
	XDSUpdater model.XDSUpdater

	// MeshNetworksWatcher observes changes to the mesh networks config.
	MeshNetworksWatcher mesh.NetworksWatcher

	// MeshWatcher observes changes to the mesh config
	MeshWatcher mesh.Watcher

	// Maximum QPS when communicating with kubernetes API
	KubernetesAPIQPS float32

	// Maximum burst for throttle when communicating with the kubernetes API
	KubernetesAPIBurst int

	// SyncTimeout, if set, causes HasSynced to be returned when timeout.
	SyncTimeout time.Duration

	// If meshConfig.DiscoverySelectors are specified, the DiscoveryNamespacesFilter tracks the namespaces this controller watches.
	DiscoveryNamespacesFilter namespace.DiscoveryNamespacesFilter

	// ConfigController provides access to Istio config resources (used for the ambient index).
	ConfigController model.ConfigStoreController

	// ConfigCluster indicates whether this cluster is the config cluster
	// (affects discovery-filter initialization and service entry processing).
	ConfigCluster bool
}
// GetFilter returns the discovery namespace filter function, or nil when no
// DiscoveryNamespacesFilter is configured.
func (o *Options) GetFilter() namespace.DiscoveryFilter {
	if o.DiscoveryNamespacesFilter == nil {
		return nil
	}
	return o.DiscoveryNamespacesFilter.Filter
}
// kubernetesNode represents a kubernetes node that is reachable externally
type kubernetesNode struct {
	// address is the node's first ExternalIP address (populated in onNodeEvent).
	address string
	// labels are the node's Kubernetes labels.
	labels labels.Instance
}
// controllerInterface is a simplified interface for the Controller used for testing.
type controllerInterface interface {
	// getPodLocality returns the locality of the pod's node (see the Node*Label constants above).
	getPodLocality(pod *v1.Pod) string
	// Network returns the network ID for the given endpoint IP and labels.
	Network(endpointIP string, labels labels.Instance) network.ID
	// Cluster returns the ID of the cluster this controller watches.
	Cluster() cluster.ID
}

// Compile-time assertions that Controller satisfies the expected interfaces.
var (
	_ controllerInterface      = &Controller{}
	_ serviceregistry.Instance = &Controller{}
)
// Controller is a collection of synchronized resource watchers
// Caches are thread-safe
type Controller struct {
	// opts holds the options the controller was created with.
	opts Options
	// client is the shared Kubernetes client for this cluster.
	client kubelib.Client

	// queue serializes all informer event processing for this controller.
	queue queue.Instance

	namespaces kclient.Client[*v1.Namespace]
	services   kclient.Client[*v1.Service]

	// endpoints watches EndpointSlices (see newEndpointSliceController).
	endpoints *endpointSliceController

	// Used to watch node accessible from remote cluster.
	// In multi-cluster(shared control plane multi-networks) scenario, ingress gateway service can be of nodePort type.
	// With this, we can populate mesh's gateway address with the node ips.
	nodes kclient.Client[*v1.Node]

	// exports and imports track MCS ServiceExport/ServiceImport state (merged in MCSServices).
	exports serviceExportCache
	imports serviceImportCache

	// pods caches pod state; it pushes podArrived work onto the queue (see NewController).
	pods *PodCache

	// crdHandlers are callbacks invoked per CRD name — presumably when a CRD
	// becomes available; registration is not visible in this file, confirm at call sites.
	crdHandlers                []func(name string)
	handlers                   model.ControllerHandlers
	namespaceDiscoveryHandlers []func(ns string, event model.Event)

	// This is only used for test
	stop chan struct{}

	sync.RWMutex
	// servicesMap stores hostname ==> service, it is used to reduce convertService calls.
	servicesMap map[host.Name]*model.Service
	// nodeSelectorsForServices stores hostname => label selectors that can be used to
	// refine the set of node port IPs for a service.
	nodeSelectorsForServices map[host.Name]labels.Instance
	// map of node name and its address+labels - this is the only thing we need from nodes
	// for vm to k8s or cross cluster. When node port services select specific nodes by labels,
	// we run through the label selectors here to pick only ones that we need.
	// Only nodes with ExternalIP addresses are included in this map !
	nodeInfoMap map[string]kubernetesNode
	// index over workload instances from workload entries
	workloadInstancesIndex workloadinstances.Index

	*networkManager

	// initialSyncTimedout is set to true after performing an initial processing timed out.
	initialSyncTimedout *atomic.Bool
	meshWatcher         mesh.Watcher

	podsClient kclient.Client[*v1.Pod]

	// ambientIndex is only populated when ambient controllers are enabled (see NewController).
	ambientIndex     AmbientIndex
	configController model.ConfigStoreController
	// configCluster mirrors Options.ConfigCluster.
	configCluster bool

	// networksHandlerRegistration and meshHandlerRegistration are the watcher
	// registrations removed again in Cleanup.
	networksHandlerRegistration *mesh.WatcherHandlerRegistration
	meshHandlerRegistration     *mesh.WatcherHandlerRegistration
}
// NewController creates a new Kubernetes controller
// Created by bootstrap and multicluster (see multicluster.Controller).
func NewController(kubeClient kubelib.Client, options Options) *Controller {
	c := &Controller{
		opts:                     options,
		client:                   kubeClient,
		queue:                    queue.NewQueueWithID(1*time.Second, string(options.ClusterID)),
		servicesMap:              make(map[host.Name]*model.Service),
		nodeSelectorsForServices: make(map[host.Name]labels.Instance),
		nodeInfoMap:              make(map[string]kubernetesNode),
		workloadInstancesIndex:   workloadinstances.NewIndex(),
		initialSyncTimedout:      atomic.NewBool(false),

		configCluster: options.ConfigCluster,
	}
	c.networkManager = initNetworkManager(c, options)

	c.namespaces = kclient.New[*v1.Namespace](kubeClient)

	if c.opts.SystemNamespace != "" {
		// Watch the system namespace only; its events drive network/system config updates.
		registerHandlers[*v1.Namespace](
			c,
			c.namespaces,
			"Namespaces",
			func(old *v1.Namespace, cur *v1.Namespace, event model.Event) error {
				if cur.Name == c.opts.SystemNamespace {
					return c.onSystemNamespaceEvent(old, cur, event)
				}
				return nil
			},
			nil,
		)
	}

	// always init for each cluster, otherwise different ns labels in different cluster may not take effect,
	// but we skip it for configCluster which has been initiated before
	if !c.opts.ConfigCluster || c.opts.DiscoveryNamespacesFilter == nil {
		c.opts.DiscoveryNamespacesFilter = namespace.NewDiscoveryNamespacesFilter(c.namespaces, options.MeshWatcher.Mesh().DiscoverySelectors)
	}
	c.initDiscoveryHandlers(c.opts.MeshWatcher, c.opts.DiscoveryNamespacesFilter)

	// Services and pods are filtered by the discovery-namespace filter so only
	// selected namespaces are processed.
	c.services = kclient.NewFiltered[*v1.Service](kubeClient, kclient.Filter{ObjectFilter: c.opts.DiscoveryNamespacesFilter.Filter})

	registerHandlers[*v1.Service](c, c.services, "Services", c.onServiceEvent, nil)

	c.endpoints = newEndpointSliceController(c)

	// This is for getting the node IPs of a selected set of nodes
	c.nodes = kclient.NewFiltered[*v1.Node](kubeClient, kclient.Filter{ObjectTransform: kubelib.StripNodeUnusedFields})
	registerHandlers[*v1.Node](c, c.nodes, "Nodes", c.onNodeEvent, nil)

	c.podsClient = kclient.NewFiltered[*v1.Pod](kubeClient, kclient.Filter{
		ObjectFilter:    c.opts.DiscoveryNamespacesFilter.Filter,
		ObjectTransform: kubelib.StripPodUnusedFields,
	})
	// When a pod the endpoint controller is waiting on arrives, re-process it on the queue.
	c.pods = newPodCache(c, c.podsClient, func(key types.NamespacedName) {
		c.queue.Push(func() error {
			return c.endpoints.podArrived(key.Name, key.Namespace)
		})
	})
	registerHandlers[*v1.Pod](c, c.podsClient, "Pods", c.pods.onEvent, c.pods.labelFilter)

	if features.EnableAmbientControllers {
		c.configController = options.ConfigController
		c.ambientIndex = c.setupIndex()
	}

	c.exports = newServiceExportCache(c)
	c.imports = newServiceImportCache(c)

	c.meshWatcher = options.MeshWatcher
	if c.opts.MeshNetworksWatcher != nil {
		// Reload network config now and on every meshNetworks change.
		c.networksHandlerRegistration = c.opts.MeshNetworksWatcher.AddNetworksHandler(func() {
			c.reloadMeshNetworks()
			c.onNetworkChange()
		})
		c.reloadMeshNetworks()
	}
	return c
}
// Provider identifies this registry as a plain Kubernetes service registry.
func (c *Controller) Provider() provider.ID {
	return provider.Kubernetes
}

// Cluster returns the ID of the cluster this controller watches.
func (c *Controller) Cluster() cluster.ID {
	return c.opts.ClusterID
}
// MCSServices returns the MCS (multi-cluster services) state known to this
// cluster: ServiceExport and ServiceImport information merged per namespaced name.
func (c *Controller) MCSServices() []model.MCSServiceInfo {
	merged := make(map[types.NamespacedName]model.MCSServiceInfo)

	// Fold in the ServiceExport info.
	for _, exp := range c.exports.ExportedServices() {
		info := merged[exp.namespacedName]
		info.Cluster = c.Cluster()
		info.Name = exp.namespacedName.Name
		info.Namespace = exp.namespacedName.Namespace
		info.Exported = true
		info.Discoverability = exp.discoverability
		merged[exp.namespacedName] = info
	}

	// Fold in the ServiceImport info.
	for _, imp := range c.imports.ImportedServices() {
		info := merged[imp.namespacedName]
		info.Cluster = c.Cluster()
		info.Name = imp.namespacedName.Name
		info.Namespace = imp.namespacedName.Namespace
		info.Imported = true
		info.ClusterSetVIP = imp.clusterSetVIP
		merged[imp.namespacedName] = info
	}

	return maps.Values(merged)
}
// Network resolves the network ID for an endpoint, in precedence order:
// the topology.istio.io/network label on the pod/workloadEntry, then the
// system namespace label, then the meshNetworks CIDR configuration. Returns
// the empty ID if none apply.
func (c *Controller) Network(endpointIP string, labels labels.Instance) network.ID {
	if fromLabel := labels[label.TopologyNetwork.Name]; fromLabel != "" {
		return network.ID(fromLabel)
	}
	if fromNamespace := c.networkFromSystemNamespace(); fromNamespace != "" {
		return fromNamespace
	}
	return c.networkFromMeshNetworks(endpointIP)
}
// Cleanup tears down the controller when its cluster is removed: drains the
// work queue, removes this registry's EDS shard, and unregisters the mesh and
// meshNetworks watcher handlers.
func (c *Controller) Cleanup() error {
	// Wait for in-flight queue items; log (but don't fail) on timeout.
	if err := queue.WaitForClose(c.queue, 30*time.Second); err != nil {
		log.Warnf("queue for removed kube registry %q may not be done processing: %v", c.Cluster(), err)
	}
	if c.opts.XDSUpdater != nil {
		c.opts.XDSUpdater.RemoveShard(model.ShardKeyFromRegistry(c))
	}

	// Unregister networks handler
	if c.networksHandlerRegistration != nil {
		c.opts.MeshNetworksWatcher.DeleteNetworksHandler(c.networksHandlerRegistration)
	}

	// Unregister mesh handler
	if c.meshHandlerRegistration != nil {
		c.opts.MeshWatcher.DeleteMeshHandler(c.meshHandlerRegistration)
	}

	return nil
}
// onServiceEvent converts the Kubernetes Service to its Istio model form and
// dispatches to the delete or add/update path based on the event.
func (c *Controller) onServiceEvent(pre, curr *v1.Service, event model.Event) error {
	log.Debugf("Handle event %s for service %s in namespace %s", event, curr.Name, curr.Namespace)

	// Create the standard (cluster.local) service.
	svcConv := kube.ConvertService(*curr, c.opts.DomainSuffix, c.Cluster())

	if event == model.EventDelete {
		c.deleteService(svcConv)
	} else {
		c.addOrUpdateService(pre, curr, svcConv, event, false)
	}

	return nil
}
// deleteService removes the service from the controller's caches and notifies
// the xDS layer and registered service handlers of the deletion.
func (c *Controller) deleteService(svc *model.Service) {
	c.Lock()
	delete(c.servicesMap, svc.Hostname)
	delete(c.nodeSelectorsForServices, svc.Hostname)
	// Remember (before deleting) whether this service was a network gateway,
	// so handlers can be notified outside the lock.
	_, isNetworkGateway := c.networkGatewaysBySvc[svc.Hostname]
	delete(c.networkGatewaysBySvc, svc.Hostname)
	c.Unlock()
	if isNetworkGateway {
		c.NotifyGatewayHandlers()
		// TODO trigger push via handler
		// networks are different, we need to update all eds endpoints
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.NetworksTrigger)})
	}

	shard := model.ShardKeyFromRegistry(c)
	event := model.EventDelete
	c.opts.XDSUpdater.SvcUpdate(shard, string(svc.Hostname), svc.Attributes.Namespace, event)

	c.handlers.NotifyServiceHandlers(nil, svc, event)
}
// addOrUpdateService records a new/updated service in the cache, refreshes any
// gateway/node-port derived addresses, updates the EDS cache when required, and
// notifies the xDS layer and service handlers (skipping no-op updates).
func (c *Controller) addOrUpdateService(pre, curr *v1.Service, currConv *model.Service, event model.Event, updateEDSCache bool) {
	needsFullPush := false
	// First, process nodePort gateway service, whose externalIPs specified
	// and loadbalancer gateway service
	if currConv.Attributes.ClusterExternalAddresses.Len() > 0 {
		needsFullPush = c.extractGatewaysFromService(currConv)
	} else if isNodePortGatewayService(curr) {
		// We need to know which services are using node selectors because during node events,
		// we have to update all the node port services accordingly.
		nodeSelector := getNodeSelectorsForService(curr)
		c.Lock()
		// only add when it is nodePort gateway service
		c.nodeSelectorsForServices[currConv.Hostname] = nodeSelector
		c.Unlock()
		needsFullPush = c.updateServiceNodePortAddresses(currConv)
	}

	// For ExternalName, we need to update the EndpointIndex, as we will store endpoints just based on the Service.
	if !features.EnableExternalNameAlias && curr != nil && curr.Spec.Type == v1.ServiceTypeExternalName {
		updateEDSCache = true
	}

	// Swap the converted service into the cache, keeping the previous version
	// for the change comparison below.
	c.Lock()
	prevConv := c.servicesMap[currConv.Hostname]
	c.servicesMap[currConv.Hostname] = currConv
	c.Unlock()
	// This full push needed to update ALL ends endpoints, even though we do a full push on service add/update
	// as that full push is only triggered for the specific service.
	if needsFullPush {
		// networks are different, we need to update all eds endpoints
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.NetworksTrigger)})
	}

	shard := model.ShardKeyFromRegistry(c)
	ns := currConv.Attributes.Namespace
	// We also need to update when the Service changes. For Kubernetes, a service change will result in Endpoint updates,
	// but workload entries will also need to be updated.
	// TODO(nmittler): Build different sets of endpoints for cluster.local and clusterset.local.
	if updateEDSCache || features.EnableK8SServiceSelectWorkloadEntries {
		endpoints := c.buildEndpointsForService(currConv, updateEDSCache)
		if len(endpoints) > 0 {
			c.opts.XDSUpdater.EDSCacheUpdate(shard, string(currConv.Hostname), ns, endpoints)
		}
	}

	// filter out same service event
	if event == model.EventUpdate && !serviceUpdateNeedsPush(pre, curr, prevConv, currConv) {
		return
	}

	c.opts.XDSUpdater.SvcUpdate(shard, string(currConv.Hostname), ns, event)
	c.handlers.NotifyServiceHandlers(prevConv, currConv, event)
}
// buildEndpointsForService gathers all Istio endpoints for a service:
// EndpointSlice-derived endpoints, WorkloadEntry-backed endpoints when
// enabled, and synthesized ExternalName endpoints when aliasing is disabled.
func (c *Controller) buildEndpointsForService(svc *model.Service, updateCache bool) []*model.IstioEndpoint {
	eps := c.endpoints.buildIstioEndpointsWithService(svc.Attributes.Name, svc.Attributes.Namespace, svc.Hostname, updateCache)
	if features.EnableK8SServiceSelectWorkloadEntries {
		eps = append(eps, c.collectWorkloadInstanceEndpoints(svc)...)
	}
	if !features.EnableExternalNameAlias {
		eps = append(eps, kube.ExternalNameEndpoints(svc)...)
	}
	return eps
}
// onNodeEvent keeps nodeInfoMap in sync with node events and, when a node's
// ExternalIP/labels actually changed, refreshes node-port service addresses
// and triggers a full push.
func (c *Controller) onNodeEvent(_, node *v1.Node, event model.Event) error {
	var updatedNeeded bool
	if event == model.EventDelete {
		updatedNeeded = true
		c.Lock()
		delete(c.nodeInfoMap, node.Name)
		c.Unlock()
	} else {
		// Only the first ExternalIP address is tracked; nodes without one are ignored.
		k8sNode := kubernetesNode{labels: node.Labels}
		for _, address := range node.Status.Addresses {
			if address.Type == v1.NodeExternalIP && address.Address != "" {
				k8sNode.address = address.Address
				break
			}
		}
		if k8sNode.address == "" {
			return nil
		}

		c.Lock()
		// check if the node exists as this add event could be due to controller resync
		// if the stored object changes, then fire an update event. Otherwise, ignore this event.
		currentNode, exists := c.nodeInfoMap[node.Name]
		if !exists || !nodeEquals(currentNode, k8sNode) {
			c.nodeInfoMap[node.Name] = k8sNode
			updatedNeeded = true
		}
		c.Unlock()
	}

	// update all related services
	if updatedNeeded && c.updateServiceNodePortAddresses() {
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
			Full:   true,
			Reason: model.NewReasonStats(model.ServiceUpdate),
		})
	}
	return nil
}
// FilterOutFunc func for filtering out objects during update callback.
// Returning true means the old/new pair should be treated as equivalent:
// the update event is dropped (and counted as "updatesame" in metrics).
type FilterOutFunc[T controllers.Object] func(old, cur T) bool
// registerHandlers registers a handler for a given informer
// Note: `otype` is used for metric, if empty, no metric will be reported
func registerHandlers[T controllers.ComparableObject](c *Controller,
	informer kclient.Informer[T], otype string,
	handler func(T, T, model.Event) error, filter FilterOutFunc[T],
) {
	// For add/update, the queued task re-reads the object from the informer
	// when it actually runs, so the handler sees the latest state rather than
	// the possibly stale snapshot captured at event time.
	wrappedHandler := func(prev, curr T, event model.Event) error {
		curr = informer.Get(curr.GetName(), curr.GetNamespace())
		if controllers.IsNil(curr) {
			// this can happen when an immediate delete after update
			// the delete event can be handled later
			return nil
		}
		return handler(prev, curr, event)
	}
	informer.AddEventHandler(
		controllers.EventHandler[T]{
			AddFunc: func(obj T) {
				incrementEvent(otype, "add")
				c.queue.Push(func() error {
					return wrappedHandler(ptr.Empty[T](), obj, model.EventAdd)
				})
			},
			UpdateFunc: func(old, cur T) {
				// Allow callers to suppress no-op updates (e.g. resyncs).
				if filter != nil {
					if filter(old, cur) {
						incrementEvent(otype, "updatesame")
						return
					}
				}
				incrementEvent(otype, "update")
				c.queue.Push(func() error {
					return wrappedHandler(old, cur, model.EventUpdate)
				})
			},
			DeleteFunc: func(obj T) {
				incrementEvent(otype, "delete")
				c.queue.Push(func() error {
					// Deletes call handler directly (not wrappedHandler): the
					// object is already gone from the informer, so re-fetching
					// it would incorrectly drop the event.
					return handler(ptr.Empty[T](), obj, model.EventDelete)
				})
			},
		})
}
// HasSynced returns true after the initial state synchronization
func (c *Controller) HasSynced() bool {
	// Either the queue has fully processed the initial snapshot, or we gave
	// up waiting after the configured sync timeout elapsed.
	if c.queue.HasSynced() {
		return true
	}
	return c.initialSyncTimedout.Load()
}
// informersSynced reports whether every informer backing this controller has
// completed its initial synchronization.
func (c *Controller) informersSynced() bool {
	syncFns := []func() bool{
		c.namespaces.HasSynced,
		c.services.HasSynced,
		c.endpoints.slices.HasSynced,
		c.pods.pods.HasSynced,
		c.nodes.HasSynced,
		c.imports.HasSynced,
		c.exports.HasSynced,
		c.networkManager.HasSynced,
	}
	for _, synced := range syncFns {
		if !synced() {
			return false
		}
	}
	return true
}
// syncPods replays an Add event for every pod currently known to the client,
// accumulating (rather than aborting on) per-pod errors.
func (c *Controller) syncPods() error {
	var errs *multierror.Error
	pods := c.podsClient.List(metav1.NamespaceAll, klabels.Everything())
	log.Debugf("initializing %d pods", len(pods))
	for _, pod := range pods {
		errs = multierror.Append(errs, c.pods.onEvent(nil, pod, model.EventAdd))
	}
	return errs.ErrorOrNil()
}
// Run all controllers until a signal is received
func (c *Controller) Run(stop <-chan struct{}) {
	// Optionally declare the initial sync timed out after SyncTimeout, so
	// HasSynced does not block readiness forever on a slow or broken cluster.
	if c.opts.SyncTimeout != 0 {
		time.AfterFunc(c.opts.SyncTimeout, func() {
			if !c.queue.HasSynced() {
				log.Warnf("kube controller for %s initial sync timed out", c.opts.ClusterID)
				c.initialSyncTimedout.Store(true)
			}
		})
	}
	st := time.Now()
	go c.imports.Run(stop)
	go c.exports.Run(stop)
	// Block until every informer has completed its initial list.
	kubelib.WaitForCacheSync("kube controller", stop, c.informersSynced)
	log.Infof("kube controller for %s synced after %v", c.opts.ClusterID, time.Since(st))
	// after the in-order sync we can start processing the queue
	c.queue.Run(stop)
	log.Infof("Controller terminated")
}
// Stop the controller. Only for tests, to simplify the code (defer c.Stop())
func (c *Controller) Stop() {
	// c.stop may be nil when never wired up; closing it signals goroutines
	// started with this channel to exit.
	if c.stop != nil {
		close(c.stop)
	}
}
// Services implements a service catalog operation
func (c *Controller) Services() []*model.Service {
	c.RLock()
	services := make([]*model.Service, 0, len(c.servicesMap))
	for _, s := range c.servicesMap {
		services = append(services, s)
	}
	c.RUnlock()
	// Map iteration order is random; sort by hostname for a stable result.
	// Sorting happens outside the lock to keep the critical section small.
	sort.Slice(services, func(a, b int) bool { return services[a].Hostname < services[b].Hostname })
	return services
}
// GetService implements a service catalog operation by hostname specified.
func (c *Controller) GetService(hostname host.Name) *model.Service {
	c.RLock()
	defer c.RUnlock()
	return c.servicesMap[hostname]
}
// getPodLocality retrieves the locality for a pod.
// The locality comes either from the pod's explicit `istio-locality` label,
// or from the region/zone/subzone topology labels of the node the pod runs
// on, formatted as "region/zone/subzone". Returns "" when unknown.
func (c *Controller) getPodLocality(pod *v1.Pod) string {
	// if pod has `istio-locality` label, skip below ops
	if len(pod.Labels[model.LocalityLabel]) > 0 {
		return model.GetLocalityLabel(pod.Labels[model.LocalityLabel])
	}
	// NodeName is set by the scheduler after the pod is created
	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#late-initialization
	node := c.nodes.Get(pod.Spec.NodeName, "")
	if node == nil {
		// Only warn when the pod was actually scheduled; an unscheduled pod
		// legitimately has no node yet.
		if pod.Spec.NodeName != "" {
			log.Warnf("unable to get node %q for pod %q/%q", pod.Spec.NodeName, pod.Namespace, pod.Name)
		}
		return ""
	}
	// Check the GA topology labels first, then the legacy label names.
	region := getLabelValue(node.ObjectMeta, NodeRegionLabelGA, NodeRegionLabel)
	zone := getLabelValue(node.ObjectMeta, NodeZoneLabelGA, NodeZoneLabel)
	subzone := getLabelValue(node.ObjectMeta, label.TopologySubzone.Name, "")
	if region == "" && zone == "" && subzone == "" {
		return ""
	}
	return region + "/" + zone + "/" + subzone // Format: "%s/%s/%s"
}
// serviceInstancesFromWorkloadInstances builds ServiceInstances for the given
// service port by matching workload instances against the service's label
// selector. Only internal Kubernetes, ClientSideLB, selector-based services
// that exist in this registry are considered; otherwise nil is returned.
func (c *Controller) serviceInstancesFromWorkloadInstances(svc *model.Service, reqSvcPort int) []*model.ServiceInstance {
	// Run through all the workload instances, select ones that match the service labels
	// only if this is a kubernetes internal service and of ClientSideLB (eds) type
	// as InstancesByPort is called by the aggregate controller. We dont want to include
	// workload instances for any other registry
	workloadInstancesExist := !c.workloadInstancesIndex.Empty()
	c.RLock()
	_, inRegistry := c.servicesMap[svc.Hostname]
	c.RUnlock()
	// Only select internal Kubernetes services with selectors
	if !inRegistry || !workloadInstancesExist || svc.Attributes.ServiceRegistry != provider.Kubernetes ||
		svc.MeshExternal || svc.Resolution != model.ClientSideLB || svc.Attributes.LabelSelectors == nil {
		return nil
	}
	selector := labels.Instance(svc.Attributes.LabelSelectors)
	// Get the service port name and target port so that we can construct the service instance
	k8sService := c.services.Get(svc.Attributes.Name, svc.Attributes.Namespace)
	// We did not find the k8s service. We cannot get the targetPort
	if k8sService == nil {
		log.Infof("serviceInstancesFromWorkloadInstances(%s.%s) failed to get k8s service",
			svc.Attributes.Name, svc.Attributes.Namespace)
		return nil
	}
	// Locate the model.Port matching the requested service port number.
	var servicePort *model.Port
	for _, p := range svc.Ports {
		if p.Port == reqSvcPort {
			servicePort = p
			break
		}
	}
	if servicePort == nil {
		return nil
	}
	// Now get the target Port for this service port
	targetPort := findServiceTargetPort(servicePort, k8sService)
	if targetPort.num == 0 {
		// No explicit target port found; default to the service port number.
		targetPort.num = servicePort.Port
	}
	out := make([]*model.ServiceInstance, 0)
	c.workloadInstancesIndex.ForEach(func(wi *model.WorkloadInstance) {
		// Only workload instances in the service's own namespace are eligible.
		if wi.Namespace != svc.Attributes.Namespace {
			return
		}
		if selector.Match(wi.Endpoint.Labels) {
			instance := serviceInstanceFromWorkloadInstance(svc, servicePort, targetPort, wi)
			if instance != nil {
				out = append(out, instance)
			}
		}
	})
	return out
}
// serviceInstanceFromWorkloadInstance converts a workload instance into a
// ServiceInstance for the given service/port pair, resolving the endpoint
// port from the workload's port map when the target port is named.
// Returns nil when a named target port is required by the service but the
// workload does not define it.
func serviceInstanceFromWorkloadInstance(svc *model.Service, servicePort *model.Port,
	targetPort serviceTargetPort, wi *model.WorkloadInstance,
) *model.ServiceInstance {
	// create an instance with endpoint whose service port name matches
	istioEndpoint := wi.Endpoint.ShallowCopy()
	// by default, use the numbered targetPort
	istioEndpoint.EndpointPort = uint32(targetPort.num)
	if targetPort.name != "" {
		// This is a named port, find the corresponding port in the port map
		matchedPort := wi.PortMap[targetPort.name]
		if matchedPort != 0 {
			istioEndpoint.EndpointPort = matchedPort
		} else if targetPort.explicitName {
			// No match found, and we expect the name explicitly in the service, skip this endpoint
			return nil
		}
	}
	istioEndpoint.ServicePortName = servicePort.Name
	return &model.ServiceInstance{
		Service:     svc,
		ServicePort: servicePort,
		Endpoint:    istioEndpoint,
	}
}
// collectWorkloadInstanceEndpoints gathers, for every port of the service,
// the workload-entry-backed endpoints that the service selects. Convenience
// helper used by updateEDS-style calls.
func (c *Controller) collectWorkloadInstanceEndpoints(svc *model.Service) []*model.IstioEndpoint {
	// Nothing to do when there are no workload instances, or the service is
	// not a client-side-LB service with ports.
	if c.workloadInstancesIndex.Empty() || svc.Resolution != model.ClientSideLB || len(svc.Ports) == 0 {
		return nil
	}
	endpoints := make([]*model.IstioEndpoint, 0)
	for _, port := range svc.Ports {
		for _, si := range c.serviceInstancesFromWorkloadInstances(svc, port.Port) {
			endpoints = append(endpoints, si.Endpoint)
		}
	}
	return endpoints
}
// GetProxyServiceTargets returns service targets co-located with a given proxy
// TODO: this code does not return k8s service instances when the proxy's IP is a workload entry
// To tackle this, we need a ip2instance map like what we have in service entry.
func (c *Controller) GetProxyServiceTargets(proxy *model.Proxy) []model.ServiceTarget {
	if len(proxy.IPAddresses) > 0 {
		proxyIP := proxy.IPAddresses[0]
		// look up for a WorkloadEntry; if there are multiple WorkloadEntry(s)
		// with the same IP, choose one deterministically
		workload := workloadinstances.GetInstanceForProxy(c.workloadInstancesIndex, proxy, proxyIP)
		if workload != nil {
			return c.serviceInstancesFromWorkloadInstance(workload)
		}
		pod := c.pods.getPodByProxy(proxy)
		if pod != nil && !proxy.IsVM() {
			// we don't want to use this block for our test "VM" which is actually a Pod.
			if !c.isControllerForProxy(proxy) {
				log.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.Cluster())
				return nil
			}
			// 1. find proxy service by label selector, if not any, there may exist headless service without selector
			// failover to 2
			allServices := c.services.List(pod.Namespace, klabels.Everything())
			if services := getPodServices(allServices, pod); len(services) > 0 {
				out := make([]model.ServiceTarget, 0)
				for _, svc := range services {
					out = append(out, c.GetProxyServiceTargetsByPod(pod, svc)...)
				}
				return out
			}
			// 2. Headless service without selector
			return c.endpoints.GetProxyServiceTargets(proxy)
		}
		// 3. The pod is not present when this is called
		// due to eventual consistency issues. However, we have a lot of information about the pod from the proxy
		// metadata already. Because of this, we can still get most of the information we need.
		// If we cannot accurately construct ServiceEndpoints from just the metadata, this will return an error and we can
		// attempt to read the real pod.
		out, err := c.GetProxyServiceTargetsFromMetadata(proxy)
		if err != nil {
			log.Warnf("GetProxyServiceTargetsFromMetadata for %v failed: %v", proxy.ID, err)
		}
		return out
	}
	// TODO: This could not happen, remove?
	if c.opts.Metrics != nil {
		c.opts.Metrics.AddMetric(model.ProxyStatusNoService, proxy.ID, proxy.ID, "")
	} else {
		log.Infof("Missing metrics env, empty list of services for pod %s", proxy.ID)
	}
	return nil
}
// serviceInstancesFromWorkloadInstance returns the service targets for a
// single workload instance by matching its labels against Kubernetes services
// in the same namespace. Only ClientSideLB services contribute targets, and
// UDP ports are skipped.
func (c *Controller) serviceInstancesFromWorkloadInstance(si *model.WorkloadInstance) []model.ServiceTarget {
	out := make([]model.ServiceTarget, 0)
	// find the workload entry's service by label selector
	// rather than scanning through our internal map of model.services, get the services via the k8s apis
	dummyPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels},
	}
	// find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb
	allServices := c.services.List(si.Namespace, klabels.Everything())
	if k8sServices := getPodServices(allServices, dummyPod); len(k8sServices) > 0 {
		for _, k8sSvc := range k8sServices {
			service := c.GetService(kube.ServiceHostname(k8sSvc.Name, k8sSvc.Namespace, c.opts.DomainSuffix))
			// Note that this cannot be an external service because k8s external services do not have label selectors.
			if service == nil || service.Resolution != model.ClientSideLB {
				// may be a headless service
				continue
			}
			for _, servicePort := range service.Ports {
				if servicePort.Protocol == protocol.UDP {
					continue
				}
				// Now get the target Port for this service port
				targetPort := findServiceTargetPort(servicePort, k8sSvc)
				if targetPort.num == 0 {
					// No explicit target port; fall back to the service port.
					targetPort.num = servicePort.Port
				}
				instance := serviceInstanceFromWorkloadInstance(service, servicePort, targetPort, si)
				if instance != nil {
					out = append(out, model.ServiceInstanceToTarget(instance))
				}
			}
		}
	}
	return out
}
// WorkloadInstanceHandler defines the handler for service instances generated by other registries
// The work is deferred to the controller queue so it is serialized with the
// controller's other event processing.
func (c *Controller) WorkloadInstanceHandler(si *model.WorkloadInstance, event model.Event) {
	c.queue.Push(func() error {
		c.workloadInstanceHandler(si, event)
		return nil
	})
}
// workloadInstanceHandler indexes (or removes) a workload instance and pushes
// EDS for every Kubernetes service in its namespace whose selector matches
// the instance's labels.
func (c *Controller) workloadInstanceHandler(si *model.WorkloadInstance, event model.Event) {
	// ignore malformed workload entries. And ignore any workload entry that does not have a label
	// as there is no way for us to select them
	if si.Namespace == "" || len(si.Endpoint.Labels) == 0 {
		return
	}
	// this is from a workload entry. Store it in separate index so that
	// the InstancesByPort can use these as well as the k8s pods.
	switch event {
	case model.EventDelete:
		c.workloadInstancesIndex.Delete(si)
	default: // add or update
		c.workloadInstancesIndex.Insert(si)
	}
	// find the workload entry's service by label selector
	// rather than scanning through our internal map of model.services, get the services via the k8s apis
	dummyPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: si.Namespace, Labels: si.Endpoint.Labels},
	}
	// We got an instance update, which probably effects EDS. However, EDS is keyed by Hostname. We need to find all
	// Hostnames (services) that were updated and recompute them
	// find the services that map to this workload entry, fire off eds updates if the service is of type client-side lb
	allServices := c.services.List(si.Namespace, klabels.Everything())
	matchedServices := getPodServices(allServices, dummyPod)
	matchedHostnames := slices.Map(matchedServices, func(e *v1.Service) host.Name {
		return kube.ServiceHostname(e.Name, e.Namespace, c.opts.DomainSuffix)
	})
	c.endpoints.pushEDS(matchedHostnames, si.Namespace)
}
// onSystemNamespaceEvent reacts to changes on the system namespace. If the
// namespace update changes the network, a network-wide refresh is triggered.
// Deletes are ignored.
func (c *Controller) onSystemNamespaceEvent(_, ns *v1.Namespace, ev model.Event) error {
	if ev == model.EventDelete {
		return nil
	}
	// network changed, rarely happen
	// refresh pods/endpoints/services
	if changed := c.setNetworkFromNamespace(ns); changed {
		c.onNetworkChange()
	}
	return nil
}
// isControllerForProxy should be used for proxies assumed to be in the kube cluster for this controller. Workload Entries
// may not necessarily pass this check, but we still want to allow kube services to select workload instances.
func (c *Controller) isControllerForProxy(proxy *model.Proxy) bool {
	switch proxy.Metadata.ClusterID {
	case "", c.Cluster():
		// No cluster ID means we cannot tell; assume it belongs to us.
		return true
	default:
		return false
	}
}
// GetProxyServiceTargetsFromMetadata retrieves ServiceTargets using proxy Metadata rather than
// from the Pod. This allows retrieving Instances immediately, regardless of delays in Kubernetes.
// If the proxy doesn't have enough metadata, an error is returned
func (c *Controller) GetProxyServiceTargetsFromMetadata(proxy *model.Proxy) ([]model.ServiceTarget, error) {
	if len(proxy.Labels) == 0 {
		return nil, nil
	}
	if !c.isControllerForProxy(proxy) {
		return nil, fmt.Errorf("proxy is in cluster %v, but controller is for cluster %v", proxy.Metadata.ClusterID, c.Cluster())
	}
	// Create a pod with just the information needed to find the associated Services
	dummyPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: proxy.ConfigNamespace,
			Labels:    proxy.Labels,
		},
	}
	// Find the Service associated with the pod.
	allServices := c.services.List(proxy.ConfigNamespace, klabels.Everything())
	services := getPodServices(allServices, dummyPod)
	if len(services) == 0 {
		return nil, fmt.Errorf("no instances found for %s", proxy.ID)
	}
	out := make([]model.ServiceTarget, 0)
	for _, svc := range services {
		hostname := kube.ServiceHostname(svc.Name, svc.Namespace, c.opts.DomainSuffix)
		// The outer modelService is only used as an existence check: if the
		// regular hostname is unknown, error out before iterating variants.
		modelService := c.GetService(hostname)
		if modelService == nil {
			return nil, fmt.Errorf("failed to find model service for %v", hostname)
		}
		// NOTE(review): the loop variable below intentionally(?) shadows the
		// outer modelService, iterating all variants (regular + MCS) of the
		// service — confirm the shadowing is deliberate.
		for _, modelService := range c.servicesForNamespacedName(config.NamespacedName(svc)) {
			tps := make(map[model.Port]*model.Port)
			tpsList := make([]model.Port, 0)
			for _, port := range svc.Spec.Ports {
				svcPort, f := modelService.Ports.Get(port.Name)
				if !f {
					return nil, fmt.Errorf("failed to get svc port for %v", port.Name)
				}
				var portNum int
				if len(proxy.Metadata.PodPorts) > 0 {
					var err error
					portNum, err = findPortFromMetadata(port, proxy.Metadata.PodPorts)
					if err != nil {
						return nil, fmt.Errorf("failed to find target port for %v: %v", proxy.ID, err)
					}
				} else {
					// most likely a VM - we assume the WorkloadEntry won't remap any ports
					portNum = port.TargetPort.IntValue()
				}
				// Dedupe the target ports here - Service might have configured multiple ports to the same target port,
				// we will have to create only one ingress listener per port and protocol so that we do not endup
				// complaining about listener conflicts.
				targetPort := model.Port{
					Port:     portNum,
					Protocol: svcPort.Protocol,
				}
				if _, exists := tps[targetPort]; !exists {
					tps[targetPort] = svcPort
					tpsList = append(tpsList, targetPort)
				}
			}
			// Iterate over target ports in the same order as defined in service spec, in case of
			// protocol conflict for a port causes unstable protocol selection for a port.
			for _, tp := range tpsList {
				svcPort := tps[tp]
				out = append(out, model.ServiceTarget{
					Service: modelService,
					Port: model.ServiceInstancePort{
						ServicePort: svcPort,
						TargetPort:  uint32(tp.Port),
					},
				})
			}
		}
	}
	return out, nil
}
// GetProxyServiceTargetsByPod returns the service targets a given pod provides
// for a given Kubernetes service, resolving each service port to the pod's
// actual target port. Ports that cannot be resolved are skipped (with a
// warning), not treated as errors.
func (c *Controller) GetProxyServiceTargetsByPod(pod *v1.Pod, service *v1.Service) []model.ServiceTarget {
	var out []model.ServiceTarget
	// Iterate every model.Service variant (regular + MCS) for this service.
	for _, svc := range c.servicesForNamespacedName(config.NamespacedName(service)) {
		tps := make(map[model.Port]*model.Port)
		tpsList := make([]model.Port, 0)
		for _, port := range service.Spec.Ports {
			svcPort, exists := svc.Ports.Get(port.Name)
			if !exists {
				continue
			}
			// find target port
			portNum, err := FindPort(pod, &port)
			if err != nil {
				log.Warnf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
				continue
			}
			// Dedupe the target ports here - Service might have configured multiple ports to the same target port,
			// we will have to create only one ingress listener per port and protocol so that we do not endup
			// complaining about listener conflicts.
			targetPort := model.Port{
				Port:     portNum,
				Protocol: svcPort.Protocol,
			}
			if _, exists := tps[targetPort]; !exists {
				tps[targetPort] = svcPort
				tpsList = append(tpsList, targetPort)
			}
		}
		// Iterate over target ports in the same order as defined in service spec, in case of
		// protocol conflict for a port causes unstable protocol selection for a port.
		for _, tp := range tpsList {
			svcPort := tps[tp]
			out = append(out, model.ServiceTarget{
				Service: svc,
				Port: model.ServiceInstancePort{
					ServicePort: svcPort,
					TargetPort:  uint32(tp.Port),
				},
			})
		}
	}
	return out
}
// GetProxyWorkloadLabels returns the labels of the pod backing the proxy,
// augmented with cluster/locality/network (and, for old proxies, node name)
// metadata. Returns nil when no pod is found for the proxy.
func (c *Controller) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Instance {
	pod := c.pods.getPodByProxy(proxy)
	if pod != nil {
		var locality, nodeName string
		locality = c.getPodLocality(pod)
		if len(proxy.GetNodeName()) == 0 {
			// this can happen for an "old" proxy with no `Metadata.NodeName` set
			// in this case we set the node name in labels on the fly
			// TODO: remove this when 1.16 is EOL?
			nodeName = pod.Spec.NodeName
		}
		// Fast path: nothing to augment, return the pod labels untouched.
		if len(locality) == 0 && len(nodeName) == 0 {
			return pod.Labels
		}
		return labelutil.AugmentLabels(pod.Labels, c.clusterID, locality, nodeName, c.network)
	}
	return nil
}
// AppendServiceHandler implements a service catalog operation.
// Handlers are invoked on service add/update/delete events.
func (c *Controller) AppendServiceHandler(f model.ServiceHandler) {
	c.handlers.AppendServiceHandler(f)
}
// AppendWorkloadHandler implements a service catalog operation.
// Handlers are invoked on workload instance events.
func (c *Controller) AppendWorkloadHandler(f func(*model.WorkloadInstance, model.Event)) {
	c.handlers.AppendWorkloadHandler(f)
}
// AppendNamespaceDiscoveryHandlers register handlers on namespace selected/deselected by discovery selectors change.
func (c *Controller) AppendNamespaceDiscoveryHandlers(f func(string, model.Event)) {
	c.namespaceDiscoveryHandlers = append(c.namespaceDiscoveryHandlers, f)
}
// AppendCrdHandlers register handlers on crd event.
func (c *Controller) AppendCrdHandlers(f func(name string)) {
	c.crdHandlers = append(c.crdHandlers, f)
}
// hostNamesForNamespacedName returns all possible hostnames for the given service name.
// If Kubernetes Multi-Cluster Services (MCS) is enabled, this will contain the regular
// hostname as well as the MCS hostname (clusterset.local). Otherwise, only the regular
// hostname will be returned.
func (c *Controller) hostNamesForNamespacedName(name types.NamespacedName) []host.Name {
	// The regular cluster-local hostname always comes first.
	hosts := []host.Name{
		kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix),
	}
	if features.EnableMCSHost {
		hosts = append(hosts, serviceClusterSetLocalHostname(name))
	}
	return hosts
}
// servicesForNamespacedName returns all services for the given service name.
// If Kubernetes Multi-Cluster Services (MCS) is enabled, this will contain the regular
// service as well as the MCS service (clusterset.local), if available. Otherwise,
// only the regular service will be returned.
func (c *Controller) servicesForNamespacedName(name types.NamespacedName) []*model.Service {
	if features.EnableMCSHost {
		// Look up both the regular and clusterset hostnames under one lock.
		out := make([]*model.Service, 0, 2)
		c.RLock()
		if svc := c.servicesMap[kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix)]; svc != nil {
			out = append(out, svc)
		}
		if svc := c.servicesMap[serviceClusterSetLocalHostname(name)]; svc != nil {
			out = append(out, svc)
		}
		c.RUnlock()
		return out
	}
	if svc := c.GetService(kube.ServiceHostname(name.Name, name.Namespace, c.opts.DomainSuffix)); svc != nil {
		return []*model.Service{svc}
	}
	return nil
}
// serviceUpdateNeedsPush reports whether a service change should trigger an
// xDS push. It compares both the converted model.Service (preConv/currConv)
// and, for target ports that model.Service does not carry, the raw Kubernetes
// specs (prev/curr). With EnableOptimizedServicePush disabled, every change
// pushes.
func serviceUpdateNeedsPush(prev, curr *v1.Service, preConv, currConv *model.Service) bool {
	if !features.EnableOptimizedServicePush {
		return true
	}
	// New service: push only if it is actually exported.
	if preConv == nil {
		return !currConv.Attributes.ExportTo.Contains(visibility.None)
	}
	// if service are not exported, no need to push
	if preConv.Attributes.ExportTo.Contains(visibility.None) &&
		currConv.Attributes.ExportTo.Contains(visibility.None) {
		return false
	}
	// Check if there are any changes we care about by comparing `model.Service`s
	if !preConv.Equals(currConv) {
		return true
	}
	// Also check if target ports are changed since they are not included in `model.Service`
	// `preConv.Equals(currConv)` already makes sure the length of ports is not changed
	if prev != nil && curr != nil {
		for i := 0; i < len(prev.Spec.Ports); i++ {
			if prev.Spec.Ports[i].TargetPort != curr.Spec.Ports[i].TargetPort {
				return true
			}
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"github.com/hashicorp/go-multierror"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/mesh"
filter "istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/util/sets"
)
// initDiscoveryHandlers initializes handlers for discovery selection scoping:
// namespace selected/deselected events from the filter, and meshConfig
// discovery-selector changes from the mesh watcher.
func (c *Controller) initDiscoveryHandlers(meshWatcher mesh.Watcher, discoveryNamespacesFilter filter.DiscoveryNamespacesFilter) {
	c.initDiscoveryNamespaceHandlers(discoveryNamespacesFilter)
	c.initMeshWatcherHandler(meshWatcher, discoveryNamespacesFilter)
}
// handle discovery namespace membership changes triggered by namespace events,
// which requires triggering create/delete event handlers for services, pods, and endpoints,
// and updating the DiscoveryNamespacesFilter.
func (c *Controller) initDiscoveryNamespaceHandlers(discoveryNamespacesFilter filter.DiscoveryNamespacesFilter) {
	discoveryNamespacesFilter.AddHandler(func(ns string, event model.Event) {
		// Select the per-namespace action; any other event kind is ignored.
		var nsHandler func(string)
		switch event {
		case model.EventAdd:
			nsHandler = c.handleSelectedNamespace
		case model.EventDelete:
			nsHandler = c.handleDeselectedNamespace
		default:
			return
		}
		c.queue.Push(func() error {
			nsHandler(ns)
			// This is necessary because namespace handled by discoveryNamespacesFilter may take some time,
			// if a CR is processed before discoveryNamespacesFilter takes effect, it will be ignored.
			if features.EnableEnhancedResourceScoping {
				c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
					Full:   true,
					Reason: model.NewReasonStats(model.NamespaceUpdate),
				})
			}
			return nil
		})
	})
}
// handle discovery namespace membership changes triggered by changes to meshConfig's discovery selectors
// which requires updating the DiscoveryNamespaceFilter and triggering create/delete event handlers for services/pods/endpoints
// for membership changes
func (c *Controller) initMeshWatcherHandler(meshWatcher mesh.Watcher, discoveryNamespacesFilter filter.DiscoveryNamespacesFilter) {
	// The registration handle is retained so it can be removed later.
	c.meshHandlerRegistration = meshWatcher.AddMeshHandler(func() {
		discoveryNamespacesFilter.SelectorsChanged(meshWatcher.Mesh().GetDiscoverySelectors())
	})
}
// HandleSelectedNamespace processes pods, workload entries and services for the selected namespace
// and sends an XDS update as needed.
//
// NOTE: As an interface method of AmbientIndex, this locks the index.
func (a *AmbientIndexImpl) HandleSelectedNamespace(ns string, pods []*corev1.Pod, services []*corev1.Service, c *Controller) {
	a.mu.Lock()
	defer a.mu.Unlock()
	// Accumulate the config keys affected by each replayed Add event.
	updates := sets.New[model.ConfigKey]()
	// Handle Pods.
	for _, p := range pods {
		updates = updates.Merge(a.handlePod(nil, p, model.EventAdd, c))
	}
	// Handle Services.
	for _, s := range services {
		updates = updates.Merge(a.handleService(s, model.EventAdd, c))
	}
	// WorkloadEntries/ServiceEntries are only read from the config cluster.
	if c.configCluster {
		// Handle WorkloadEntries.
		allWorkloadEntries := c.getControllerWorkloadEntries(ns)
		for _, w := range allWorkloadEntries {
			updates = updates.Merge(a.handleWorkloadEntry(nil, w, false, c))
		}
		allServiceEntries := c.getControllerServiceEntries(ns)
		for _, s := range allServiceEntries {
			updates = updates.Merge(a.handleServiceEntry(s, model.EventUpdate, c))
		}
	}
	// Push once for everything that changed, rather than per object.
	if len(updates) > 0 {
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
			ConfigsUpdated: updates,
			Reason:         model.NewReasonStats(model.AmbientUpdate),
		})
	}
}
// handleSelectedNamespace issues create events for all services, pods, and
// endpoints in the newly labeled namespace, updates the ambient index (when
// enabled), and notifies namespace discovery handlers. Errors are collected
// and logged rather than aborting the replay.
func (c *Controller) handleSelectedNamespace(ns string) {
	var errs *multierror.Error
	// for each resource type, issue create events for objects in the labeled namespace
	services := c.services.List(ns, labels.Everything())
	for _, svc := range services {
		errs = multierror.Append(errs, c.onServiceEvent(nil, svc, model.EventAdd))
	}
	pods := c.podsClient.List(ns, labels.Everything())
	for _, pod := range pods {
		errs = multierror.Append(errs, c.pods.onEvent(nil, pod, model.EventAdd))
	}
	if c.ambientIndex != nil {
		c.ambientIndex.HandleSelectedNamespace(ns, pods, services, c)
	}
	errs = multierror.Append(errs, c.endpoints.initializeNamespace(ns, false))
	for _, handler := range c.namespaceDiscoveryHandlers {
		handler(ns, model.EventAdd)
	}
	if err := multierror.Flatten(errs.ErrorOrNil()); err != nil {
		log.Errorf("one or more errors while handling newly labeled discovery namespace %s: %v", ns, err)
	}
}
// handleDeselectedNamespace issues delete events for all services, pods, and
// endpoints in the deselected namespace and notifies discovery handlers.
// It uses the unfiltered listers in order to see resources in the (now
// non-labeled) namespace; errors are collected and logged.
func (c *Controller) handleDeselectedNamespace(ns string) {
	var errs *multierror.Error
	// for each resource type, issue delete events for objects in the deselected namespace
	for _, svc := range c.services.ListUnfiltered(ns, labels.Everything()) {
		errs = multierror.Append(errs, c.onServiceEvent(nil, svc, model.EventDelete))
	}
	for _, pod := range c.podsClient.ListUnfiltered(ns, labels.Everything()) {
		errs = multierror.Append(errs, c.pods.onEvent(nil, pod, model.EventDelete))
	}
	errs = multierror.Append(errs, c.endpoints.deleteEndpoints(ns))
	for _, handler := range c.namespaceDiscoveryHandlers {
		handler(ns, model.EventDelete)
	}
	if err := multierror.Flatten(errs.ErrorOrNil()); err != nil {
		log.Errorf("one or more errors while handling deselected discovery namespace %s: %v", ns, err)
	}
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
v1 "k8s.io/api/core/v1"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pkg/config/labels"
kubeUtil "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/network"
)
// EndpointBuilder is a stateful IstioEndpoint builder with metadata used to build IstioEndpoint
type EndpointBuilder struct {
	// controller provides cluster/network lookups for the endpoint.
	controller controllerInterface

	// labels are the (augmented) workload labels applied to each endpoint.
	labels labels.Instance
	// metaNetwork is the network from proxy metadata; takes precedence in
	// endpointNetwork when non-empty.
	metaNetwork network.ID
	// serviceAccount of the workload backing the endpoints.
	serviceAccount string
	// locality of the workload (label plus cluster ID).
	locality model.Locality
	// tlsMode advertised by the workload.
	tlsMode string
	// workloadName is the owning workload (e.g. deployment) name.
	workloadName string
	// namespace of the workload.
	namespace string

	// Values used to build dns name tables per pod.
	// The hostname of the Pod, by default equals to pod name.
	hostname string
	// If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
	subDomain string

	// If in k8s, the node where the pod resides
	nodeName string
}
// NewEndpointBuilder creates an EndpointBuilder from a pod (which may be
// nil). When the pod is present, locality, service account, labels, DNS
// hostname/subdomain, IP and node are extracted from it; the labels are then
// augmented with cluster/locality/network metadata.
func NewEndpointBuilder(c controllerInterface, pod *v1.Pod) *EndpointBuilder {
	var locality, sa, namespace, hostname, subdomain, ip, node string
	var podLabels labels.Instance
	if pod != nil {
		locality = c.getPodLocality(pod)
		sa = kube.SecureNamingSAN(pod)
		podLabels = pod.Labels
		namespace = pod.Namespace
		subdomain = pod.Spec.Subdomain
		// Hostname is only meaningful when a subdomain is set; it defaults to
		// the pod name when unspecified.
		if subdomain != "" {
			hostname = pod.Spec.Hostname
			if hostname == "" {
				hostname = pod.Name
			}
		}
		ip = pod.Status.PodIP
		node = pod.Spec.NodeName
	}
	dm, _ := kubeUtil.GetDeployMetaFromPod(pod)
	out := &EndpointBuilder{
		controller:     c,
		serviceAccount: sa,
		locality: model.Locality{
			Label:     locality,
			ClusterID: c.Cluster(),
		},
		tlsMode:      kube.PodTLSMode(pod),
		workloadName: dm.Name,
		namespace:    namespace,
		hostname:     hostname,
		subDomain:    subdomain,
		labels:       podLabels,
		nodeName:     node,
	}
	// Resolve the network from the pod IP, then bake it into the labels.
	networkID := out.endpointNetwork(ip)
	out.labels = labelutil.AugmentLabels(podLabels, c.Cluster(), locality, node, networkID)
	return out
}
// NewEndpointBuilderFromMetadata creates an EndpointBuilder from proxy
// metadata instead of a pod, for cases where the pod is unavailable (e.g.
// VMs or eventual-consistency gaps). The network is resolved from the first
// proxy IP when present.
func NewEndpointBuilderFromMetadata(c controllerInterface, proxy *model.Proxy) *EndpointBuilder {
	locality := util.LocalityToString(proxy.Locality)
	out := &EndpointBuilder{
		controller:     c,
		metaNetwork:    proxy.Metadata.Network,
		serviceAccount: proxy.Metadata.ServiceAccount,
		locality: model.Locality{
			Label:     locality,
			ClusterID: c.Cluster(),
		},
		tlsMode:  model.GetTLSModeFromEndpointLabels(proxy.Labels),
		nodeName: proxy.GetNodeName(),
	}
	var networkID network.ID
	if len(proxy.IPAddresses) > 0 {
		networkID = out.endpointNetwork(proxy.IPAddresses[0])
	}
	out.labels = labelutil.AugmentLabels(proxy.Labels, c.Cluster(), locality, out.nodeName, networkID)
	return out
}
// buildIstioEndpoint assembles a model.IstioEndpoint for a single
// address/port pair from the state captured by the builder.
// Returns nil when the builder itself is nil.
func (b *EndpointBuilder) buildIstioEndpoint(
endpointAddress string,
endpointPort int32,
svcPortName string,
discoverabilityPolicy model.EndpointDiscoverabilityPolicy,
healthStatus model.HealthStatus,
) *model.IstioEndpoint {
if b == nil {
return nil
}
// in case pod is not found when init EndpointBuilder.
// If the topology-network label was not populated, resolve the network from
// the endpoint address and write it back into the labels so later calls
// (and the produced endpoint) see a consistent value.
networkID := network.ID(b.labels[label.TopologyNetwork.Name])
if networkID == "" {
networkID = b.endpointNetwork(endpointAddress)
b.labels[label.TopologyNetwork.Name] = string(networkID)
}
return &model.IstioEndpoint{
Labels: b.labels,
ServiceAccount: b.serviceAccount,
Locality: b.locality,
TLSMode: b.tlsMode,
Address: endpointAddress,
EndpointPort: uint32(endpointPort),
ServicePortName: svcPortName,
Network: networkID,
WorkloadName: b.workloadName,
Namespace: b.namespace,
HostName: b.hostname,
SubDomain: b.subDomain,
DiscoverabilityPolicy: discoverabilityPolicy,
HealthStatus: healthStatus,
NodeName: b.nodeName,
}
}
// endpointNetwork returns the mesh network ID for the endpoint IP.
// Empty string if not found.
func (b *EndpointBuilder) endpointNetwork(endpointIP string) network.ID {
// If we're building the endpoint based on proxy meta, prefer the injected ISTIO_META_NETWORK value.
if b.metaNetwork != "" {
return b.metaNetwork
}
// Otherwise ask the controller to resolve the network from IP and labels.
return b.controller.Network(endpointIP, b.labels)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"sync"
"github.com/hashicorp/go-multierror"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/discovery/v1"
"k8s.io/api/discovery/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
mcs "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/util/sets"
)
// endpointSliceController watches EndpointSlice objects and maintains the
// per-service endpoint cache used to serve EDS.
type endpointSliceController struct {
// endpointCache holds the built IstioEndpoints keyed by service hostname and slice name.
endpointCache *endpointSliceCache
// slices is the (object-filtered) client used to list/watch EndpointSlices.
slices kclient.Client[*v1.EndpointSlice]
// c is the owning kube registry Controller.
c *Controller
}
var (
// endpointSliceRequirement excludes slices that carry the MCS service-name
// label; those are managed by the MCS machinery, not this controller.
endpointSliceRequirement = labelRequirement(mcs.LabelServiceName, selection.DoesNotExist, nil)
// endpointSliceSelector matches every non-MCS-managed EndpointSlice.
endpointSliceSelector = klabels.NewSelector().Add(*endpointSliceRequirement)
)
// newEndpointSliceController builds the EndpointSlice-based endpoint
// controller and registers its event handler on a filtered slice client.
func newEndpointSliceController(c *Controller) *endpointSliceController {
	sliceClient := kclient.NewFiltered[*v1.EndpointSlice](c.client, kclient.Filter{ObjectFilter: c.opts.GetFilter()})
	esc := &endpointSliceController{
		c:             c,
		slices:        sliceClient,
		endpointCache: newEndpointSliceCache(),
	}
	registerHandlers[*v1.EndpointSlice](c, sliceClient, "EndpointSlice", esc.onEvent, nil)
	return esc
}
// podArrived re-processes the named EndpointSlice once its pod shows up.
// It is a no-op when the slice no longer exists.
func (esc *endpointSliceController) podArrived(name, ns string) error {
	slice := esc.slices.Get(name, ns)
	if slice == nil {
		return nil
	}
	return esc.onEvent(nil, slice, model.EventAdd)
}
// initializeNamespace initializes endpoints for a given namespace by replaying
// an Add event for every EndpointSlice in it. When filtered is true the
// namespace discovery filter is honored; otherwise all slices are listed.
// Per-slice errors are aggregated and returned together.
func (esc *endpointSliceController) initializeNamespace(ns string, filtered bool) error {
	var errs *multierror.Error
	var slices []*v1.EndpointSlice
	if filtered {
		slices = esc.slices.List(ns, klabels.Everything())
	} else {
		slices = esc.slices.ListUnfiltered(ns, klabels.Everything())
	}
	log.Debugf("initializing %d endpointslices", len(slices))
	for _, slice := range slices {
		errs = multierror.Append(errs, esc.onEvent(nil, slice, model.EventAdd))
	}
	return errs.ErrorOrNil()
}
// deleteEndpoints deletes endpoints for a given namespace by replaying a
// Delete event for every EndpointSlice in it (filter bypassed).
// Per-slice errors are aggregated and returned together.
func (esc *endpointSliceController) deleteEndpoints(ns string) error {
	var errs *multierror.Error
	slices := esc.slices.ListUnfiltered(ns, klabels.Everything())
	log.Debugf("deleting %d endpointslices", len(slices))
	for _, slice := range slices {
		errs = multierror.Append(errs, esc.onEvent(nil, slice, model.EventDelete))
	}
	return errs.ErrorOrNil()
}
// onEvent adapts onEventInternal to the error-returning handler signature;
// the internal handler cannot fail, so the result is always nil.
func (esc *endpointSliceController) onEvent(_, ep *v1.EndpointSlice, event model.Event) error {
	esc.onEventInternal(nil, ep, event)
	return nil
}
// onEventInternal processes an EndpointSlice event end to end: it updates the
// endpoint cache, triggers an EDS push for all hostnames of the owning
// service, and — when that service is headless — schedules an additional
// config push (full or endpoint-scoped) so NDS output stays current.
func (esc *endpointSliceController) onEventInternal(_, ep *v1.EndpointSlice, event model.Event) {
esLabels := ep.GetLabels()
// Ignore MCS-managed slices (those carrying mcs.LabelServiceName).
if !endpointSliceSelector.Matches(klabels.Set(esLabels)) {
return
}
// Update internal endpoint cache no matter what kind of service, even headless service.
// As for gateways, the cluster discovery type is `EDS` for headless service.
namespacedName := getServiceNamespacedName(ep)
log.Debugf("Handle EDS endpoint %s %s in namespace %s", namespacedName.Name, event, namespacedName.Namespace)
if event == model.EventDelete {
esc.deleteEndpointSlice(ep)
} else {
esc.updateEndpointSlice(ep)
}
hostnames := esc.c.hostNamesForNamespacedName(namespacedName)
// Trigger EDS push for all hostnames.
esc.pushEDS(hostnames, namespacedName.Namespace)
// Everything below applies only to headless services (ClusterIP == None).
name := serviceNameForEndpointSlice(esLabels)
namespace := ep.GetNamespace()
svc := esc.c.services.Get(name, namespace)
if svc == nil || svc.Spec.ClusterIP != corev1.ClusterIPNone {
return
}
configsUpdated := sets.New[model.ConfigKey]()
// pureHTTP tracks whether every port of every exported service is HTTP;
// a non-HTTP port forces a full push below.
pureHTTP := true
for _, modelSvc := range esc.c.servicesForNamespacedName(config.NamespacedName(svc)) {
// skip push if it is not exported
if modelSvc.Attributes.ExportTo.Contains(visibility.None) {
continue
}
configsUpdated.Insert(model.ConfigKey{Kind: kind.ServiceEntry, Name: modelSvc.Hostname.String(), Namespace: svc.Namespace})
for _, p := range modelSvc.Ports {
if !p.Protocol.IsHTTP() {
pureHTTP = false
break
}
}
}
if len(configsUpdated) > 0 {
// For headless services, trigger a full push if EnableHeadlessService is true and svc ports are not pure HTTP.
// otherwise push endpoint updates - needed for NDS output.
esc.c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{
// pure HTTP headless services should not need a full push since they do not
// require a Listener based on IP: https://github.com/istio/istio/issues/48207
Full: !pureHTTP && features.EnableHeadlessService,
// TODO: extend and set service instance type, so no need to re-init push context
ConfigsUpdated: configsUpdated,
Reason: model.NewReasonStats(model.HeadlessEndpointUpdate),
})
}
}
// GetProxyServiceTargets returns service targets co-located with the given
// proxy, derived from the EndpointSlices in the proxy's namespace.
// TODO: this code does not return k8s service instances when the proxy's IP
// is a workload entry; an ip2instance map like the one in service entry
// would be needed to cover that.
func (esc *endpointSliceController) GetProxyServiceTargets(proxy *model.Proxy) []model.ServiceTarget {
	var targets []model.ServiceTarget
	for _, slice := range esc.slices.List(proxy.Metadata.Namespace, endpointSliceSelector) {
		targets = append(targets, esc.serviceTargets(slice, proxy)...)
	}
	return targets
}
// serviceNameForEndpointSlice returns the owning service's name, read from
// the slice's standard kubernetes.io/service-name label.
func serviceNameForEndpointSlice(labels map[string]string) string {
return labels[v1.LabelServiceName]
}
// serviceTargets returns the ServiceTargets of the slice's owning service(s)
// whose cached endpoint address matches one of the proxy's IPs. The endpoint
// cache is read under its read lock for the whole scan.
func (esc *endpointSliceController) serviceTargets(ep *v1.EndpointSlice, proxy *model.Proxy) []model.ServiceTarget {
var out []model.ServiceTarget
esc.endpointCache.mu.RLock()
defer esc.endpointCache.mu.RUnlock()
for _, svc := range esc.c.servicesForNamespacedName(getServiceNamespacedName(ep)) {
for _, instance := range esc.endpointCache.get(svc.Hostname) {
// Skip endpoints whose service port is unknown to the service model.
port, f := svc.Ports.Get(instance.ServicePortName)
if !f {
log.Warnf("unexpected state, svc %v missing port %v", svc.Hostname, instance.ServicePortName)
continue
}
// consider multiple IP scenarios
for _, ip := range proxy.IPAddresses {
if ip != instance.Address {
continue
}
// If the endpoint isn't ready, report this
if instance.HealthStatus == model.UnHealthy && esc.c.opts.Metrics != nil {
esc.c.opts.Metrics.AddMetric(model.ProxyStatusEndpointNotReady, proxy.ID, proxy.ID, "")
}
si := model.ServiceTarget{
Service: svc,
Port: model.ServiceInstancePort{
ServicePort: port,
TargetPort: instance.EndpointPort,
},
}
out = append(out, si)
}
}
}
return out
}
// deleteEndpointSlice removes a slice's addresses from the pod bookkeeping
// and drops its cache entries for every hostname of the owning service.
func (esc *endpointSliceController) deleteEndpointSlice(slice *v1.EndpointSlice) {
	key := config.NamespacedName(slice)
	// Let the pod cache drop any pending endpoint events for these addresses.
	for _, ep := range slice.Endpoints {
		for _, addr := range ep.Addresses {
			esc.c.pods.endpointDeleted(key, addr)
		}
	}
	esc.endpointCache.mu.Lock()
	defer esc.endpointCache.mu.Unlock()
	for _, hostName := range esc.c.hostNamesForNamespacedName(getServiceNamespacedName(slice)) {
		// endpointSlice cache update
		if esc.endpointCache.has(hostName) {
			esc.endpointCache.delete(hostName, slice.Name)
		}
	}
}
// updateEndpointSlice rebuilds the cached endpoints of the given slice for
// every hostname served by its owning service.
func (esc *endpointSliceController) updateEndpointSlice(slice *v1.EndpointSlice) {
	hosts := esc.c.hostNamesForNamespacedName(getServiceNamespacedName(slice))
	for _, h := range hosts {
		esc.updateEndpointCacheForSlice(h, slice)
	}
}
// endpointHealthStatus maps EndpointSlice conditions to a model health state.
// A nil condition pointer is treated as true, per the EndpointSlice API
// convention. Non-ready endpoints of a service using persistent sessions are
// reported as Draining when they are still serving/terminating; all other
// non-ready endpoints are UnHealthy.
func endpointHealthStatus(svc *model.Service, e v1.Endpoint) model.HealthStatus {
	if e.Conditions.Ready == nil || *e.Conditions.Ready {
		return model.Healthy
	}
	serving := e.Conditions.Serving == nil || *e.Conditions.Serving
	terminating := e.Conditions.Terminating == nil || *e.Conditions.Terminating
	usesPersistentSessions := features.PersistentSessionLabel != "" &&
		svc != nil &&
		svc.Attributes.Labels[features.PersistentSessionLabel] != ""
	if usesPersistentSessions && serving && terminating {
		return model.Draining
	}
	return model.UnHealthy
}
// updateEndpointCacheForSlice rebuilds the cached IstioEndpoints contributed
// by one EndpointSlice for the given hostname. FQDN-type slices are skipped.
// Endpoints whose expected Pod is not yet cached are skipped (a resync is
// registered inside getPod).
func (esc *endpointSliceController) updateEndpointCacheForSlice(hostName host.Name, slice *v1.EndpointSlice) {
var endpoints []*model.IstioEndpoint
if slice.AddressType == v1.AddressTypeFQDN {
// TODO(https://github.com/istio/istio/issues/34995) support FQDN endpointslice
return
}
svc := esc.c.GetService(hostName)
discoverabilityPolicy := esc.c.exports.EndpointDiscoverabilityPolicy(svc)
for _, e := range slice.Endpoints {
// Draining tracking is only enabled if persistent sessions is enabled.
// If we start using them for other features, this can be adjusted.
healthStatus := endpointHealthStatus(svc, e)
for _, a := range e.Addresses {
pod, expectedPod := getPod(esc.c, a, &metav1.ObjectMeta{Name: slice.Name, Namespace: slice.Namespace}, e.TargetRef, hostName)
if pod == nil && expectedPod {
// Pod referenced but not cached yet; do not emit a possibly-wrong endpoint.
continue
}
builder := NewEndpointBuilder(esc.c, pod)
// EDS and ServiceEntry use name for service port - ADS will need to map to numbers.
for _, port := range slice.Ports {
// Port number/name are optional pointers in the API; zero values are used when absent.
var portNum int32
if port.Port != nil {
portNum = *port.Port
}
var portName string
if port.Name != nil {
portName = *port.Name
}
istioEndpoint := builder.buildIstioEndpoint(a, portNum, portName, discoverabilityPolicy, healthStatus)
endpoints = append(endpoints, istioEndpoint)
}
}
}
esc.endpointCache.Update(hostName, slice.Name, endpoints)
}
// buildIstioEndpointsWithService returns the cached IstioEndpoints for
// hostName, derived from the EndpointSlices of service name/namespace.
// When updateCache is set, the cache entries for those slices are rebuilt
// first. Returns nil when the service has no slices.
func (esc *endpointSliceController) buildIstioEndpointsWithService(name, namespace string, hostName host.Name, updateCache bool) []*model.IstioEndpoint {
	slices := esc.slices.List(namespace, endpointSliceSelectorForService(name))
	if len(slices) == 0 {
		log.Debugf("endpoint slices of (%s, %s) not found", name, namespace)
		return nil
	}
	if updateCache {
		// A cache update was requested. Rebuild the endpoints for these slices.
		for _, slice := range slices {
			esc.updateEndpointCacheForSlice(hostName, slice)
		}
	}
	return esc.endpointCache.Get(hostName)
}
// getServiceNamespacedName derives the owning service's namespaced name from
// an EndpointSlice: the slice's namespace plus its service-name label.
func getServiceNamespacedName(slice *v1.EndpointSlice) types.NamespacedName {
	svcName := serviceNameForEndpointSlice(slice.GetLabels())
	return types.NamespacedName{Namespace: slice.GetNamespace(), Name: svcName}
}
// endpointKey uniquely identifies an endpoint by IP and service port name.
// This is used for deduping endpoints across slices.
type endpointKey struct {
// ip is the endpoint address.
ip string
// port is the service port name (not the number).
port string
}
// endpointSliceCache stores built IstioEndpoints, keyed first by service
// hostname and then by the EndpointSlice name that contributed them.
type endpointSliceCache struct {
// mu guards endpointsByServiceAndSlice; exported methods take it, the
// lowercase helpers assume the caller already holds it.
mu sync.RWMutex
endpointsByServiceAndSlice map[host.Name]map[string][]*model.IstioEndpoint
}
// newEndpointSliceCache returns an empty, ready-to-use endpointSliceCache.
func newEndpointSliceCache() *endpointSliceCache {
	return &endpointSliceCache{
		endpointsByServiceAndSlice: map[host.Name]map[string][]*model.IstioEndpoint{},
	}
}
// Update stores (or, when endpoints is empty, clears) the endpoints one
// slice contributes for hostname, under the write lock.
func (e *endpointSliceCache) Update(hostname host.Name, slice string, endpoints []*model.IstioEndpoint) {
e.mu.Lock()
defer e.mu.Unlock()
e.update(hostname, slice, endpoints)
}
// update stores the endpoints one slice contributes for hostname.
// Caller must hold the write lock.
//
// When endpoints is empty the slice's entry is removed. The original code
// deleted the entry but then unconditionally re-assigned
// endpointsByServiceAndSlice[hostname][slice] = endpoints, which recreated
// the key with an empty value and made the delete dead code; it also never
// removed an emptied hostname bucket. We now return early after the delete
// and, mirroring delete(), drop the hostname bucket when it becomes empty so
// the cache does not accumulate empty map entries.
func (e *endpointSliceCache) update(hostname host.Name, slice string, endpoints []*model.IstioEndpoint) {
	if len(endpoints) == 0 {
		delete(e.endpointsByServiceAndSlice[hostname], slice)
		if len(e.endpointsByServiceAndSlice[hostname]) == 0 {
			delete(e.endpointsByServiceAndSlice, hostname)
		}
		return
	}
	if _, f := e.endpointsByServiceAndSlice[hostname]; !f {
		e.endpointsByServiceAndSlice[hostname] = make(map[string][]*model.IstioEndpoint)
	}
	// We will always overwrite. A conflict here means an endpoint is transitioning
	// from one slice to another See
	// https://github.com/kubernetes/website/blob/master/content/en/docs/concepts/services-networking/endpoint-slices.md#duplicate-endpoints
	// In this case, we can always assume and update is fresh, although older slices
	// we have not gotten updates may be stale; therefore we always take the new
	// update.
	e.endpointsByServiceAndSlice[hostname][slice] = endpoints
}
// Delete removes the given slice's entry for hostname under the write lock.
func (e *endpointSliceCache) Delete(hostname host.Name, slice string) {
e.mu.Lock()
defer e.mu.Unlock()
e.delete(hostname, slice)
}
// delete removes the slice's entry for hostname, dropping the whole hostname
// bucket when it becomes empty. Caller must hold the write lock.
func (e *endpointSliceCache) delete(hostname host.Name, slice string) {
	slicesForHost := e.endpointsByServiceAndSlice[hostname]
	delete(slicesForHost, slice)
	if len(slicesForHost) == 0 {
		delete(e.endpointsByServiceAndSlice, hostname)
	}
}
// Get returns the deduplicated endpoints for hostname under the read lock.
func (e *endpointSliceCache) Get(hostname host.Name) []*model.IstioEndpoint {
e.mu.RLock()
defer e.mu.RUnlock()
return e.get(hostname)
}
// get returns the endpoints for hostname across all of its slices,
// deduplicated by (address, service port name). Caller must hold at least
// the read lock.
func (e *endpointSliceCache) get(hostname host.Name) []*model.IstioEndpoint {
	var out []*model.IstioEndpoint
	seen := sets.New[endpointKey]()
	for _, sliceEps := range e.endpointsByServiceAndSlice[hostname] {
		for _, ep := range sliceEps {
			k := endpointKey{ip: ep.Address, port: ep.ServicePortName}
			if seen.InsertContains(k) {
				// Duplicate across slices. update() already handles conflict
				// resolution, so any one copy is as good as another.
				continue
			}
			out = append(out, ep)
		}
	}
	return out
}
// Has reports whether any slice entry exists for hostname, under the read lock.
func (e *endpointSliceCache) Has(hostname host.Name) bool {
e.mu.RLock()
defer e.mu.RUnlock()
return e.has(hostname)
}
// has reports whether any slice entry exists for hostname.
// Caller must hold at least the read lock.
func (e *endpointSliceCache) has(hostname host.Name) bool {
	_, ok := e.endpointsByServiceAndSlice[hostname]
	return ok
}
// endpointSliceSelectorForService returns a selector matching the slices of
// the named service while still excluding MCS-managed slices
// (endpointSliceRequirement).
// NOTE(review): this uses v1beta1.LabelServiceName whereas
// serviceNameForEndpointSlice reads v1.LabelServiceName. Both constants carry
// the same value, so behavior is unaffected, but migrating to the v1 constant
// would be more consistent — confirm v1beta1 has no other uses in this file
// before dropping the import.
func endpointSliceSelectorForService(name string) klabels.Selector {
return klabels.Set(map[string]string{
v1beta1.LabelServiceName: name,
}).AsSelectorPreValidated().Add(*endpointSliceRequirement)
}
// pushEDS sends an EDS update for each hostname, optionally merging in
// workload-entry endpoints when services may select WorkloadEntries.
func (esc *endpointSliceController) pushEDS(hostnames []host.Name, namespace string) {
shard := model.ShardKeyFromRegistry(esc.c)
// Even though we just read from the cache, we need the full lock to ensure pushEDS
// runs sequentially when `EnableK8SServiceSelectWorkloadEntries` is enabled. Otherwise,
// we may end up with eds updates can go out of order with workload entry updates causing
// incorrect endpoints. For regular endpoint updates, pushEDS is already serialized
// because the events are queued.
esc.endpointCache.mu.Lock()
defer esc.endpointCache.mu.Unlock()
for _, hostname := range hostnames {
endpoints := esc.endpointCache.get(hostname)
if features.EnableK8SServiceSelectWorkloadEntries {
svc := esc.c.GetService(hostname)
if svc != nil {
// Merge WorkloadEntry-backed endpoints selected by this service.
fep := esc.c.collectWorkloadInstanceEndpoints(svc)
endpoints = append(endpoints, fep...)
} else {
log.Debugf("Handle EDS endpoint: skip collecting workload entry endpoints, service %s/ has not been populated",
hostname)
}
}
esc.c.opts.XDSUpdater.EDSUpdate(shard, string(hostname), namespace, endpoints)
}
}
// getPod fetches the Pod backing an endpoint, by target reference or by IP.
//
// A nil pod may be returned for two reasons, distinguished by the second
// return value (expectPod):
//   - the endpoint has no associated Pod (expectPod == false);
//   - the endpoint references a Pod that is not yet in the cache
//     (expectPod == true), e.g. due to eventual consistency or out-of-order
//     events. In that case a resync is registered so the endpoint event is
//     replayed when the pod arrives, and the caller should not proceed with
//     the endpoint — acting on incomplete information can affect correctness
//     and security.
//
// Note: this is only used by the endpointslice controller.
func getPod(c *Controller, ip string, ep *metav1.ObjectMeta, targetRef *corev1.ObjectReference, host host.Name) (*corev1.Pod, bool) {
	pod := c.getPod(ip, ep.Namespace, targetRef)
	expectPod := targetRef != nil && targetRef.Kind == "Pod"
	if expectPod && pod == nil {
		c.registerEndpointResync(ep, ip, host)
	}
	return pod, expectPod
}
// registerEndpointResync records that an endpoint event arrived before its
// pod (PodCache is eventually consistent) and arranges for the endpoint
// event to be replayed once the pod shows up.
func (c *Controller) registerEndpointResync(ep *metav1.ObjectMeta, ip string, host host.Name) {
	log.Debugf("Endpoint without pod %s %s.%s", ip, ep.Name, ep.Namespace)
	endpointsWithNoPods.Increment()
	if c.opts.Metrics != nil {
		c.opts.Metrics.AddMetric(model.EndpointNoPod, string(host), "", ip)
	}
	// Tell pod cache we want to queue the endpoint event when this pod arrives.
	c.pods.queueEndpointEventOnPodArrival(config.NamespacedName(ep), ip)
}
// getPod resolves the Pod behind an endpoint, preferring the target reference
// and falling back to an IP lookup. Returns nil when the endpoint has no Pod
// or the Pod is not (yet) cached.
func (c *Controller) getPod(ip string, namespace string, targetRef *corev1.ObjectReference) *corev1.Pod {
	if targetRef != nil && targetRef.Kind == "Pod" {
		return c.pods.getPodByKey(types.NamespacedName{Name: targetRef.Name, Namespace: targetRef.Namespace})
	}
	// Manually-controlled endpoint: all we have is a raw IP, which (with
	// hostNetwork) may be shared by many pods. We still want pod metadata
	// (service account, labels), so guess by picking a pod with that IP in
	// the same namespace.
	for _, candidate := range c.pods.getPodsByIP(ip) {
		if candidate.Namespace == namespace {
			// Might not be right, but best we can do.
			return candidate
		}
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
"istio.io/istio/pilot/pkg/serviceregistry/util/xdsfake"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/mesh"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/kclient/clienttest"
filter "istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/queue"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/assert"
)
const (
// defaultFakeDomainSuffix is the cluster domain suffix used by fake
// controllers when FakeControllerOptions.DomainSuffix is unset.
defaultFakeDomainSuffix = "company.com"
)
// FakeControllerOptions configures NewFakeControllerWithOptions. Zero-valued
// fields are filled in with test defaults (fake client, fixed mesh watcher,
// fake XDS updater, defaultFakeDomainSuffix).
type FakeControllerOptions struct {
// Client to use; a fresh fake client is created when nil.
Client kubelib.Client
// CRDs to register on the fake client before running.
CRDs []schema.GroupVersionResource
NetworksWatcher mesh.NetworksWatcher
// MeshWatcher defaults to a fixed watcher over an empty MeshConfig.
MeshWatcher mesh.Watcher
ServiceHandler model.ServiceHandler
ClusterID cluster.ID
WatchedNamespaces string
// DomainSuffix defaults to defaultFakeDomainSuffix when empty.
DomainSuffix string
// XDSUpdater defaults to a fake updater backed by an EndpointIndex.
XDSUpdater model.XDSUpdater
DiscoveryNamespacesFilter filter.DiscoveryNamespacesFilter
// Stop channel; a test-scoped stop is created when nil.
Stop chan struct{}
// SkipRun skips starting the controller and waiting for cache sync.
SkipRun bool
ConfigController model.ConfigStoreController
ConfigCluster bool
SystemNamespace string
}
// FakeController wraps a real Controller for tests, exposing the endpoint
// index created when no external XDSUpdater was supplied (nil otherwise).
type FakeController struct {
*Controller
Endpoints *model.EndpointIndex
}
// NewFakeControllerWithOptions builds a Controller wired for tests, applying
// defaults for any unset option, registering CRDs, and (unless SkipRun)
// starting it and waiting for cache sync. The returned *xdsfake.Updater is
// nil when the caller supplied its own XDSUpdater.
func NewFakeControllerWithOptions(t test.Failer, opts FakeControllerOptions) (*FakeController, *xdsfake.Updater) {
xdsUpdater := opts.XDSUpdater
var endpoints *model.EndpointIndex
// Default updater: a fake that delegates into a fresh endpoint index.
if xdsUpdater == nil {
endpoints = model.NewEndpointIndex(model.DisabledCache{})
delegate := model.NewEndpointIndexUpdater(endpoints)
xdsUpdater = xdsfake.NewWithDelegate(delegate)
}
domainSuffix := defaultFakeDomainSuffix
if opts.DomainSuffix != "" {
domainSuffix = opts.DomainSuffix
}
if opts.Client == nil {
opts.Client = kubelib.NewFakeClient()
}
if opts.MeshWatcher == nil {
opts.MeshWatcher = mesh.NewFixedWatcher(&meshconfig.MeshConfig{})
}
meshServiceController := aggregate.NewController(aggregate.Options{MeshHolder: opts.MeshWatcher})
options := Options{
DomainSuffix: domainSuffix,
XDSUpdater: xdsUpdater,
Metrics: &model.Environment{},
MeshNetworksWatcher: opts.NetworksWatcher,
MeshWatcher: opts.MeshWatcher,
ClusterID: opts.ClusterID,
DiscoveryNamespacesFilter: opts.DiscoveryNamespacesFilter,
MeshServiceController: meshServiceController,
ConfigCluster: opts.ConfigCluster,
ConfigController: opts.ConfigController,
SystemNamespace: opts.SystemNamespace,
}
c := NewController(opts.Client, options)
meshServiceController.AddRegistry(c)
if opts.ServiceHandler != nil {
c.AppendServiceHandler(opts.ServiceHandler)
}
t.Cleanup(func() {
c.client.Shutdown()
})
if !opts.SkipRun {
// Ensure the controller's queue drains before the test ends.
t.Cleanup(func() {
assert.NoError(t, queue.WaitForClose(c.queue, time.Second*5))
})
}
c.stop = opts.Stop
if c.stop == nil {
// If we created the stop, clean it up. Otherwise, caller is responsible
c.stop = test.NewStop(t)
}
for _, crd := range opts.CRDs {
clienttest.MakeCRD(t, c.client, crd)
}
opts.Client.RunAndWait(c.stop)
// Only expose the fake updater when we actually created one.
var fx *xdsfake.Updater
if x, ok := xdsUpdater.(*xdsfake.Updater); ok {
fx = x
}
if !opts.SkipRun {
go c.Run(c.stop)
kubelib.WaitForCacheSync("test", c.stop, c.HasSynced)
}
return &FakeController{Controller: c, Endpoints: endpoints}, fx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"strings"
"sync"
"golang.org/x/sync/errgroup"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"istio.io/api/annotation"
"istio.io/istio/pilot/pkg/config/kube/crdclient"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/keycertbundle"
"istio.io/istio/pilot/pkg/leaderelection"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/server"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/serviceentry"
"istio.io/istio/pkg/backoff"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/multicluster"
"istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/webhooks"
)
const (
// webhookName is the name of the sidecar-injector webhook config whose
// caBundle is patched for remote clusters - no need to change it.
webhookName = "sidecar-injector.istio.io"
)
// Compile-time check: Multicluster implements the ClusterHandler callbacks.
var _ multicluster.ClusterHandler = &Multicluster{}
// kubeController bundles a kube registry Controller with the optional
// WorkloadEntry controller created for cross-cluster workload entries.
type kubeController struct {
*Controller
workloadEntryController *serviceentry.Controller
}
// Multicluster structure holds the remote kube Controllers and multicluster specific attributes.
type Multicluster struct {
// serverID of this pilot instance used for leader election
serverID string
// options to use when creating kube controllers
opts Options
// client for reading remote-secrets to initialize multicluster registries
client kubernetes.Interface
// s runs long-lived components (namespace/serviceexport controllers).
s server.Instance
// closing is set under m by close(); addCluster refuses new clusters once set.
closing bool
serviceEntryController *serviceentry.Controller
configController model.ConfigStoreController
XDSUpdater model.XDSUpdater
m sync.Mutex // protects remoteKubeControllers
remoteKubeControllers map[cluster.ID]*kubeController
clusterLocal model.ClusterLocalProvider
// startNsController enables running the namespace controller per cluster.
startNsController bool
// caBundleWatcher provides the CA bundle used for webhook/namespace patching.
caBundleWatcher *keycertbundle.Watcher
// revision of this control plane, used in leader-election lock names.
revision string
// secretNamespace where we get cluster-access secrets
secretNamespace string
}
// NewMulticluster initializes the data structure that stores multicluster
// information: the remote kube controllers plus everything needed to create
// and run per-cluster controllers.
func NewMulticluster(
	serverID string,
	kc kubernetes.Interface,
	secretNamespace string,
	opts Options,
	serviceEntryController *serviceentry.Controller,
	configController model.ConfigStoreController,
	caBundleWatcher *keycertbundle.Watcher,
	revision string,
	startNsController bool,
	clusterLocal model.ClusterLocalProvider,
	s server.Instance,
) *Multicluster {
	return &Multicluster{
		serverID:               serverID,
		opts:                   opts,
		serviceEntryController: serviceEntryController,
		configController:       configController,
		startNsController:      startNsController,
		caBundleWatcher:        caBundleWatcher,
		revision:               revision,
		XDSUpdater:             opts.XDSUpdater,
		remoteKubeControllers:  make(map[cluster.ID]*kubeController),
		clusterLocal:           clusterLocal,
		secretNamespace:        secretNamespace,
		client:                 kc,
		s:                      s,
	}
}
// Run blocks until stopCh is closed, then tears down all member clusters.
func (m *Multicluster) Run(stopCh <-chan struct{}) error {
// Wait for server shutdown.
<-stopCh
return m.close()
}
// close marks the Multicluster as shutting down and removes every member
// cluster concurrently, waiting until all removals complete.
func (m *Multicluster) close() error {
	// Snapshot the member cluster IDs under the lock. ClusterDeleted takes
	// the lock itself, so it must run only after we release it.
	m.m.Lock()
	m.closing = true
	clusterIDs := make([]cluster.ID, 0, len(m.remoteKubeControllers))
	for id := range m.remoteKubeControllers {
		clusterIDs = append(clusterIDs, id)
	}
	m.m.Unlock()
	// Remove all the clusters in parallel.
	g, _ := errgroup.WithContext(context.Background())
	for _, id := range clusterIDs {
		id := id // capture a per-iteration copy for the goroutine
		g.Go(func() error {
			m.ClusterDeleted(id)
			return nil
		})
	}
	return g.Wait()
}
// ClusterAdded is passed to the secret controller as a callback to be called
// when a remote cluster is added. It registers the cluster's controllers and
// sets up all handlers that watch for resources being added, deleted or
// changed on the remote cluster. clusterStopCh is closed when the cluster is
// removed.
func (m *Multicluster) ClusterAdded(cluster *multicluster.Cluster, clusterStopCh <-chan struct{}) {
	m.m.Lock()
	kc, registry, opts, configCluster := m.addCluster(cluster)
	m.m.Unlock()
	if kc == nil {
		// addCluster saw m.closing; nothing to initialize.
		return
	}
	m.initializeCluster(cluster, kc, registry, *opts, configCluster, clusterStopCh)
}
// ClusterUpdated is passed to the secret controller as a callback to be
// called when a remote cluster is updated: the old controllers are deleted
// and fresh ones created. stop is closed when this cluster is removed.
func (m *Multicluster) ClusterUpdated(cluster *multicluster.Cluster, stop <-chan struct{}) {
	m.m.Lock()
	m.deleteCluster(cluster.ID)
	kc, registry, opts, configCluster := m.addCluster(cluster)
	m.m.Unlock()
	if kc == nil {
		// addCluster saw m.closing; nothing to (re-)initialize.
		return
	}
	m.initializeCluster(cluster, kc, registry, *opts, configCluster, stop)
}
// ClusterDeleted is passed to the secret controller as a callback to be
// called when a remote cluster is deleted. It removes the cluster's
// controllers and triggers a full push so the remote resources are cleared
// from caches.
func (m *Multicluster) ClusterDeleted(clusterID cluster.ID) {
	m.m.Lock()
	m.deleteCluster(clusterID)
	m.m.Unlock()
	if updater := m.XDSUpdater; updater != nil {
		updater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.ClusterUpdate)})
	}
}
// addCluster adds cluster related resources and updates internal structures.
// This is not thread safe - the caller must hold m.m.
// Returns all-nil/false when the Multicluster is closing.
func (m *Multicluster) addCluster(cluster *multicluster.Cluster) (*kubeController, *Controller, *Options, bool) {
if m.closing {
return nil, nil, nil, false
}
client := cluster.Client
// configCluster: this cluster is the one the control plane reads config from.
configCluster := m.opts.ClusterID == cluster.ID
options := m.opts
options.ClusterID = cluster.ID
// Remote clusters get a bounded sync timeout so a slow/unreachable remote
// cannot block startup indefinitely.
if !configCluster {
options.SyncTimeout = features.RemoteClusterTimeout
}
// config cluster's DiscoveryNamespacesFilter is shared by both configController and serviceController
// it is initiated in bootstrap initMulticluster function, pass to service controller to update it.
// For other clusters, it should filter by its own cluster's namespace.
if !configCluster {
options.DiscoveryNamespacesFilter = nil
}
options.ConfigController = m.configController
log.Infof("Initializing Kubernetes service registry %q", options.ClusterID)
options.ConfigCluster = configCluster
kubeRegistry := NewController(client, options)
kubeController := &kubeController{
Controller: kubeRegistry,
}
m.remoteKubeControllers[cluster.ID] = kubeController
return kubeController, kubeRegistry, &options, configCluster
}
// initializeCluster initializes the cluster by setting various handlers.
func (m *Multicluster) initializeCluster(cluster *multicluster.Cluster, kubeController *kubeController, kubeRegistry *Controller,
options Options, configCluster bool, clusterStopCh <-chan struct{},
) {
client := cluster.Client
if m.serviceEntryController != nil && features.EnableServiceEntrySelectPods {
// Add an instance handler in the kubernetes registry to notify service entry store about pod events
kubeRegistry.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler)
}
if configCluster && m.serviceEntryController != nil && features.EnableEnhancedResourceScoping {
kubeRegistry.AppendNamespaceDiscoveryHandlers(m.serviceEntryController.NamespaceDiscoveryHandler)
}
// TODO implement deduping in aggregate registry to allow multiple k8s registries to handle WorkloadEntry
if features.EnableK8SServiceSelectWorkloadEntries {
if m.serviceEntryController != nil && configCluster {
// Add an instance handler in the service entry store to notify kubernetes about workload entry events
m.serviceEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler)
} else if features.WorkloadEntryCrossCluster {
// TODO only do this for non-remotes, can't guarantee CRDs in remotes (depends on https://github.com/istio/istio/pull/29824)
configStore := createWleConfigStore(client, m.revision, options)
kubeController.workloadEntryController = serviceentry.NewWorkloadEntryController(
configStore, options.XDSUpdater,
serviceentry.WithClusterID(cluster.ID),
serviceentry.WithNetworkIDCb(kubeRegistry.Network))
// Services can select WorkloadEntry from the same cluster. We only duplicate the Service to configure kube-dns.
kubeController.workloadEntryController.AppendWorkloadHandler(kubeRegistry.WorkloadInstanceHandler)
// ServiceEntry selects WorkloadEntry from remote cluster
kubeController.workloadEntryController.AppendWorkloadHandler(m.serviceEntryController.WorkloadInstanceHandler)
if features.EnableEnhancedResourceScoping {
kubeRegistry.AppendNamespaceDiscoveryHandlers(kubeController.workloadEntryController.NamespaceDiscoveryHandler)
}
m.opts.MeshServiceController.AddRegistryAndRun(kubeController.workloadEntryController, clusterStopCh)
go configStore.Run(clusterStopCh)
}
}
// namespacecontroller requires discoverySelectors only if EnableEnhancedResourceScoping feature flag is set.
var discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter
if features.EnableEnhancedResourceScoping {
discoveryNamespacesFilter = kubeRegistry.opts.DiscoveryNamespacesFilter
}
// run after WorkloadHandler is added
m.opts.MeshServiceController.AddRegistryAndRun(kubeRegistry, clusterStopCh)
go func() {
var shouldLead bool
if !configCluster {
shouldLead = m.checkShouldLead(client, options.SystemNamespace, clusterStopCh)
log.Infof("should join leader-election for cluster %s: %t", cluster.ID, shouldLead)
}
if m.startNsController && (shouldLead || configCluster) {
// Block server exit on graceful termination of the leader controller.
m.s.RunComponentAsyncAndWait("namespace controller", func(_ <-chan struct{}) error {
log.Infof("joining leader-election for %s in %s on cluster %s",
leaderelection.NamespaceController, options.SystemNamespace, options.ClusterID)
election := leaderelection.
NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.NamespaceController, m.revision, !configCluster, client).
AddRunFunction(func(leaderStop <-chan struct{}) {
log.Infof("starting namespace controller for cluster %s", cluster.ID)
nc := NewNamespaceController(client, m.caBundleWatcher, discoveryNamespacesFilter)
// Start informers again. This fixes the case where informers for namespace do not start,
// as we create them only after acquiring the leader lock
// Note: stop here should be the overall pilot stop, NOT the leader election stop. We are
// basically lazy loading the informer, if we stop it when we lose the lock we will never
// recreate it again.
client.RunAndWait(clusterStopCh)
nc.Run(leaderStop)
})
election.Run(clusterStopCh)
return nil
})
}
// Set up injection webhook patching for remote clusters we are controlling.
// The config cluster has this patching set up elsewhere. We may eventually want to move it here.
// We can not use leader election for webhook patching because each revision needs to patch its own
// webhook.
if shouldLead && !configCluster && m.caBundleWatcher != nil {
// Patch injection webhook cert
// This requires RBAC permissions - a low-priv Istiod should not attempt to patch but rely on
// operator or CI/CD
if features.InjectionWebhookConfigName != "" {
log.Infof("initializing injection webhook cert patcher for cluster %s", cluster.ID)
patcher, err := webhooks.NewWebhookCertPatcher(client, m.revision, webhookName, m.caBundleWatcher)
if err != nil {
log.Errorf("could not initialize webhook cert patcher: %v", err)
} else {
go patcher.Run(clusterStopCh)
}
}
}
}()
// setting up the serviceexport controller if and only if it is turned on in the meshconfig.
if features.EnableMCSAutoExport {
log.Infof("joining leader-election for %s in %s on cluster %s",
leaderelection.ServiceExportController, options.SystemNamespace, options.ClusterID)
// Block server exit on graceful termination of the leader controller.
m.s.RunComponentAsyncAndWait("auto serviceexport controller", func(_ <-chan struct{}) error {
leaderelection.
NewLeaderElectionMulticluster(options.SystemNamespace, m.serverID, leaderelection.ServiceExportController, m.revision, !configCluster, client).
AddRunFunction(func(leaderStop <-chan struct{}) {
serviceExportController := newAutoServiceExportController(autoServiceExportOptions{
Client: client,
ClusterID: options.ClusterID,
DomainSuffix: options.DomainSuffix,
ClusterLocal: m.clusterLocal,
})
// Start informers again. This fixes the case where informers do not start,
// as we create them only after acquiring the leader lock
// Note: stop here should be the overall pilot stop, NOT the leader election stop. We are
// basically lazy loading the informer, if we stop it when we lose the lock we will never
// recreate it again.
client.RunAndWait(clusterStopCh)
serviceExportController.Run(leaderStop)
}).Run(clusterStopCh)
return nil
})
}
}
// checkShouldLead returns true if the caller should attempt leader election for a remote cluster.
// It looks up the system namespace on the remote cluster and, if found, checks whether this
// istiod's cluster ID is listed (or wildcarded) in the topology.istio.io/controlPlaneClusters
// annotation. Lookups are retried with exponential backoff until stop is closed.
func (m *Multicluster) checkShouldLead(client kubelib.Client, systemNamespace string, stop <-chan struct{}) bool {
	var res bool
	if features.ExternalIstiod {
		b := backoff.NewExponentialBackOff(backoff.DefaultOption())
		// Bridge the stop channel into a context so both the backoff loop and the
		// in-flight API call are cancelled promptly on shutdown.
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			select {
			case <-stop:
				cancel()
			case <-ctx.Done():
			}
		}()
		defer cancel()
		_ = b.RetryWithContext(ctx, func() error {
			// Use ctx (not context.TODO()) so the Get aborts on cancellation instead of
			// blocking until the request times out on its own.
			namespace, err := client.Kube().CoreV1().Namespaces().Get(ctx, systemNamespace, metav1.GetOptions{})
			if err != nil {
				if errors.IsNotFound(err) {
					// Namespace does not exist on the remote cluster: nothing to lead.
					return nil
				}
				// Transient error: returning it triggers another backoff retry.
				return err
			}
			// found same system namespace on the remote cluster so check if we are a selected istiod to lead
			istiodCluster, found := namespace.Annotations[annotation.TopologyControlPlaneClusters.Name]
			if found {
				localCluster := string(m.opts.ClusterID)
				for _, cluster := range strings.Split(istiodCluster, ",") {
					if cluster == "*" || cluster == localCluster {
						res = true
						return nil
					}
				}
			}
			return nil
		})
	}
	return res
}
// deleteCluster deletes cluster resources and does not trigger push.
// This call is not thread safe.
func (m *Multicluster) deleteCluster(clusterID cluster.ID) {
	m.opts.MeshServiceController.UnRegisterHandlersForCluster(clusterID)
	m.opts.MeshServiceController.DeleteRegistry(clusterID, provider.Kubernetes)
	controller, found := m.remoteKubeControllers[clusterID]
	if !found {
		log.Infof("cluster %s does not exist, maybe caused by invalid kubeconfig", clusterID)
		return
	}
	// A workload entry controller implies an External registry was registered for this cluster too.
	if controller.workloadEntryController != nil {
		m.opts.MeshServiceController.DeleteRegistry(clusterID, provider.External)
	}
	if err := controller.Cleanup(); err != nil {
		log.Warnf("failed cleaning up services in %s: %v", clusterID, err)
	}
	delete(m.remoteKubeControllers, clusterID)
}
// createWleConfigStore builds a config store controller that watches only WorkloadEntry
// resources for the given revision, scoped to the cluster identified by opts.ClusterID.
func createWleConfigStore(client kubelib.Client, revision string, opts Options) model.ConfigStoreController {
	log.Infof("Creating WorkloadEntry only config store for %s", opts.ClusterID)
	schemas := collection.NewSchemasBuilder().MustAdd(collections.WorkloadEntry).Build()
	crdOpts := crdclient.Option{
		Revision:     revision,
		DomainSuffix: opts.DomainSuffix,
		Identifier:   "mc-workload-entry-controller",
	}
	return crdclient.NewForSchemas(client, crdOpts, schemas)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/keycertbundle"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/namespace"
"istio.io/istio/security/pkg/k8s"
)
const (
	// CACertNamespaceConfigMap is the name of the ConfigMap in each namespace storing the root cert of non-Kube CA.
	CACertNamespaceConfigMap = "istio-ca-root-cert"
	// maxRetries is the number of times a namespace will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
	// sequence of delays between successive queuing of a namespace.
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms
	maxRetries = 5
)

// configMapLabel is stamped onto every CA root-cert ConfigMap this controller writes,
// so the ConfigMaps can be identified by label selector.
var configMapLabel = map[string]string{"istio.io/config": "true"}
// NamespaceController manages reconciles a configmap in each namespace with a desired set of data.
type NamespaceController struct {
	// caBundleWatcher provides the current CA bundle written into each namespace's ConfigMap.
	caBundleWatcher *keycertbundle.Watcher
	// queue serializes reconcile requests keyed by namespace.
	queue controllers.Queue
	// namespaces watches Namespace objects to create/update the ConfigMap per namespace.
	namespaces kclient.Client[*v1.Namespace]
	// configmaps watches only the istio-ca-root-cert ConfigMaps (field-selected by name).
	configmaps kclient.Client[*v1.ConfigMap]
	// if meshConfig.DiscoverySelectors specified, DiscoveryNamespacesFilter tracks the namespaces to be watched by this controller.
	DiscoveryNamespacesFilter namespace.DiscoveryNamespacesFilter
}
// NewNamespaceController returns a pointer to a newly constructed NamespaceController instance.
func NewNamespaceController(kubeClient kube.Client, caBundleWatcher *keycertbundle.Watcher,
	discoveryNamespacesFilter namespace.DiscoveryNamespacesFilter,
) *NamespaceController {
	controller := &NamespaceController{
		caBundleWatcher:           caBundleWatcher,
		DiscoveryNamespacesFilter: discoveryNamespacesFilter,
	}
	controller.queue = controllers.NewQueue("namespace controller",
		controllers.WithReconciler(controller.reconcileCACert),
		controllers.WithMaxAttempts(maxRetries))
	// Watch only the istio-ca-root-cert ConfigMaps, optionally restricted by discovery selectors.
	controller.configmaps = kclient.NewFiltered[*v1.ConfigMap](kubeClient, kclient.Filter{
		FieldSelector: "metadata.name=" + CACertNamespaceConfigMap,
		ObjectFilter:  controller.GetFilter(),
	})
	controller.namespaces = kclient.New[*v1.Namespace](kubeClient)
	watchedConfigMap := func(o controllers.Object) bool {
		// skip special kubernetes system namespaces
		return !inject.IgnoredNamespaces.Contains(o.GetNamespace())
	}
	controller.configmaps.AddEventHandler(controllers.FilteredObjectSpecHandler(controller.queue.AddObject, watchedConfigMap))
	if controller.DiscoveryNamespacesFilter != nil {
		// Reconcile whenever a namespace enters or leaves the discovery selection.
		controller.DiscoveryNamespacesFilter.AddHandler(func(ns string, event model.Event) {
			controller.syncNamespace(ns)
		})
	} else {
		watchedNamespace := func(o controllers.Object) bool {
			if features.InformerWatchNamespace != "" && features.InformerWatchNamespace != o.GetName() {
				// We are only watching one namespace, and its not this one
				return false
			}
			// skip special kubernetes system namespaces
			return !inject.IgnoredNamespaces.Contains(o.GetName())
		}
		controller.namespaces.AddEventHandler(controllers.FilteredObjectSpecHandler(controller.queue.AddObject, watchedNamespace))
	}
	return controller
}
// GetFilter returns the discovery namespace filter when discovery selectors are configured,
// or nil when every namespace is in scope.
func (nc *NamespaceController) GetFilter() namespace.DiscoveryFilter {
	if nc.DiscoveryNamespacesFilter == nil {
		return nil
	}
	return nc.DiscoveryNamespacesFilter.Filter
}
// Run starts the NamespaceController until a value is sent to stopCh.
func (nc *NamespaceController) Run(stopCh <-chan struct{}) {
	synced := kube.WaitForCacheSync("namespace controller", stopCh, nc.namespaces.HasSynced, nc.configmaps.HasSynced)
	if !synced {
		return
	}
	// React to CA bundle rotations in the background while the queue drains events.
	go nc.startCaBundleWatcher(stopCh)
	nc.queue.Run(stopCh)
	controllers.ShutdownAll(nc.configmaps, nc.namespaces)
}
// startCaBundleWatcher listens for updates to the CA bundle and update cm in each namespace
func (nc *NamespaceController) startCaBundleWatcher(stop <-chan struct{}) {
	id, watchCh := nc.caBundleWatcher.AddWatcher()
	defer nc.caBundleWatcher.RemoveWatcher(id)
	for {
		select {
		case <-stop:
			return
		case <-watchCh:
			// Bundle rotated: re-sync every namespace so each ConfigMap picks up the new cert.
			for _, ns := range nc.namespaces.List("", labels.Everything()) {
				nc.namespaceChange(ns)
			}
		}
	}
}
// reconcileCACert will reconcile the ca root cert configmap for the specified namespace
// If the configmap is not found, it will be created.
// If the namespace is filtered out by discovery selector, the configmap will be deleted.
func (nc *NamespaceController) reconcileCACert(o types.NamespacedName) error {
	targetNs := o.Namespace
	if targetNs == "" {
		// For Namespace object, it will not have o.Namespace field set
		targetNs = o.Name
	}
	if nc.DiscoveryNamespacesFilter != nil && !nc.DiscoveryNamespacesFilter.Filter(targetNs) {
		// do not delete the configmap, maybe it is owned by another control plane
		return nil
	}
	return k8s.InsertDataToConfigMap(nc.configmaps, metav1.ObjectMeta{
		Name:      CACertNamespaceConfigMap,
		Namespace: targetNs,
		Labels:    configMapLabel,
	}, nc.caBundleWatcher.GetCABundle())
}
// On namespace change, update the config map.
// If terminating, this will be skipped
func (nc *NamespaceController) namespaceChange(ns *v1.Namespace) {
	if ns.Status.Phase == v1.NamespaceTerminating {
		return
	}
	nc.syncNamespace(ns.Name)
}
// syncNamespace queues a reconcile for the given namespace, except for the special
// kubernetes system namespaces which never receive a root-cert ConfigMap.
func (nc *NamespaceController) syncNamespace(ns string) {
	if !inject.IgnoredNamespaces.Contains(ns) {
		nc.queue.Add(types.NamespacedName{Name: ns})
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"net"
"strconv"
"sync"
"github.com/yl2chen/cidranger"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/gateway-api/apis/v1beta1"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
)
// networkManager tracks which network this registry belongs to and the set of
// cross-network gateways discovered from Services and Gateway resources.
type networkManager struct {
	sync.RWMutex
	// CIDR ranger based on path-compressed prefix trie
	ranger cidranger.Ranger
	// clusterID identifies the cluster this registry serves.
	clusterID cluster.ID
	// gatewayResourceClient watches Gateway resources; nil unless a feature needing it is enabled.
	gatewayResourceClient kclient.Informer[*v1beta1.Gateway]
	// meshNetworksWatcher supplies the MeshNetworks configuration.
	meshNetworksWatcher mesh.NetworksWatcher
	// Network name to be used when neither meshNetworks fromRegistry nor a network label on the pod is specified.
	// This is defined by a topology.istio.io/network label on the system namespace.
	network network.ID
	// Network name for the registry as specified by the MeshNetworks configmap
	networkFromMeshConfig network.ID
	// map of svc fqdn to partially built network gateways; the actual gateways will be built from these into networkGatewaysBySvc
	// this map just enumerates which networks/ports each Service is a gateway for
	registryServiceNameGateways map[host.Name][]model.NetworkGateway
	// gateways for each service
	networkGatewaysBySvc map[host.Name]model.NetworkGatewaySet
	// gateways from kubernetes Gateway resources
	gatewaysFromResource map[types.UID]model.NetworkGatewaySet
	// we don't want to discover gateways with class "istio-remote" from outside cluster's API servers.
	discoverRemoteGatewayResources bool
	// implements NetworkGatewaysWatcher; we need to call c.NotifyGatewayHandlers when our gateways change
	model.NetworkGatewaysHandler
}
// initNetworkManager builds the networkManager for the controller, creating the Gateway
// resource informer and registering its handler only when the relevant features are enabled.
func initNetworkManager(c *Controller, options Options) *networkManager {
	mgr := &networkManager{
		clusterID:           options.ClusterID,
		meshNetworksWatcher: options.MeshNetworksWatcher,
		// zero values are a workaround structcheck issue: https://github.com/golangci/golangci-lint/issues/826
		ranger:                         nil,
		network:                        "",
		networkFromMeshConfig:          "",
		registryServiceNameGateways:    make(map[host.Name][]model.NetworkGateway),
		networkGatewaysBySvc:           make(map[host.Name]model.NetworkGatewaySet),
		gatewaysFromResource:           make(map[types.UID]model.NetworkGatewaySet),
		discoverRemoteGatewayResources: options.ConfigCluster,
	}
	// initialize the gateway resource client when any feature that uses it is enabled
	if features.MultiNetworkGatewayAPI || features.EnableAmbientControllers {
		mgr.gatewayResourceClient = kclient.NewDelayedInformer[*v1beta1.Gateway](c.client, gvr.KubernetesGateway, kubetypes.StandardInformer, kubetypes.Filter{})
	}
	if features.MultiNetworkGatewayAPI {
		// conditionally register this handler
		registerHandlers(c, mgr.gatewayResourceClient, "Gateways", mgr.handleGatewayResource, nil)
	}
	return mgr
}
// setNetworkFromNamespace sets network got from system namespace, returns whether it has changed
func (n *networkManager) setNetworkFromNamespace(ns *v1.Namespace) bool {
	newNetwork := network.ID(ns.Labels[label.TopologyNetwork.Name])
	n.Lock()
	defer n.Unlock()
	changed := n.network != newNetwork
	n.network = newNetwork
	return changed
}
// networkFromSystemNamespace returns the default network derived from the
// topology.istio.io/network label on the system namespace.
func (n *networkManager) networkFromSystemNamespace() network.ID {
	n.RLock()
	currentNetwork := n.network
	n.RUnlock()
	return currentNetwork
}
// networkFromMeshNetworks resolves the network for an endpoint IP from the MeshNetworks
// configuration: a fromRegistry match for this cluster wins outright; otherwise the CIDR
// ranger built by reloadMeshNetworks is consulted. Returns "" when nothing matches.
func (n *networkManager) networkFromMeshNetworks(endpointIP string) network.ID {
	n.RLock()
	defer n.RUnlock()
	if n.networkFromMeshConfig != "" {
		return n.networkFromMeshConfig
	}
	if n.ranger != nil {
		ip := net.ParseIP(endpointIP)
		if ip == nil {
			// Not a parseable IP; no network can be derived.
			return ""
		}
		entries, err := n.ranger.ContainingNetworks(ip)
		if err != nil {
			// Include the underlying error so lookup failures are diagnosable (previously dropped).
			log.Errorf("error getting cidr ranger entry from endpoint ip %s: %v", endpointIP, err)
			return ""
		}
		if len(entries) > 1 {
			log.Warnf("Found multiple networks CIDRs matching the endpoint IP: %s. Using the first match.", endpointIP)
		}
		if len(entries) > 0 {
			return (entries[0].(namedRangerEntry)).name
		}
	}
	return ""
}
// namedRangerEntry for holding network's CIDR and name
type namedRangerEntry struct {
	// name is the network ID this CIDR maps to.
	name network.ID
	// network is the CIDR range inserted into the ranger.
	network net.IPNet
}

// Network returns the IPNet for the network
func (n namedRangerEntry) Network() net.IPNet {
	return n.network
}
// onNetworkChange is fired if the default network is changed either via the namespace label or mesh-networks
func (c *Controller) onNetworkChange() {
	// the network for endpoints are computed when we process the events; this will fix the cache
	// NOTE: this must run before the other network watcher handler that creates a force push
	if err := c.syncPods(); err != nil {
		log.Errorf("one or more errors force-syncing pods: %v", err)
	}
	// Re-initialize endpoints across all namespaces so cached networks are recomputed.
	if err := c.endpoints.initializeNamespace(metav1.NamespaceAll, true); err != nil {
		log.Errorf("one or more errors force-syncing endpoints: %v", err)
	}
	// Recompute gateway sets for every service after the network changed.
	c.reloadNetworkGateways()
	// This is to ensure the ambient workloads are updated dynamically, aligning them with the current network settings.
	// With this, the pod do not need to restart when the network configuration changes.
	if features.EnableAmbientControllers {
		c.syncAllWorkloadsForAmbient()
	}
}
// reloadMeshNetworks will read the mesh networks configuration to setup
// fromRegistry and cidr based network lookups for this registry
func (n *networkManager) reloadMeshNetworks() {
	n.Lock()
	defer n.Unlock()
	// Reset derived state before rebuilding. (The previous version assigned
	// networkFromMeshConfig = "" twice; once is sufficient.)
	n.networkFromMeshConfig = ""
	n.registryServiceNameGateways = make(map[host.Name][]model.NetworkGateway)
	ranger := cidranger.NewPCTrieRanger()
	meshNetworks := n.meshNetworksWatcher.Networks()
	if meshNetworks == nil || len(meshNetworks.Networks) == 0 {
		return
	}
	for id, v := range meshNetworks.Networks {
		// track endpoints items from this registry are a part of this network
		fromRegistry := false
		for _, ep := range v.Endpoints {
			if ep.GetFromCidr() != "" {
				_, nw, err := net.ParseCIDR(ep.GetFromCidr())
				if err != nil {
					log.Warnf("unable to parse CIDR %q for network %s", ep.GetFromCidr(), id)
					continue
				}
				rangerEntry := namedRangerEntry{
					name:    network.ID(id),
					network: *nw,
				}
				_ = ranger.Insert(rangerEntry)
			}
			if ep.GetFromRegistry() != "" && cluster.ID(ep.GetFromRegistry()) == n.clusterID {
				fromRegistry = true
			}
		}
		// fromRegistry field specified this cluster
		if fromRegistry {
			// treat endpoints in this cluster as part of this network
			if n.networkFromMeshConfig != "" {
				log.Warnf("multiple networks specify %s in fromRegistry; endpoints from %s will continue to be treated as part of %s",
					n.clusterID, n.clusterID, n.networkFromMeshConfig)
			} else {
				n.networkFromMeshConfig = network.ID(id)
			}
			// services in this registry matching the registryServiceName and port are part of this network
			for _, gw := range v.Gateways {
				if gwSvcName := gw.GetRegistryServiceName(); gwSvcName != "" {
					svc := host.Name(gwSvcName)
					n.registryServiceNameGateways[svc] = append(n.registryServiceNameGateways[svc], model.NetworkGateway{
						Network: network.ID(id),
						Cluster: n.clusterID,
						Port:    gw.GetPort(),
					})
				}
			}
		}
	}
	n.ranger = ranger
}
// NetworkGateways merges the gateways discovered from Services and from Gateway
// resources into a single de-duplicated, sorted slice.
func (c *Controller) NetworkGateways() []model.NetworkGateway {
	c.networkManager.RLock()
	defer c.networkManager.RUnlock()
	// Merge all the gateways into a single set to eliminate duplicates.
	merged := make(model.NetworkGatewaySet)
	for _, gws := range c.networkGatewaysBySvc {
		merged.Merge(gws)
	}
	for _, gws := range c.gatewaysFromResource {
		merged.Merge(gws)
	}
	return model.SortGateways(merged.UnsortedList())
}
// extractGatewaysFromService checks if the service is a cross-network gateway
// and if it is, updates the controller's gateways.
func (c *Controller) extractGatewaysFromService(svc *model.Service) bool {
	if !c.extractGatewaysInner(svc) {
		return false
	}
	c.NotifyGatewayHandlers()
	return true
}
// reloadNetworkGateways performs extractGatewaysFromService for all services registered with the controller.
// It is called only by `onNetworkChange`.
// It iterates over all services, because mesh networks can be set with a service name.
func (c *Controller) reloadNetworkGateways() {
	c.Lock()
	gwsChanged := false
	for _, svc := range c.servicesMap {
		// Do NOT break out early: every service's gateway set must be recomputed,
		// not just the first one that changed. (Previously a `break` here left
		// the remaining services with stale gateway data.)
		if c.extractGatewaysInner(svc) {
			gwsChanged = true
		}
	}
	c.Unlock()
	if gwsChanged {
		c.NotifyGatewayHandlers()
		// TODO ConfigUpdate via gateway handler
		c.opts.XDSUpdater.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.NetworksTrigger)})
	}
}
// extractGatewaysInner performs the logic for extractGatewaysFromService without locking the controller.
// Returns true if any gateways changed.
func (n *networkManager) extractGatewaysInner(svc *model.Service) bool {
	n.Lock()
	defer n.Unlock()
	previousGateways := n.networkGatewaysBySvc[svc.Hostname]
	gateways := n.getGatewayDetails(svc)
	// short circuit for most services.
	if len(previousGateways) == 0 && len(gateways) == 0 {
		return false
	}
	// check if we have node port mappings
	nodePortMap := make(map[uint32]uint32)
	if svc.Attributes.ClusterExternalPorts != nil {
		if npm, exists := svc.Attributes.ClusterExternalPorts[n.clusterID]; exists {
			nodePortMap = npm
		}
	}
	newGateways := make(model.NetworkGatewaySet)
	for _, addr := range svc.Attributes.ClusterExternalAddresses.GetAddressesFor(n.clusterID) {
		for _, gw := range gateways {
			// what we now have is a service port. If there is a mapping for cluster external ports,
			// look it up and get the node port for the remote port
			if nodePort, exists := nodePortMap[gw.Port]; exists {
				gw.Port = nodePort
			}
			gw.Cluster = n.clusterID
			gw.Addr = addr
			newGateways.Insert(gw)
		}
	}
	changed := !newGateways.Equals(previousGateways)
	if len(newGateways) == 0 {
		delete(n.networkGatewaysBySvc, svc.Hostname)
	} else {
		n.networkGatewaysBySvc[svc.Hostname] = newGateways
	}
	return changed
}
// getGatewayDetails returns gateways without the address populated, only the network and (unmapped) port for a given service.
func (n *networkManager) getGatewayDetails(svc *model.Service) []model.NetworkGateway {
	// TODO should we start checking if svc's Ports contain the gateway port?
	// label based gateways
	// TODO label based gateways could support being the gateway for multiple networks
	nw := svc.Attributes.Labels[label.TopologyNetwork.Name]
	if nw != "" {
		gwPortStr := svc.Attributes.Labels[label.NetworkingGatewayPort.Name]
		if gwPortStr != "" {
			gwPort, err := strconv.Atoi(gwPortStr)
			if err == nil {
				return []model.NetworkGateway{{Port: uint32(gwPort), Network: network.ID(nw)}}
			}
			log.Warnf("could not parse %q for %s on %s/%s; defaulting to %d",
				gwPortStr, label.NetworkingGatewayPort.Name, svc.Attributes.Namespace, svc.Attributes.Name, DefaultNetworkGatewayPort)
		}
		// No (or unparseable) port label: fall back to the default gateway port.
		return []model.NetworkGateway{{Port: DefaultNetworkGatewayPort, Network: network.ID(nw)}}
	}
	// meshNetworks registryServiceName+fromRegistry
	if gws, ok := n.registryServiceNameGateways[svc.Hostname]; ok {
		// Return a copy so callers cannot mutate the cached slice.
		return append(make([]model.NetworkGateway, 0, len(gws)), gws...)
	}
	return nil
}
// handleGatewayResource adds a NetworkGateway for each combination of address and auto-passthrough listener
// discovering duplicates from the generated Service is not a huge concern as we de-duplicate in NetworkGateways
// which returns a set, although it's not totally efficient.
func (n *networkManager) handleGatewayResource(_ *v1beta1.Gateway, gw *v1beta1.Gateway, event model.Event) error {
	if nw := gw.GetLabels()[label.TopologyNetwork.Name]; nw == "" {
		return nil
	}
	// Gateway with istio-remote: only discover this from the config cluster
	// this is a way to reference a gateway that lives in a place that this control plane
	// won't have API server access. Nothing will be deployed for these Gateway resources.
	if !n.discoverRemoteGatewayResources && gw.Spec.GatewayClassName == constants.RemoteGatewayClassName {
		return nil
	}
	gatewaysChanged := false
	n.Lock()
	// Notify handlers only after the lock is released to avoid re-entrancy under the lock.
	defer func() {
		n.Unlock()
		if gatewaysChanged {
			n.NotifyGatewayHandlers()
		}
	}()
	previousGateways := n.gatewaysFromResource[gw.UID]
	if event == model.EventDelete {
		gatewaysChanged = len(previousGateways) > 0
		delete(n.gatewaysFromResource, gw.UID)
		return nil
	}
	autoPassthrough := func(l v1beta1.Listener) bool {
		return kube.IsAutoPassthrough(gw.GetLabels(), l)
	}
	base := model.NetworkGateway{
		Network: network.ID(gw.GetLabels()[label.TopologyNetwork.Name]),
		Cluster: n.clusterID,
	}
	newGateways := model.NetworkGatewaySet{}
	for _, addr := range gw.Spec.Addresses {
		if addr.Type == nil {
			continue
		}
		if addrType := *addr.Type; addrType != v1beta1.IPAddressType && addrType != v1beta1.HostnameAddressType {
			continue
		}
		for _, l := range slices.Filter(gw.Spec.Listeners, autoPassthrough) {
			networkGateway := base
			networkGateway.Addr = addr.Value
			networkGateway.Port = uint32(l.Port)
			newGateways.Insert(networkGateway)
		}
	}
	// Fix: previously the new set was stored unconditionally before comparison and the
	// function returned early on a length mismatch, which could leave a stale empty set
	// in the map when a gateway lost all its addresses/listeners. Compare first, then
	// store or delete exactly once.
	gatewaysChanged = !newGateways.Equals(previousGateways)
	if len(newGateways) > 0 {
		n.gatewaysFromResource[gw.UID] = newGateways
	} else {
		delete(n.gatewaysFromResource, gw.UID)
	}
	return nil
}
// HasSynced reports whether the gateway resource informer has synced. When the informer
// was never created (the relevant features are disabled), there is nothing to wait for.
func (n *networkManager) HasSynced() bool {
	if n.gatewayResourceClient != nil {
		return n.gatewayResourceClient.HasSynced()
	}
	return true
}
// updateServiceNodePortAddresses updates ClusterExternalAddresses for Services of nodePort type
func (c *Controller) updateServiceNodePortAddresses(svcs ...*model.Service) bool {
	// node event, update all nodePort gateway services
	if len(svcs) == 0 {
		svcs = c.getNodePortGatewayServices()
	}
	// no nodePort gateway service found, no update
	if len(svcs) == 0 {
		return false
	}
	for _, svc := range svcs {
		c.RLock()
		selector := c.nodeSelectorsForServices[svc.Hostname]
		c.RUnlock()
		// update external address
		// NOTE(review): c.nodeInfoMap is read here without holding the lock — confirm
		// this matches the controller's locking convention for that map.
		var addresses []string
		for _, node := range c.nodeInfoMap {
			if selector.SubsetOf(node.labels) {
				addresses = append(addresses, node.address)
			}
		}
		if svc.Attributes.ClusterExternalAddresses == nil {
			svc.Attributes.ClusterExternalAddresses = &model.AddressMap{}
		}
		svc.Attributes.ClusterExternalAddresses.SetAddressesFor(c.Cluster(), addresses)
		// update gateways that use the service
		c.extractGatewaysFromService(svc)
	}
	return true
}
// getNodePortGatewayServices returns nodePort type gateway service
func (c *Controller) getNodePortGatewayServices() []*model.Service {
	c.RLock()
	defer c.RUnlock()
	result := make([]*model.Service, 0, len(c.nodeSelectorsForServices))
	for hostname := range c.nodeSelectorsForServices {
		// A hostname may have a selector but no (longer a) matching Service.
		if svc := c.servicesMap[hostname]; svc != nil {
			result = append(result, svc)
		}
	}
	return result
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"sync"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/sets"
)
// PodCache is an eventually consistent pod cache
type PodCache struct {
	// pods is the informer-backed client used to look up pods.
	pods kclient.Client[*v1.Pod]
	// RWMutex guards podsByIP, IPByPods and needResync below.
	sync.RWMutex
	// podsByIP maintains stable pod IP to name key mapping
	// this allows us to retrieve the latest status by pod IP.
	// This should only contain RUNNING or PENDING pods with an allocated IP.
	podsByIP map[string]sets.Set[types.NamespacedName]
	// IPByPods is a reverse map of podsByIP. This exists to allow us to prune stale entries in the
	// pod cache if a pod changes IP.
	IPByPods map[types.NamespacedName]string
	// needResync is map of IP to endpoint namespace/name. This is used to requeue endpoint
	// events when pod event comes. This typically happens when pod is not available
	// in podCache when endpoint event comes.
	needResync map[string]sets.Set[types.NamespacedName]
	// queueEndpointEvent requeues an endpoint event once the pod backing its IP arrives.
	queueEndpointEvent func(types.NamespacedName)
	// c is the owning Controller, used to reach registered workload handlers.
	c *Controller
}
// newPodCache constructs an empty PodCache backed by the given pod client;
// queueEndpointEvent is called to requeue endpoint events once their pod appears.
func newPodCache(c *Controller, pods kclient.Client[*v1.Pod], queueEndpointEvent func(types.NamespacedName)) *PodCache {
	return &PodCache{
		pods:               pods,
		c:                  c,
		podsByIP:           make(map[string]sets.Set[types.NamespacedName]),
		IPByPods:           make(map[types.NamespacedName]string),
		needResync:         make(map[string]sets.Set[types.NamespacedName]),
		queueEndpointEvent: queueEndpointEvent,
	}
}
// Copied from kubernetes/kubernetes/pkg/controller/util/endpoint/controller_utils.go
//
// shouldPodBeInEndpoints returns true if a specified pod should be in an
// Endpoints or EndpointSlice resource. Terminating pods are not included.
func shouldPodBeInEndpoints(pod *v1.Pod) bool {
	// "Terminal" describes when a Pod is complete (in a succeeded or failed phase).
	// This is distinct from the "Terminating" condition which represents when a Pod
	// is being terminated (metadata.deletionTimestamp is non nil).
	switch {
	case isPodPhaseTerminal(pod.Status.Phase):
		return false
	case len(pod.Status.PodIP) == 0 && len(pod.Status.PodIPs) == 0:
		return false
	case pod.DeletionTimestamp != nil:
		return false
	default:
		return true
	}
}
// isPodPhaseTerminal returns true if the pod's phase is terminal.
func isPodPhaseTerminal(phase v1.PodPhase) bool {
	switch phase {
	case v1.PodFailed, v1.PodSucceeded:
		return true
	default:
		return false
	}
}
// IsPodRunning reports whether the pod is in the Running phase.
func IsPodRunning(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodRunning
}
// IsPodReady is copied from kubernetes/pkg/api/v1/pod/utils.go
// It reports whether the pod's Ready condition is True.
func IsPodReady(pod *v1.Pod) bool {
	return IsPodReadyConditionTrue(pod.Status)
}
// IsPodReadyConditionTrue returns true if a pod is ready; false otherwise.
func IsPodReadyConditionTrue(status v1.PodStatus) bool {
	cond := GetPodReadyCondition(status)
	if cond == nil {
		return false
	}
	return cond.Status == v1.ConditionTrue
}
// GetPodReadyCondition extracts the Ready condition from the pod status, or nil if absent.
func GetPodReadyCondition(status v1.PodStatus) *v1.PodCondition {
	_, condition := GetPodCondition(&status, v1.PodReady)
	return condition
}
// GetPodCondition extracts the provided condition from the given pod status.
// Returns -1 and nil when status is nil or the condition is not present.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	if status == nil {
		return -1, nil
	}
	return GetPodConditionFromList(status.Conditions, conditionType)
}
// GetPodConditionFromList extracts the provided condition from the given list of condition and
// returns the index of the condition and the condition. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	// Ranging over a nil slice is a no-op, so no explicit nil check is needed.
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return i, &conditions[i]
		}
	}
	return -1, nil
}
// labelFilter triggers a proxy push when a pod's labels change, then always lets the
// regular onEvent handler run as well.
func (pc *PodCache) labelFilter(old, cur *v1.Pod) bool {
	// If labels updated, trigger proxy push
	labelsChanged := !maps.Equal(old.Labels, cur.Labels)
	if labelsChanged && cur.Status.PodIP != "" {
		pc.proxyUpdates(cur.Status.PodIP)
	}
	// always continue calling pc.onEvent
	return false
}
// onEvent updates the IP-based index (pc.podsByIP).
func (pc *PodCache) onEvent(_, pod *v1.Pod, ev model.Event) error {
	ip := pod.Status.PodIP
	// PodIP will be empty when pod is just created, but before the IP is assigned
	// via UpdateStatus.
	if ip == "" {
		return nil
	}
	key := config.NamespacedName(pod)
	switch ev {
	case model.EventAdd:
		if !shouldPodBeInEndpoints(pod) || !IsPodReady(pod) {
			return nil
		}
		pc.update(ip, key)
	case model.EventUpdate:
		if shouldPodBeInEndpoints(pod) && IsPodReady(pod) {
			pc.update(ip, key)
		} else {
			// delete only if this pod was in the cache
			if !pc.deleteIP(ip, key) {
				return nil
			}
			// Downgrade the update to a delete for downstream workload handlers.
			ev = model.EventDelete
		}
	case model.EventDelete:
		// delete only if this pod was in the cache,
		// in most case it has already been deleted in `UPDATE` with `DeletionTimestamp` set.
		if !pc.deleteIP(ip, key) {
			return nil
		}
	}
	pc.notifyWorkloadHandlers(pod, ev)
	return nil
}
// notifyWorkloadHandlers fire workloadInstance handlers for pod
func (pc *PodCache) notifyWorkloadHandlers(pod *v1.Pod, ev model.Event) {
	// if no workload handler registered, skip building WorkloadInstance
	if len(pc.c.handlers.GetWorkloadHandlers()) == 0 {
		return
	}
	// fire instance handles for workload
	ep := NewEndpointBuilder(pc.c, pod).buildIstioEndpoint(pod.Status.PodIP, 0, "", model.AlwaysDiscoverable, model.Healthy)
	workloadInstance := &model.WorkloadInstance{
		Name:      pod.Name,
		Namespace: pod.Namespace,
		Kind:      model.PodKind,
		Endpoint:  ep,
		// PortMap maps named TCP container ports to their numbers (see getPortMap).
		PortMap: getPortMap(pod),
	}
	pc.c.handlers.NotifyWorkloadHandlers(workloadInstance, ev)
}
// getPortMap maps each named TCP container port of the pod to its port
// number. Unnamed ports and non-TCP ports are skipped.
func getPortMap(pod *v1.Pod) map[string]uint32 {
	ports := make(map[string]uint32)
	for _, container := range pod.Spec.Containers {
		for _, p := range container.Ports {
			if p.Name == "" || p.Protocol != v1.ProtocolTCP {
				continue
			}
			// First port wins, per Kubernetes (https://github.com/kubernetes/kubernetes/issues/54213)
			if _, seen := ports[p.Name]; !seen {
				ports[p.Name] = uint32(p.ContainerPort)
			}
		}
	}
	return ports
}
// deleteIP returns true if the pod and ip are really deleted.
func (pc *PodCache) deleteIP(ip string, podKey types.NamespacedName) bool {
	pc.Lock()
	defer pc.Unlock()
	// Nothing to do unless this exact pod is indexed under this IP.
	if !pc.podsByIP[ip].Contains(podKey) {
		return false
	}
	sets.DeleteCleanupLast(pc.podsByIP, ip, podKey)
	delete(pc.IPByPods, podKey)
	return true
}
// update records the (ip, key) pair in the IP index, cleaning up any stale
// entry for the same pod under a previous IP, and flushes endpoint events
// that were queued waiting for this pod to arrive. The proxy push happens
// after the lock is released so XDS work is not done under the mutex.
func (pc *PodCache) update(ip string, key types.NamespacedName) {
	pc.Lock()
	// if the pod has been cached, return
	if pc.podsByIP[ip].Contains(key) {
		pc.Unlock()
		return
	}
	if current, f := pc.IPByPods[key]; f {
		// The pod already exists, but with another IP Address. We need to clean up that
		sets.DeleteCleanupLast(pc.podsByIP, current, key)
	}
	sets.InsertOrNew(pc.podsByIP, ip, key)
	pc.IPByPods[key] = ip

	// Flush endpoints that registered (via queueEndpointEventOnPodArrival)
	// for this IP while the pod was not yet known.
	if endpointsToUpdate, f := pc.needResync[ip]; f {
		delete(pc.needResync, ip)
		for epKey := range endpointsToUpdate {
			pc.queueEndpointEvent(epKey)
		}
		endpointsPendingPodUpdate.Record(float64(len(pc.needResync)))
	}
	pc.Unlock()

	pc.proxyUpdates(ip)
}
// queueEndpointEventOnPodArrival registers this endpoint and queues endpoint event
// when the corresponding pod arrives.
func (pc *PodCache) queueEndpointEventOnPodArrival(key types.NamespacedName, ip string) {
	pc.Lock()
	defer pc.Unlock()
	sets.InsertOrNew(pc.needResync, ip, key)
	// Track the number of IPs still waiting on a pod, for monitoring.
	endpointsPendingPodUpdate.Record(float64(len(pc.needResync)))
}
// endpointDeleted cleans up endpoint from resync endpoint list.
func (pc *PodCache) endpointDeleted(key types.NamespacedName, ip string) {
	pc.Lock()
	defer pc.Unlock()
	sets.DeleteCleanupLast(pc.needResync, ip, key)
	// Track the number of IPs still waiting on a pod, for monitoring.
	endpointsPendingPodUpdate.Record(float64(len(pc.needResync)))
}
// proxyUpdates pushes an XDS proxy update for the given IP, if an XDS
// updater is wired up.
func (pc *PodCache) proxyUpdates(ip string) {
	if pc.c == nil || pc.c.opts.XDSUpdater == nil {
		// No controller or updater configured; nothing to push.
		return
	}
	pc.c.opts.XDSUpdater.ProxyUpdate(pc.c.Cluster(), ip)
}
// getPodKeys returns the keys of all pods currently indexed under the given
// address, in no particular order.
func (pc *PodCache) getPodKeys(addr string) []types.NamespacedName {
	pc.RLock()
	defer pc.RUnlock()
	return pc.podsByIP[addr].UnsortedList()
}
// getPodsByIP returns the pods indexed under the given address, or nil if
// no pod is known for it.
func (pc *PodCache) getPodsByIP(addr string) []*v1.Pod {
	keys := pc.getPodKeys(addr)
	if keys == nil {
		return nil
	}
	pods := make([]*v1.Pod, 0, len(keys))
	for _, k := range keys {
		// Subtle race condition. getPodKeys is our cache over pods, while getPodByKey hits the informer cache.
		// if these are out of sync, the pod may be nil (pod was deleted).
		if pod := pc.getPodByKey(k); pod != nil {
			pods = append(pods, pod)
		}
	}
	return pods
}
// getPodByKey returns the pod by key, reading from the informer cache.
func (pc *PodCache) getPodByKey(key types.NamespacedName) *v1.Pod {
	return pc.pods.Get(key.Name, key.Namespace)
}
// getPodByProxy returns the pod of the proxy, preferring a direct lookup by
// the pod key encoded in the proxy ID, and falling back to a lookup by the
// proxy's first IP address. (The previous doc comment incorrectly named this
// function getPodByKey.)
func (pc *PodCache) getPodByProxy(proxy *model.Proxy) *v1.Pod {
	var pod *v1.Pod
	key := podKeyByProxy(proxy)
	if key.Name != "" {
		pod = pc.getPodByKey(key)
		if pod != nil {
			return pod
		}
	}
	// Guard against a proxy that reports no IP addresses; indexing [0] below
	// would otherwise panic.
	if len(proxy.IPAddresses) == 0 {
		return nil
	}
	// only need to fetch the corresponding pod through the first IP, although there are multiple IP scenarios,
	// because multiple ips belong to the same pod
	proxyIP := proxy.IPAddresses[0]
	// just in case the proxy ID is bad formatted
	pods := pc.getPodsByIP(proxyIP)
	switch len(pods) {
	case 0:
		return nil
	case 1:
		return pods[0]
	default:
		// This should only happen with hostNetwork pods, which cannot be proxy clients...
		log.Errorf("unexpected: found multiple pods for proxy %v (%v)", proxy.ID, proxyIP)
		// Try to handle it gracefully
		for _, p := range pods {
			// At least filter out wrong namespaces...
			if proxy.ConfigNamespace != p.Namespace {
				continue
			}
			return p
		}
		return nil
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
kubesr "istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/kube/mcs"
)
// exportedService holds debugging information for a single exported service:
// its namespaced name plus, for each of its hostnames, the string form of the
// endpoint discoverability policy in effect.
type exportedService struct {
	namespacedName types.NamespacedName
	discoverability map[host.Name]string
}
// serviceExportCache reads Kubernetes Multi-Cluster Services (MCS) ServiceExport resources in the
// cluster and generates discoverability policies for the endpoints.
type serviceExportCache interface {
	// EndpointDiscoverabilityPolicy returns the policy for Service endpoints residing within the current cluster.
	EndpointDiscoverabilityPolicy(svc *model.Service) model.EndpointDiscoverabilityPolicy

	// ExportedServices returns the list of services that are exported in this cluster. Used for debugging.
	ExportedServices() []exportedService

	// Run starts the cache; current implementations are no-ops since the
	// underlying informers are started elsewhere.
	Run(stop <-chan struct{})

	// HasSynced indicates whether the kube client has synced for the watched resources.
	HasSynced() bool
}
// newServiceExportCache creates a new serviceExportCache that observes the given cluster.
// When MCS service discovery is disabled, a no-op placeholder is returned instead.
func newServiceExportCache(c *Controller) serviceExportCache {
	if features.EnableMCSServiceDiscovery {
		ec := &serviceExportCacheImpl{
			Controller: c,
		}
		// Watch ServiceExport via a dynamic informer, filtered the same way
		// as the rest of this controller's resources.
		ec.serviceExports = kclient.NewDelayedInformer[controllers.Object](ec.client, mcs.ServiceExportGVR, kubetypes.DynamicInformer, kclient.Filter{
			ObjectFilter: ec.opts.GetFilter(),
		})
		// Register callbacks for events.
		registerHandlers(ec.Controller, ec.serviceExports, "ServiceExports", ec.onServiceExportEvent, nil)

		// Set the discoverability policy for the clusterset.local host.
		ec.clusterSetLocalPolicySelector = func(svc *model.Service) (policy model.EndpointDiscoverabilityPolicy) {
			// If the service is exported in this cluster, allow the endpoints in this cluster to be discoverable
			// anywhere in the mesh.
			if ec.isExported(namespacedNameForService(svc)) {
				return model.AlwaysDiscoverable
			}
			// Otherwise, endpoints are only discoverable from within the same cluster.
			return model.DiscoverableFromSameCluster
		}
		// Set the discoverability policy for the cluster.local host.
		if features.EnableMCSClusterLocal {
			// MCS cluster.local mode is enabled. Allow endpoints for the cluster.local host to be
			// discoverable only from within the same cluster.
			ec.clusterLocalPolicySelector = func(svc *model.Service) (policy model.EndpointDiscoverabilityPolicy) {
				return model.DiscoverableFromSameCluster
			}
		} else {
			// MCS cluster.local mode is not enabled, so requests to the cluster.local host are not confined
			// to the same cluster. Use the same discoverability policy as for clusterset.local.
			ec.clusterLocalPolicySelector = ec.clusterSetLocalPolicySelector
		}
		return ec
	}
	// MCS Service discovery is disabled. Use a placeholder cache.
	return disabledServiceExportCache{}
}
type discoverabilityPolicySelector func(*model.Service) model.EndpointDiscoverabilityPolicy
// serviceExportCacheImpl reads ServiceExport resources for a single cluster.
type serviceExportCacheImpl struct {
	*Controller

	// serviceExports is the (dynamic) informer over ServiceExport resources.
	serviceExports kclient.Untyped

	// clusterLocalPolicySelector selects an appropriate EndpointDiscoverabilityPolicy for the cluster.local host.
	clusterLocalPolicySelector discoverabilityPolicySelector

	// clusterSetLocalPolicySelector selects an appropriate EndpointDiscoverabilityPolicy for the clusterset.local host.
	clusterSetLocalPolicySelector discoverabilityPolicySelector
}
// onServiceExportEvent reacts to ServiceExport add/delete events by rebuilding
// the endpoints (and thus discoverability policy) of the affected services.
// Updates are ignored, since only the existence of the export matters.
func (ec *serviceExportCacheImpl) onServiceExportEvent(_, obj controllers.Object, event model.Event) error {
	se := controllers.Extract[*unstructured.Unstructured](obj)
	if se == nil {
		return nil
	}
	if event == model.EventAdd || event == model.EventDelete {
		ec.updateXDS(se)
	}
	return nil
}
// updateXDS rebuilds and pushes the endpoints of every service matching the
// given ServiceExport so that the new discoverability policy takes effect.
func (ec *serviceExportCacheImpl) updateXDS(se metav1.Object) {
	// The shard key is loop-invariant; compute it once.
	shard := model.ShardKeyFromRegistry(ec)
	for _, svc := range ec.servicesForNamespacedName(config.NamespacedName(se)) {
		// Re-build the endpoints for this service with a new discoverability policy.
		// Also update any internal caching.
		eps := ec.buildEndpointsForService(svc, true)
		ec.opts.XDSUpdater.EDSUpdate(shard, svc.Hostname.String(), se.GetNamespace(), eps)
	}
}
// EndpointDiscoverabilityPolicy returns the policy for the given service's
// endpoints in this cluster, picking the clusterset.local selector for MCS
// hosts and the cluster.local selector otherwise.
func (ec *serviceExportCacheImpl) EndpointDiscoverabilityPolicy(svc *model.Service) model.EndpointDiscoverabilityPolicy {
	if svc == nil {
		// Default policy when the service doesn't exist.
		return model.DiscoverableFromSameCluster
	}
	isClusterSetHost := strings.HasSuffix(svc.Hostname.String(), "."+constants.DefaultClusterSetLocalDomain)
	if isClusterSetHost {
		return ec.clusterSetLocalPolicySelector(svc)
	}
	return ec.clusterLocalPolicySelector(svc)
}
// isExported reports whether a ServiceExport resource exists for the named
// service in this cluster.
func (ec *serviceExportCacheImpl) isExported(name types.NamespacedName) bool {
	return ec.serviceExports.Get(name.Name, name.Namespace) != nil
}
// ExportedServices returns debugging information for every ServiceExport in
// this cluster: each exported service's name and the discoverability policy
// in effect for each of its known hostnames.
func (ec *serviceExportCacheImpl) ExportedServices() []exportedService {
	// List all exports in this cluster.
	exports := ec.serviceExports.List(metav1.NamespaceAll, klabels.Everything())

	ec.RLock()
	defer ec.RUnlock()

	out := make([]exportedService, 0, len(exports))
	for _, export := range exports {
		uExport := export.(*unstructured.Unstructured)
		es := exportedService{
			namespacedName:  config.NamespacedName(uExport),
			discoverability: make(map[host.Name]string),
		}
		// Generate the map of all hosts for this service to their discoverability policies.
		hosts := []host.Name{
			kubesr.ServiceHostname(uExport.GetName(), uExport.GetNamespace(), ec.opts.DomainSuffix),
			serviceClusterSetLocalHostname(es.namespacedName),
		}
		for _, hostName := range hosts {
			if svc := ec.servicesMap[hostName]; svc != nil {
				es.discoverability[hostName] = ec.EndpointDiscoverabilityPolicy(svc).String()
			}
		}
		out = append(out, es)
	}
	return out
}
// Run is a no-op; the underlying informer is started by the shared client.
func (ec *serviceExportCacheImpl) Run(stop <-chan struct{}) {
}

// HasSynced reports whether the ServiceExport informer has completed its
// initial sync.
func (ec *serviceExportCacheImpl) HasSynced() bool {
	return ec.serviceExports.HasSynced()
}
// disabledServiceExportCache is the no-op serviceExportCache used when MCS
// service discovery is disabled.
type disabledServiceExportCache struct{}

var _ serviceExportCache = disabledServiceExportCache{}

// EndpointDiscoverabilityPolicy always allows discovery from anywhere: MCS
// discoverability restrictions do not apply when MCS is disabled.
func (c disabledServiceExportCache) EndpointDiscoverabilityPolicy(*model.Service) model.EndpointDiscoverabilityPolicy {
	return model.AlwaysDiscoverable
}

// Run is a no-op.
func (c disabledServiceExportCache) Run(stop <-chan struct{}) {}

// HasSynced always reports true; there is nothing to sync.
func (c disabledServiceExportCache) HasSynced() bool {
	return true
}

func (c disabledServiceExportCache) ExportedServices() []exportedService {
	// MCS is disabled - returning `nil`, which is semantically different here than an empty list.
	return nil
}

// HasCRDInstalled reports false; MCS CRDs are not consulted when disabled.
func (c disabledServiceExportCache) HasCRDInstalled() bool {
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"sort"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/kube"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/kube/mcs"
"istio.io/istio/pkg/slices"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
)
const (
	// mcsDomainSuffix is the hostname suffix (including the leading dot) of
	// MCS clusterset.local hosts.
	mcsDomainSuffix = "." + constants.DefaultClusterSetLocalDomain
)
// importedService holds debugging information for a single ServiceImport:
// its namespaced name and the ClusterSet VIP (if any) of the synthetic MCS
// service created from it.
type importedService struct {
	namespacedName types.NamespacedName
	clusterSetVIP string
}
// serviceImportCache reads and processes Kubernetes Multi-Cluster Services (MCS) ServiceImport
// resources.
//
// An MCS controller is responsible for reading ServiceExport resources in one cluster and generating
// ServiceImport in all clusters of the ClusterSet (i.e. mesh). While the serviceExportCache reads
// ServiceExport to control the discoverability policy for individual endpoints, this controller
// reads ServiceImport in the cluster in order to extract the ClusterSet VIP and generate a
// synthetic service for the MCS host (i.e. clusterset.local). The aggregate.Controller will then
// merge together the MCS services from all the clusters, filling out the full map of Cluster IPs.
//
// The synthetic MCS service is a copy of the real k8s Service (e.g. cluster.local) with the same
// namespaced name, but with the hostname and VIPs changed to the appropriate ClusterSet values.
// The real k8s Service can live anywhere in the mesh and does not have to reside in the same
// cluster as the ServiceImport.
type serviceImportCache interface {
	// Run starts the cache; current implementations are no-ops.
	Run(stop <-chan struct{})
	// HasSynced indicates whether the ServiceImport informer has synced.
	HasSynced() bool
	// ImportedServices returns the ServiceImports in this cluster. Used for debugging.
	ImportedServices() []importedService
}
// newServiceImportCache creates a new cache of ServiceImport resources in the cluster.
// When MCS host support is disabled, a no-op placeholder cache is returned.
func newServiceImportCache(c *Controller) serviceImportCache {
	if !features.EnableMCSHost {
		// MCS Service discovery is disabled. Use a placeholder cache.
		return disabledServiceImportCache{}
	}
	ic := &serviceImportCacheImpl{
		Controller: c,
	}
	// Watch ServiceImport resources via a dynamic informer.
	ic.serviceImports = kclient.NewDelayedInformer[controllers.Object](ic.client, mcs.ServiceImportGVR, kubetypes.DynamicInformer, kclient.Filter{
		ObjectFilter: ic.opts.GetFilter(),
	})
	// Register callbacks for events.
	registerHandlers(ic.Controller, ic.serviceImports, "ServiceImports", ic.onServiceImportEvent, nil)
	ic.opts.MeshServiceController.AppendServiceHandlerForCluster(ic.Cluster(), ic.onServiceEvent)
	return ic
}
// serviceImportCacheImpl reads ServiceImport resources for a single cluster.
type serviceImportCacheImpl struct {
	*Controller

	// serviceImports is the (dynamic) informer over ServiceImport resources.
	serviceImports kclient.Untyped
}
// onServiceEvent is called when the controller receives an event for the kube Service (i.e. cluster.local).
// When this happens, we need to update the state of the associated synthetic MCS service.
func (ic *serviceImportCacheImpl) onServiceEvent(_, curr *model.Service, event model.Event) {
	if strings.HasSuffix(curr.Hostname.String(), mcsDomainSuffix) {
		// Ignore events for MCS services that were triggered by this controller.
		return
	}

	// This method is called concurrently from each cluster's queue. Process it in `this` cluster's queue
	// in order to synchronize event processing.
	ic.queue.Push(func() error {
		namespacedName := namespacedNameForService(curr)

		// Lookup the previous MCS service if there was one.
		mcsHost := serviceClusterSetLocalHostname(namespacedName)
		prevMcsService := ic.GetService(mcsHost)

		// Get the ClusterSet VIPs for this service in this cluster. Will only be populated if the
		// service has a ServiceImport in this cluster.
		vips := ic.getClusterSetIPs(namespacedName)
		name := namespacedName.Name
		ns := namespacedName.Namespace

		// Delete the synthetic MCS service when there are no VIPs, or when the
		// real service was deleted and no cluster in the mesh still has it.
		if len(vips) == 0 || (event == model.EventDelete &&
			ic.opts.MeshServiceController.GetService(kube.ServiceHostname(name, ns, ic.opts.DomainSuffix)) == nil) {
			if prevMcsService != nil {
				// There are no vips in this cluster. Just delete the MCS service now.
				ic.deleteService(prevMcsService)
			}
			return nil
		}

		// Reclassify the event based on whether the MCS service already existed,
		// since the MCS service's lifecycle is independent of the real service's.
		if prevMcsService != nil {
			event = model.EventUpdate
		} else {
			event = model.EventAdd
		}

		mcsService := ic.genMCSService(curr, mcsHost, vips)
		ic.addOrUpdateService(nil, nil, mcsService, event, false)
		return nil
	})
}
// onServiceImportEvent handles events for ServiceImport resources, creating,
// updating, or deleting the synthetic MCS (clusterset.local) service as the
// import's ClusterSet VIPs appear, change, or disappear.
func (ic *serviceImportCacheImpl) onServiceImportEvent(_, obj controllers.Object, event model.Event) error {
	si := controllers.Extract[*unstructured.Unstructured](obj)
	if si == nil {
		return nil
	}

	// We need a full push if the cluster VIP changes.
	needsFullPush := false

	// Get the updated MCS service.
	mcsHost := serviceClusterSetLocalHostnameForKR(si)
	mcsService := ic.GetService(mcsHost)

	ips := GetServiceImportIPs(si)
	if mcsService == nil {
		if event == model.EventDelete || len(ips) == 0 {
			// We never created the service. Nothing to delete.
			return nil
		}

		// The service didn't exist prior. Treat it as an add.
		event = model.EventAdd

		// Create the MCS service, based on the cluster.local service. We get the merged, mesh-wide service
		// from the aggregate controller so that we don't rely on the service existing in this cluster.
		realService := ic.opts.MeshServiceController.GetService(kube.ServiceHostnameForKR(si, ic.opts.DomainSuffix))
		if realService == nil {
			log.Warnf("failed processing %s event for ServiceImport %s/%s in cluster %s. No matching service found in cluster",
				event, si.GetNamespace(), si.GetName(), ic.Cluster())
			return nil
		}

		// Create the MCS service from the cluster.local service.
		mcsService = ic.genMCSService(realService, mcsHost, ips)
	} else {
		if event == model.EventDelete || len(ips) == 0 {
			ic.deleteService(mcsService)
			return nil
		}

		// The service already existed. Treat it as an update.
		event = model.EventUpdate

		// Copy-on-write: never mutate the service instance held by the registry.
		mcsService = mcsService.DeepCopy()
		if ic.updateIPs(mcsService, ips) {
			needsFullPush = true
		}
	}

	// Always force a rebuild of the endpoint cache in case this import caused
	// a change to the discoverability policy.
	ic.addOrUpdateService(nil, nil, mcsService, event, true)

	// TODO: do we really need a full push, we should do it in `addOrUpdateService`.
	if needsFullPush {
		ic.doFullPush(mcsHost, si.GetNamespace())
	}

	return nil
}
// updateIPs replaces this cluster's ClusterSet VIPs on the given MCS service
// when they differ from ips, returning true when a change was made.
func (ic *serviceImportCacheImpl) updateIPs(mcsService *model.Service, ips []string) (updated bool) {
	c := ic.Cluster()
	if slices.Equal(mcsService.ClusterVIPs.GetAddressesFor(c), ips) {
		// Nothing changed.
		return false
	}
	// Update the VIPs
	mcsService.ClusterVIPs.SetAddressesFor(c, ips)
	return true
}
// doFullPush requests a full XDS push for the given MCS host.
func (ic *serviceImportCacheImpl) doFullPush(mcsHost host.Name, ns string) {
	req := &model.PushRequest{
		Full: true,
		ConfigsUpdated: sets.New(model.ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      mcsHost.String(),
			Namespace: ns,
		}),
		Reason: model.NewReasonStats(model.ServiceUpdate),
	}
	ic.opts.XDSUpdater.ConfigUpdate(req)
}
// GetServiceImportIPs returns the sorted list of ClusterSet IPs for the ServiceImport.
// Entries that are not strings or are not valid IP addresses are skipped.
// Exported for testing only.
func GetServiceImportIPs(si *unstructured.Unstructured) []string {
	var ips []string
	if spec, ok := si.Object["spec"].(map[string]any); ok {
		if rawIPs, ok := spec["ips"].([]any); ok {
			for _, rawIP := range rawIPs {
				// Use a checked assertion: unstructured content originates
				// outside this process, and the previous unchecked
				// `rawIP.(string)` would panic on a non-string entry.
				ip, ok := rawIP.(string)
				if !ok {
					continue
				}
				if netutil.IsValidIPAddress(ip) {
					ips = append(ips, ip)
				}
			}
		}
	}
	sort.Strings(ips)
	return ips
}
// genMCSService generates an MCS service based on the given real k8s service. The list of vips must be non-empty.
func (ic *serviceImportCacheImpl) genMCSService(realService *model.Service, mcsHost host.Name, vips []string) *model.Service {
	// Clone the real service, then overwrite the hostname and addressing with
	// the ClusterSet values.
	out := realService.DeepCopy()
	out.Hostname = mcsHost
	out.DefaultAddress = vips[0]
	out.ClusterVIPs.Addresses = map[cluster.ID][]string{ic.Cluster(): vips}
	return out
}
// getClusterSetIPs returns the ClusterSet IPs declared by this cluster's
// ServiceImport for the named service, or nil when no import exists.
func (ic *serviceImportCacheImpl) getClusterSetIPs(name types.NamespacedName) []string {
	si := ic.serviceImports.Get(name.Name, name.Namespace)
	if si == nil {
		return nil
	}
	return GetServiceImportIPs(si.(*unstructured.Unstructured))
}
// ImportedServices returns debugging information for each ServiceImport in
// this cluster, including the ClusterSet VIP of the synthetic MCS service
// when one has been created.
func (ic *serviceImportCacheImpl) ImportedServices() []importedService {
	sis := ic.serviceImports.List(metav1.NamespaceAll, klabels.Everything())

	ic.RLock()
	defer ic.RUnlock()

	// Iterate over the ServiceImport resources in this cluster.
	out := make([]importedService, 0, len(sis))
	for _, si := range sis {
		usi := si.(*unstructured.Unstructured)
		info := importedService{
			namespacedName: config.NamespacedName(usi),
		}

		// Lookup the synthetic MCS service.
		if svc := ic.servicesMap[serviceClusterSetLocalHostnameForKR(usi)]; svc != nil {
			if vips := svc.ClusterVIPs.GetAddressesFor(ic.Cluster()); len(vips) > 0 {
				info.clusterSetVIP = vips[0]
			}
		}

		out = append(out, info)
	}
	return out
}
// Run is a no-op; the underlying informer is started by the shared client.
func (ic *serviceImportCacheImpl) Run(stop <-chan struct{}) {
}

// HasSynced reports whether the ServiceImport informer has completed its
// initial sync.
func (ic *serviceImportCacheImpl) HasSynced() bool {
	return ic.serviceImports.HasSynced()
}
// disabledServiceImportCache is the no-op serviceImportCache used when MCS
// host support is disabled.
type disabledServiceImportCache struct{}

var _ serviceImportCache = disabledServiceImportCache{}

// Run is a no-op.
func (c disabledServiceImportCache) Run(stop <-chan struct{}) {}

// HasSynced always reports true; there is nothing to sync.
func (c disabledServiceImportCache) HasSynced() bool {
	return true
}

func (c disabledServiceImportCache) ImportedServices() []importedService {
	// MCS is disabled - returning `nil`, which is semantically different here than an empty list.
	return nil
}

// HasCRDInstalled reports false; MCS CRDs are not consulted when disabled.
func (c disabledServiceImportCache) HasCRDInstalled() bool {
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"encoding/json"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/validation/field"
"istio.io/api/annotation"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
)
// getLabelValue returns the value of label from the object metadata, falling
// back to fallBackLabel when the primary label is absent or empty.
func getLabelValue(metadata metav1.ObjectMeta, label string, fallBackLabel string) string {
	metaLabels := metadata.GetLabels()
	if val := metaLabels[label]; val != "" {
		return val
	}
	return metaLabels[fallBackLabel]
}
// Forked from Kubernetes k8s.io/kubernetes/pkg/api/v1/pod
// FindPort locates the container port for the given pod and portName. If the
// targetPort is a number, use that. If the targetPort is a string, look that
// string up in all named ports in all containers in the target pod. If no
// match is found, fail.
func FindPort(pod *v1.Pod, svcPort *v1.ServicePort) (int, error) {
	target := svcPort.TargetPort
	if target.Type == intstr.Int {
		// Numeric target ports are used directly.
		return target.IntValue(), nil
	}
	if target.Type == intstr.String {
		// A named target port must match both the name and the protocol of a
		// container port somewhere in the pod.
		for _, container := range pod.Spec.Containers {
			for _, p := range container.Ports {
				if p.Name == target.StrVal && p.Protocol == svcPort.Protocol {
					return int(p.ContainerPort), nil
				}
			}
		}
	}
	return 0, fmt.Errorf("no suitable port for manifest: %s", pod.UID)
}
// findPortFromMetadata resolves the TargetPort of a Service Port, by reading the Pod spec.
func findPortFromMetadata(svcPort v1.ServicePort, podPorts []model.PodPort) (int, error) {
	target := svcPort.TargetPort
	if target.Type == intstr.Int {
		// For a direct reference we can just return the port number
		return target.IntValue(), nil
	}
	if target.Type == intstr.String {
		// A named target port must match both name and protocol.
		for _, p := range podPorts {
			if p.Name == target.StrVal && p.Protocol == string(svcPort.Protocol) {
				return p.ContainerPort, nil
			}
		}
	}
	return 0, fmt.Errorf("no matching port found for %+v", svcPort)
}
// serviceTargetPort describes how a service port maps onto its target
// container port: either by number, or by (possibly inferred) name.
type serviceTargetPort struct {
	// the mapped port number, or 0 if unspecified
	num int
	// the mapped port name
	name string
	// a bool indicating if the mapped port name was explicitly set on the TargetPort field, or inferred from k8s' port.Name
	explicitName bool
}
// findServiceTargetPort resolves the target port of servicePort against the
// ports declared on the Kubernetes service, returning a numeric mapping when
// the TargetPort is an integer and a named mapping otherwise.
func findServiceTargetPort(servicePort *model.Port, k8sService *v1.Service) serviceTargetPort {
	for _, p := range k8sService.Spec.Ports {
		// TODO(@hzxuzhonghu): check protocol as well as port
		if p.Name != servicePort.Name && p.Port != int32(servicePort.Port) {
			continue
		}
		if p.TargetPort.Type == intstr.Int && p.TargetPort.IntVal > 0 {
			return serviceTargetPort{num: int(p.TargetPort.IntVal), name: p.Name, explicitName: false}
		}
		return serviceTargetPort{num: 0, name: p.TargetPort.StrVal, explicitName: true}
	}
	// should never happen
	log.Debugf("did not find matching target port for %v on service %s", servicePort, k8sService.Name)
	return serviceTargetPort{num: 0, name: "", explicitName: false}
}
// getPodServices returns the subset of allServices whose selectors match the
// labels of the given pod.
func getPodServices(allServices []*v1.Service, pod *v1.Pod) []*v1.Service {
	var matched []*v1.Service
	for _, svc := range allServices {
		if labels.Instance(svc.Spec.Selector).Match(pod.Labels) {
			matched = append(matched, svc)
		}
	}
	return matched
}
// getNodeSelectorsForService returns the node selector declared via the
// traffic node-selector annotation on the service, or nil when the annotation
// is absent or cannot be parsed.
func getNodeSelectorsForService(svc *v1.Service) labels.Instance {
	nodeSelector := svc.Annotations[annotation.TrafficNodeSelector.Name]
	if nodeSelector == "" {
		return nil
	}
	var nodeSelectorKV map[string]string
	if err := json.Unmarshal([]byte(nodeSelector), &nodeSelectorKV); err != nil {
		log.Debugf("failed to unmarshal node selector annotation value for service %s.%s: %v",
			svc.Name, svc.Namespace, err)
		// Previously the (possibly partially-populated) map was returned even
		// on a parse error; return nil explicitly so malformed annotations
		// never yield a partial selector.
		return nil
	}
	return nodeSelectorKV
}
// nodeEquals reports whether two kubernetesNode values have the same address
// and labels.
func nodeEquals(a, b kubernetesNode) bool {
	return a.address == b.address && a.labels.Equals(b.labels)
}
// isNodePortGatewayService reports whether svc is a NodePort service that is
// annotated with the traffic node-selector (i.e. an istio ingress gateway).
func isNodePortGatewayService(svc *v1.Service) bool {
	if svc == nil {
		return false
	}
	if svc.Spec.Type != v1.ServiceTypeNodePort {
		return false
	}
	_, hasAnnotation := svc.Annotations[annotation.TrafficNodeSelector.Name]
	return hasAnnotation
}
// Get the pod key of the proxy which can be used to get pod from the informer cache
func podKeyByProxy(proxy *model.Proxy) types.NamespacedName {
	parts := strings.Split(proxy.ID, ".")
	// Proxy IDs are expected to look like "<pod-name>.<namespace>"; only
	// trust the ID when its namespace component agrees with the metadata.
	if len(parts) != 2 || proxy.Metadata.Namespace != parts[1] {
		return types.NamespacedName{}
	}
	return types.NamespacedName{Name: parts[0], Namespace: parts[1]}
}
// namespacedNameForService returns the namespace/name key of the service, as
// recorded in its attributes.
func namespacedNameForService(svc *model.Service) types.NamespacedName {
	return types.NamespacedName{
		Namespace: svc.Attributes.Namespace,
		Name: svc.Attributes.Name,
	}
}
// serviceClusterSetLocalHostname produces Kubernetes Multi-Cluster Services (MCS) ClusterSet FQDN for a k8s service
func serviceClusterSetLocalHostname(nn types.NamespacedName) host.Name {
	// Format: "<name>.<namespace>.svc.<clusterset-local-domain>"
	return host.Name(nn.Name + "." + nn.Namespace + ".svc." + constants.DefaultClusterSetLocalDomain)
}
// serviceClusterSetLocalHostnameForKR calls serviceClusterSetLocalHostname with the name and namespace of the given kubernetes resource.
func serviceClusterSetLocalHostnameForKR(obj metav1.Object) host.Name {
	return serviceClusterSetLocalHostname(config.NamespacedName(obj))
}
// labelRequirement builds a label-selector requirement. It panics on invalid
// arguments, which indicates a programming error rather than a runtime
// condition.
func labelRequirement(key string, op selection.Operator, vals []string, opts ...field.PathOption) *klabels.Requirement {
	req, err := klabels.NewRequirement(key, op, vals, opts...)
	if err != nil {
		panic(fmt.Sprintf("failed creating requirements for Service: %v", err))
	}
	return req
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
"strings"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/gateway-api/apis/v1beta1"
"istio.io/api/annotation"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/kube"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
)
// convertPort translates a Kubernetes ServicePort into its model.Port
// representation, inferring the Istio protocol from the port metadata.
func convertPort(port corev1.ServicePort) *model.Port {
	protocol := kube.ConvertProtocol(port.Port, port.Name, port.Protocol, port.AppProtocol)
	return &model.Port{
		Name:     port.Name,
		Port:     int(port.Port),
		Protocol: protocol,
	}
}
// ConvertService translates a Kubernetes Service into the Istio model.Service
// representation for the given cluster. It derives the resolution mode from
// the service type (ExternalName, headless, or ClusterIP), collects ports,
// service accounts, and exportTo visibility from annotations, and records
// NodePort mappings / LoadBalancer or ExternalIP addresses in the service
// attributes.
func ConvertService(svc corev1.Service, domainSuffix string, clusterID cluster.ID) *model.Service {
	addrs := []string{constants.UnspecifiedIP}
	resolution := model.ClientSideLB
	externalName := ""
	nodeLocal := false

	if svc.Spec.Type == corev1.ServiceTypeExternalName && svc.Spec.ExternalName != "" {
		externalName = svc.Spec.ExternalName
		if features.EnableExternalNameAlias {
			resolution = model.Alias
		} else {
			resolution = model.DNSLB
		}
	}
	if svc.Spec.InternalTrafficPolicy != nil && *svc.Spec.InternalTrafficPolicy == corev1.ServiceInternalTrafficPolicyLocal {
		nodeLocal = true
	}

	if svc.Spec.ClusterIP == corev1.ClusterIPNone { // headless services should not be load balanced
		resolution = model.Passthrough
	} else if svc.Spec.ClusterIP != "" {
		addrs[0] = svc.Spec.ClusterIP
		// Dual-stack services carry additional cluster IPs.
		if len(svc.Spec.ClusterIPs) > 1 {
			addrs = svc.Spec.ClusterIPs
		}
	}

	ports := make([]*model.Port, 0, len(svc.Spec.Ports))
	for _, port := range svc.Spec.Ports {
		ports = append(ports, convertPort(port))
	}

	var exportTo sets.Set[visibility.Instance]
	serviceaccounts := make([]string, 0)
	// Service accounts may be declared via the canonical (SPIFFE) or the
	// Kubernetes service-account annotations; both are comma-separated lists.
	if svc.Annotations[annotation.AlphaCanonicalServiceAccounts.Name] != "" {
		serviceaccounts = append(serviceaccounts, strings.Split(svc.Annotations[annotation.AlphaCanonicalServiceAccounts.Name], ",")...)
	}
	if svc.Annotations[annotation.AlphaKubernetesServiceAccounts.Name] != "" {
		for _, ksa := range strings.Split(svc.Annotations[annotation.AlphaKubernetesServiceAccounts.Name], ",") {
			serviceaccounts = append(serviceaccounts, kubeToIstioServiceAccount(ksa, svc.Namespace))
		}
	}
	if svc.Annotations[annotation.NetworkingExportTo.Name] != "" {
		namespaces := strings.Split(svc.Annotations[annotation.NetworkingExportTo.Name], ",")
		exportTo = sets.NewWithLength[visibility.Instance](len(namespaces))
		for _, ns := range namespaces {
			exportTo.Insert(visibility.Instance(ns))
		}
	}

	istioService := &model.Service{
		Hostname: ServiceHostname(svc.Name, svc.Namespace, domainSuffix),
		ClusterVIPs: model.AddressMap{
			Addresses: map[cluster.ID][]string{
				clusterID: addrs,
			},
		},
		Ports:           ports,
		DefaultAddress:  addrs[0],
		ServiceAccounts: serviceaccounts,
		MeshExternal:    len(externalName) > 0,
		Resolution:      resolution,
		CreationTime:    svc.CreationTimestamp.Time,
		ResourceVersion: svc.ResourceVersion,
		Attributes: model.ServiceAttributes{
			ServiceRegistry: provider.Kubernetes,
			Name:            svc.Name,
			Namespace:       svc.Namespace,
			Labels:          svc.Labels,
			ExportTo:        exportTo,
			LabelSelectors:  svc.Spec.Selector,
		},
	}

	switch svc.Spec.Type {
	case corev1.ServiceTypeNodePort:
		if _, ok := svc.Annotations[annotation.TrafficNodeSelector.Name]; !ok {
			// only do this for istio ingress-gateway services
			break
		}
		// store the service port to node port mappings
		portMap := make(map[uint32]uint32)
		for _, p := range svc.Spec.Ports {
			portMap[uint32(p.Port)] = uint32(p.NodePort)
		}
		istioService.Attributes.ClusterExternalPorts = map[cluster.ID]map[uint32]uint32{clusterID: portMap}
		// address mappings will be done elsewhere
	case corev1.ServiceTypeLoadBalancer:
		if len(svc.Status.LoadBalancer.Ingress) > 0 {
			var lbAddrs []string
			for _, ingress := range svc.Status.LoadBalancer.Ingress {
				if len(ingress.IP) > 0 {
					lbAddrs = append(lbAddrs, ingress.IP)
				} else if len(ingress.Hostname) > 0 {
					// DO NOT resolve the DNS here. In environments like AWS, the ELB hostname
					// does not have a repeatable DNS address and IPs resolved at an earlier point
					// in time may not work. So, when we get just hostnames instead of IPs, we need
					// to smartly switch from EDS to strict_dns rather than doing the naive thing of
					// resolving the DNS name and hoping the resolution is one-time task.
					lbAddrs = append(lbAddrs, ingress.Hostname)
				}
			}
			if len(lbAddrs) > 0 {
				if istioService.Attributes.ClusterExternalAddresses == nil {
					istioService.Attributes.ClusterExternalAddresses = &model.AddressMap{}
				}
				istioService.Attributes.ClusterExternalAddresses.SetAddressesFor(clusterID, lbAddrs)
			}
		}
	}

	istioService.Attributes.Type = string(svc.Spec.Type)
	istioService.Attributes.ExternalName = externalName
	istioService.Attributes.NodeLocal = nodeLocal

	if len(svc.Spec.ExternalIPs) > 0 {
		if istioService.Attributes.ClusterExternalAddresses == nil {
			istioService.Attributes.ClusterExternalAddresses = &model.AddressMap{}
		}
		istioService.Attributes.ClusterExternalAddresses.AddAddressesFor(clusterID, svc.Spec.ExternalIPs)
	}
	return istioService
}
// ExternalNameEndpoints builds one synthetic endpoint per service port, each pointing at
// the service's ExternalName target. Returns nil when the service has no ExternalName.
func ExternalNameEndpoints(svc *model.Service) []*model.IstioEndpoint {
	externalName := svc.Attributes.ExternalName
	if externalName == "" {
		return nil
	}
	policy := model.AlwaysDiscoverable
	if features.EnableMCSServiceDiscovery {
		// MCS spec does not allow export of external name services.
		// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-multicluster/1645-multi-cluster-services-api#exporting-services.
		policy = model.DiscoverableFromSameCluster
	}
	endpoints := make([]*model.IstioEndpoint, 0, len(svc.Ports))
	for _, p := range svc.Ports {
		endpoints = append(endpoints, &model.IstioEndpoint{
			Address:               externalName,
			EndpointPort:          uint32(p.Port),
			ServicePortName:       p.Name,
			Labels:                svc.Attributes.Labels,
			DiscoverabilityPolicy: policy,
		})
	}
	return endpoints
}
// ServiceHostname produces the FQDN for a k8s service in the form
// "<name>.<namespace>.svc.<domainSuffix>".
func ServiceHostname(name, namespace, domainSuffix string) host.Name {
	return host.Name(name + "." + namespace + ".svc." + domainSuffix)
}
// ServiceHostnameForKR calls ServiceHostname with the name and namespace of the given kubernetes resource.
func ServiceHostnameForKR(obj metav1.Object, domainSuffix string) host.Name {
	name, ns := obj.GetName(), obj.GetNamespace()
	return ServiceHostname(name, ns, domainSuffix)
}
// kubeToIstioServiceAccount converts a K8s service account to an Istio service account
// by generating its SPIFFE URI identity from the namespace and account name.
func kubeToIstioServiceAccount(saname string, ns string) string {
	return spiffe.MustGenSpiffeURI(ns, saname)
}
// SecureNamingSAN creates the secure naming used for SAN verification from pod metadata,
// i.e. the SPIFFE URI for the pod's namespace and service account.
func SecureNamingSAN(pod *corev1.Pod) string {
	return spiffe.MustGenSpiffeURI(pod.Namespace, pod.Spec.ServiceAccountName)
}
// PodTLSMode returns the tls mode associated with the pod if pod has been injected with sidecar.
// A nil pod is treated as having TLS disabled.
func PodTLSMode(pod *corev1.Pod) string {
	if pod != nil {
		return model.GetTLSModeFromEndpointLabels(pod.Labels)
	}
	return model.DisabledTLSModeLabel
}
// IsAutoPassthrough determines if a listener should use auto passthrough mode. This is used for
// multi-network. In the Istio API, this is an explicit tls.Mode. However, this mode is not part of
// the gateway-api, and leaks implementation details. We already have an API to declare a Gateway as
// a multi-network gateway, so we will use this as a signal.
// A user who wishes to expose multi-network connectivity should create a listener named "tls-passthrough"
// with TLS.Mode Passthrough.
// For some backwards compatibility, we assume any listener with TLS specified and a port matching
// 15443 (or the label-override for gateway port) is auto-passthrough as well.
func IsAutoPassthrough(gwLabels map[string]string, l v1beta1.Listener) bool {
	if l.TLS == nil {
		return false
	}
	if hasListenerMode(l, constants.ListenerModeAutoPassthrough) {
		return true
	}
	// The legacy port-based heuristic only applies to gateways labeled as part of a network.
	if _, hasNetwork := gwLabels[label.TopologyNetwork.Name]; !hasNetwork {
		return false
	}
	wantPort, overridden := gwLabels[label.NetworkingGatewayPort.Name]
	if !overridden {
		wantPort = "15443"
	}
	return fmt.Sprint(l.Port) == wantPort
}
// hasListenerMode reports whether the listener's TLS options explicitly request the given
// listener mode (via the ListenerModeOption TLS option key).
func hasListenerMode(l v1beta1.Listener, mode string) bool {
	// TODO if we add a hybrid mode for detecting HBONE/passthrough, also check that here
	return l.TLS != nil && l.TLS.Options != nil && string(l.TLS.Options[constants.ListenerModeOption]) == mode
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package memory
import (
"fmt"
"net/netip"
"sync"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/workloadapi"
"istio.io/istio/pkg/workloadapi/security"
)
// ServiceDiscovery is a mock discovery interface used for tests. All state is kept
// in-memory and guarded by a single mutex.
type ServiceDiscovery struct {
	// services indexes the registered services by hostname.
	services map[host.Name]*model.Service
	handlers model.ControllerHandlers
	// networkGateways holds gateways added via AddGateways.
	networkGateways []model.NetworkGateway
	model.NetworkGatewaysHandler
	// EndpointShards table. Key is the fqdn of the service, ':', port
	instancesByPortNum  map[string][]*model.ServiceInstance
	instancesByPortName map[string][]*model.ServiceInstance
	// Used by GetProxyServiceInstance, used to configure inbound (list of services per IP)
	// We generally expect a single instance - conflicting services need to be reported.
	ip2instance map[string][]*model.ServiceInstance
	// WantGetProxyServiceTargets, when non-nil, is returned verbatim by GetProxyServiceTargets.
	WantGetProxyServiceTargets []model.ServiceTarget
	InstancesError             error
	Controller                 model.Controller
	ClusterID                  cluster.ID
	// Used by GetProxyWorkloadLabels
	ip2workloadLabels map[string]labels.Instance
	// addresses indexes AddressInfo entries by resource name (ambient workload/service info).
	addresses map[string]*model.AddressInfo
	// XDSUpdater will push EDS changes to the ADS model.
	XdsUpdater model.XDSUpdater
	// Single mutex for now - it's for debug only.
	mutex sync.Mutex
}
// Compile-time assertions that ServiceDiscovery implements the interfaces it mocks.
var (
	_ model.Controller       = &ServiceDiscovery{}
	_ model.ServiceDiscovery = &ServiceDiscovery{}
)
// NewServiceDiscovery builds an in-memory ServiceDiscovery, pre-populated with the
// provided services (indexed by hostname).
func NewServiceDiscovery(services ...*model.Service) *ServiceDiscovery {
	sd := &ServiceDiscovery{
		services:            map[host.Name]*model.Service{},
		instancesByPortNum:  map[string][]*model.ServiceInstance{},
		instancesByPortName: map[string][]*model.ServiceInstance{},
		ip2instance:         map[string][]*model.ServiceInstance{},
		ip2workloadLabels:   map[string]labels.Instance{},
		addresses:           map[string]*model.AddressInfo{},
	}
	for _, svc := range services {
		sd.services[svc.Hostname] = svc
	}
	return sd
}
// shardKey returns the EDS shard key for this registry: its cluster ID plus the Mock provider.
func (sd *ServiceDiscovery) shardKey() model.ShardKey {
	return model.ShardKey{Cluster: sd.ClusterID, Provider: provider.Mock}
}
// AddWorkload records the labels for a workload IP, consumed by GetProxyWorkloadLabels.
// NOTE(review): unlike most methods on this type, this does not take sd.mutex — callers
// appear expected to invoke it during single-threaded test setup.
func (sd *ServiceDiscovery) AddWorkload(ip string, labels labels.Instance) {
	sd.ip2workloadLabels[ip] = labels
}
// AddHTTPService is a helper to add a service of type http, named 'http-main', with the
// specified vip and port.
func (sd *ServiceDiscovery) AddHTTPService(name, vip string, port int) {
	svc := &model.Service{
		Hostname:       host.Name(name),
		DefaultAddress: vip,
		Ports: model.PortList{{
			Name:     "http-main",
			Port:     port,
			Protocol: protocol.HTTP,
		}},
	}
	sd.AddService(svc)
}
// AddService adds (or replaces) an in-memory service, then notifies the XDS updater and
// any registered service handlers. If a service with the same hostname already exists,
// the event is reported as an update rather than an add.
func (sd *ServiceDiscovery) AddService(svc *model.Service) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	svc.Attributes.ServiceRegistry = provider.Mock
	var old *model.Service
	event := model.EventAdd
	if o, f := sd.services[svc.Hostname]; f {
		old = o
		event = model.EventUpdate
	}
	sd.services[svc.Hostname] = svc
	if sd.XdsUpdater != nil {
		// Fix: report the actual event. Previously this always sent model.EventAdd,
		// even when an existing service was being replaced, while the service handlers
		// below were (correctly) notified with EventUpdate.
		sd.XdsUpdater.SvcUpdate(sd.shardKey(), string(svc.Hostname), svc.Attributes.Namespace, event)
	}
	sd.handlers.NotifyServiceHandlers(old, svc, event)
}
// RemoveService removes an in-memory service, notifying the XDS updater and service
// handlers. Removing a service that was never added is a no-op; previously it
// dereferenced the nil map lookup result (svc.Hostname) and panicked.
func (sd *ServiceDiscovery) RemoveService(name host.Name) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	svc, found := sd.services[name]
	if !found {
		return
	}
	delete(sd.services, name)
	if sd.XdsUpdater != nil {
		sd.XdsUpdater.SvcUpdate(sd.shardKey(), string(svc.Hostname), svc.Attributes.Namespace, model.EventDelete)
	}
	sd.handlers.NotifyServiceHandlers(nil, svc, model.EventDelete)
}
// AddInstance adds an in-memory instance and notifies the XDS updater with the full,
// rebuilt endpoint set for the instance's service. Instances for unknown services are
// silently dropped.
func (sd *ServiceDiscovery) AddInstance(instance *model.ServiceInstance) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	service := instance.Service.Hostname
	svc := sd.services[service]
	if svc == nil {
		// Service not registered: drop the instance.
		return
	}
	if instance.Endpoint.ServicePortName == "" {
		// Default the endpoint's port name from the service port.
		instance.Endpoint.ServicePortName = instance.ServicePort.Name
	}
	// Re-point the instance at the stored service object so all indexes share it.
	instance.Service = svc
	sd.ip2instance[instance.Endpoint.Address] = append(sd.ip2instance[instance.Endpoint.Address], instance)
	// Index by "<hostname>:<port number>" ...
	key := fmt.Sprintf("%s:%d", service, instance.ServicePort.Port)
	instanceList := sd.instancesByPortNum[key]
	sd.instancesByPortNum[key] = append(instanceList, instance)
	// ... and by "<hostname>:<port name>".
	key = fmt.Sprintf("%s:%s", service, instance.ServicePort.Name)
	instanceList = sd.instancesByPortName[key]
	sd.instancesByPortName[key] = append(instanceList, instance)
	// Collect every endpoint across all ports of the service for the EDS push.
	// (The capacity hint only covers the last-updated port's list.)
	eps := make([]*model.IstioEndpoint, 0, len(sd.instancesByPortName[key]))
	for _, port := range svc.Ports {
		key := fmt.Sprintf("%s:%s", service, port.Name)
		for _, i := range sd.instancesByPortName[key] {
			eps = append(eps, i.Endpoint)
		}
	}
	if sd.XdsUpdater != nil {
		sd.XdsUpdater.EDSUpdate(sd.shardKey(), string(service), svc.Attributes.Namespace, eps)
	}
}
// AddEndpoint adds a single HTTP endpoint to the named service and returns the
// ServiceInstance that was registered.
func (sd *ServiceDiscovery) AddEndpoint(service host.Name, servicePortName string, servicePort int, address string, port int) *model.ServiceInstance {
	ep := &model.IstioEndpoint{
		Address:         address,
		ServicePortName: servicePortName,
		EndpointPort:    uint32(port),
	}
	svcPort := &model.Port{
		Name:     servicePortName,
		Port:     servicePort,
		Protocol: protocol.HTTP,
	}
	instance := &model.ServiceInstance{
		Service:     &model.Service{Hostname: service},
		Endpoint:    ep,
		ServicePort: svcPort,
	}
	sd.AddInstance(instance)
	return instance
}
// SetEndpoints update the list of endpoints for a service, similar with K8S controller.
// It drops every previously-indexed instance for the service, re-indexes the given
// endpoints, and fires a single EDS update.
func (sd *ServiceDiscovery) SetEndpoints(service string, namespace string, endpoints []*model.IstioEndpoint) {
	sh := host.Name(service)
	sd.mutex.Lock()
	svc := sd.services[sh]
	if svc == nil {
		sd.mutex.Unlock()
		return
	}
	if svc.Attributes.Namespace != namespace {
		// Mismatch is logged but not fatal; the update proceeds.
		log.Errorf("Service namespace %q != namespace %q", svc.Attributes.Namespace, namespace)
	}
	// remove old entries for this hostname from all three indexes
	for k, v := range sd.ip2instance {
		if len(v) > 0 && v[0].Service.Hostname == sh {
			delete(sd.ip2instance, k)
		}
	}
	for k, v := range sd.instancesByPortNum {
		if len(v) > 0 && v[0].Service.Hostname == sh {
			delete(sd.instancesByPortNum, k)
		}
	}
	for k, v := range sd.instancesByPortName {
		if len(v) > 0 && v[0].Service.Hostname == sh {
			delete(sd.instancesByPortName, k)
		}
	}
	// Re-index each new endpoint under IP, port-number, and port-name keys.
	for _, e := range endpoints {
		// servicePortName string, servicePort int, address string, port int
		// NOTE(review): the lookup error is discarded; if e.ServicePortName does not match
		// a service port, p may be nil and the next lines would panic — confirm callers
		// always pass valid port names.
		p, _ := svc.Ports.Get(e.ServicePortName)
		instance := &model.ServiceInstance{
			Service: svc,
			ServicePort: &model.Port{
				Name:     e.ServicePortName,
				Port:     p.Port,
				Protocol: p.Protocol,
			},
			Endpoint: e,
		}
		// Unlike AddInstance, the IP index is replaced (not appended) per address.
		sd.ip2instance[instance.Endpoint.Address] = []*model.ServiceInstance{instance}
		key := fmt.Sprintf("%s:%d", service, instance.ServicePort.Port)
		instanceList := sd.instancesByPortNum[key]
		sd.instancesByPortNum[key] = append(instanceList, instance)
		key = fmt.Sprintf("%s:%s", service, instance.ServicePort.Name)
		instanceList = sd.instancesByPortName[key]
		sd.instancesByPortName[key] = append(instanceList, instance)
	}
	if sd.XdsUpdater != nil {
		sd.XdsUpdater.EDSUpdate(sd.shardKey(), service, namespace, endpoints)
	}
	sd.mutex.Unlock()
}
// Services implements discovery interface
// Each call to Services() should return a list of new *model.Service
func (sd *ServiceDiscovery) Services() []*model.Service {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	return maps.Values(sd.services)
}
// GetService implements discovery interface
// Each call to GetService() should return a new *model.Service
// Returns nil when the hostname is unknown.
func (sd *ServiceDiscovery) GetService(hostname host.Name) *model.Service {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	return sd.services[hostname]
}
// GetProxyServiceTargets returns service instances associated with a node, resulting in
// 'in' services. When WantGetProxyServiceTargets is set, it is returned unconditionally
// (test override).
func (sd *ServiceDiscovery) GetProxyServiceTargets(node *model.Proxy) []model.ServiceTarget {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	if sd.WantGetProxyServiceTargets != nil {
		return sd.WantGetProxyServiceTargets
	}
	targets := make([]model.ServiceTarget, 0)
	for _, addr := range node.IPAddresses {
		if instances, ok := sd.ip2instance[addr]; ok {
			targets = append(targets, slices.Map(instances, model.ServiceInstanceToTarget)...)
		}
	}
	return targets
}
// GetProxyWorkloadLabels returns the labels recorded (via AddWorkload) for the first of
// the proxy's IP addresses that has an entry, or nil if none match.
func (sd *ServiceDiscovery) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Instance {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	for _, addr := range proxy.IPAddresses {
		if lbls, ok := sd.ip2workloadLabels[addr]; ok {
			return lbls
		}
	}
	return nil
}
// AddGateways appends network gateways and notifies registered gateway handlers.
// NOTE(review): this does not take sd.mutex, unlike most mutating methods here —
// presumably only called from single-threaded test setup; confirm before concurrent use.
func (sd *ServiceDiscovery) AddGateways(gws ...model.NetworkGateway) {
	sd.networkGateways = append(sd.networkGateways, gws...)
	sd.NotifyGatewayHandlers()
}
// NetworkGateways returns the gateways previously added via AddGateways.
func (sd *ServiceDiscovery) NetworkGateways() []model.NetworkGateway {
	return sd.networkGateways
}
// MCSServices returns nil; the in-memory registry does not model MCS services.
func (sd *ServiceDiscovery) MCSServices() []model.MCSServiceInfo {
	return nil
}
// AppendWorkloadHandler is a no-op: memory does not support workload handlers;
// everything is done in terms of instances.
func (sd *ServiceDiscovery) AppendWorkloadHandler(func(*model.WorkloadInstance, model.Event)) {}
// AppendServiceHandler appends a service handler to the controller; handlers are
// invoked by AddService/RemoveService.
func (sd *ServiceDiscovery) AppendServiceHandler(f model.ServiceHandler) {
	sd.handlers.AppendServiceHandler(f)
}
// Run will run the controller. The in-memory implementation has nothing to run.
func (sd *ServiceDiscovery) Run(<-chan struct{}) {}
// HasSynced always returns true: the in-memory registry is synced by construction.
func (sd *ServiceDiscovery) HasSynced() bool { return true }
// AddressInformation returns the AddressInfos for the requested resource names, along
// with the subset of requested names that are not present (reported as removed).
// An empty request set returns all known addresses.
func (sd *ServiceDiscovery) AddressInformation(requests sets.String) ([]*model.AddressInfo, sets.String) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	if len(requests) == 0 {
		return maps.Values(sd.addresses), nil
	}
	var infos []*model.AddressInfo
	removed := sets.String{}
	for req := range requests {
		// Single map lookup per request; previously the map was indexed twice
		// (a membership test followed by a second access) for every hit.
		if info, found := sd.addresses[req]; found {
			infos = append(infos, info)
		} else {
			removed.Insert(req)
		}
	}
	return infos, removed
}
// AdditionalPodSubscriptions returns nil; the mock registry adds no extra subscriptions.
func (sd *ServiceDiscovery) AdditionalPodSubscriptions(
	*model.Proxy,
	sets.String,
	sets.String,
) sets.String {
	return nil
}
// Policies returns nil; the mock registry carries no authorization policies.
func (sd *ServiceDiscovery) Policies(sets.Set[model.ConfigKey]) []*security.Authorization {
	return nil
}
// Waypoint returns nil; the mock registry does not model waypoint proxies.
func (sd *ServiceDiscovery) Waypoint(model.WaypointScope) []netip.Addr {
	return nil
}
// WorkloadsForWaypoint returns nil; the mock registry does not track waypoint-attached workloads.
func (sd *ServiceDiscovery) WorkloadsForWaypoint(scope model.WaypointScope) []*model.WorkloadInfo {
	return nil
}
// AddWorkloadInfo stores the given workloads in the address index, keyed by resource name.
func (sd *ServiceDiscovery) AddWorkloadInfo(infos ...*model.WorkloadInfo) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	for _, wi := range infos {
		sd.addresses[wi.ResourceName()] = workloadToAddressInfo(wi.Workload)
	}
}
// RemoveWorkloadInfo deletes a workload from the address index by resource name.
func (sd *ServiceDiscovery) RemoveWorkloadInfo(info *model.WorkloadInfo) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	delete(sd.addresses, info.ResourceName())
}
// AddServiceInfo stores the given services in the address index, keyed by resource name.
func (sd *ServiceDiscovery) AddServiceInfo(infos ...*model.ServiceInfo) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	for _, info := range infos {
		sd.addresses[info.ResourceName()] = serviceToAddressInfo(info.Service)
	}
}
// RemoveServiceInfo deletes a service from the address index by resource name.
func (sd *ServiceDiscovery) RemoveServiceInfo(info *model.ServiceInfo) {
	sd.mutex.Lock()
	defer sd.mutex.Unlock()
	delete(sd.addresses, info.ResourceName())
}
// workloadToAddressInfo wraps a workload proto in an AddressInfo envelope.
func workloadToAddressInfo(w *workloadapi.Workload) *model.AddressInfo {
	addr := &workloadapi.Address{
		Type: &workloadapi.Address_Workload{Workload: w},
	}
	return &model.AddressInfo{Address: addr}
}
// serviceToAddressInfo wraps a service proto in an AddressInfo envelope.
func serviceToAddressInfo(s *workloadapi.Service) *model.AddressInfo {
	addr := &workloadapi.Address{
		Type: &workloadapi.Address_Service{Service: s},
	}
	return &model.AddressInfo{Address: addr}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package provider
// ID defines underlying platform supporting service registry
type ID string

const (
	// Mock is a service registry that contains 2 hard-coded test services
	Mock ID = "Mock"
	// Kubernetes is a service registry backed by k8s API server
	Kubernetes ID = "Kubernetes"
	// External is a service registry for externally provided ServiceEntries
	External ID = "External"
)

// String returns the registry ID as a plain string.
func (id ID) String() string {
	return string(id)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"fmt"
"hash/fnv"
"strconv"
"sync"
"time"
"k8s.io/apimachinery/pkg/types"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/status"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pilot/pkg/serviceregistry/util/workloadinstances"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/queue"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
var (
	// Compile-time assertion that Controller implements serviceregistry.Instance.
	_ serviceregistry.Instance = &Controller{}
	// log is the scoped logger for the serviceentry registry.
	log = istiolog.RegisterScope("serviceentry", "ServiceEntry registry")
)
var (
	prime  = 65011     // Used for secondary hash function.
	maxIPs = 256 * 254 // Maximum possible IPs for address allocation.
)
// instancesKey acts as a key to identify all instances for a given hostname/namespace pair
// This is mostly used as an index
type instancesKey struct {
	// hostname of the service the instances belong to.
	hostname host.Name
	// namespace of the service.
	namespace string
}
// octetPair holds the third and fourth octets of an auto-allocated IP address
// (used together with maxIPs for address allocation).
type octetPair struct {
	thirdOctet  int
	fourthOctet int
}
// makeInstanceKey builds the hostname/namespace index key for a service instance.
func makeInstanceKey(i *model.ServiceInstance) instancesKey {
	return instancesKey{
		hostname:  i.Service.Hostname,
		namespace: i.Service.Attributes.Namespace,
	}
}
// configType discriminates the kind of config object a configKey refers to.
type configType int

const (
	// serviceEntryConfigType identifies a ServiceEntry config.
	serviceEntryConfigType configType = iota
	// workloadEntryConfigType identifies a WorkloadEntry config.
	workloadEntryConfigType
	// podConfigType identifies a Pod-derived workload instance.
	podConfigType
)
// configKey unique identifies a config object managed by this registry (ServiceEntry and WorkloadEntry)
type configKey struct {
	// kind distinguishes ServiceEntry / WorkloadEntry / Pod sources.
	kind      configType
	name      string
	namespace string
}
// Controller communicates with ServiceEntry CRDs and monitors for changes.
type Controller struct {
	XdsUpdater model.XDSUpdater
	// store is the backing config store for ServiceEntry/WorkloadEntry resources.
	store     model.ConfigStore
	clusterID cluster.ID
	// This lock is to make multi ops on the below stores. For example, in some case,
	// it requires delete all instances and then update new ones.
	mutex sync.RWMutex
	// serviceInstances indexes the instances derived from ServiceEntry/WorkloadEntry configs.
	serviceInstances serviceInstancesStore
	// NOTE: historically, one index for both WorkloadEntry(s) and Pod(s);
	// beware of naming collisions
	workloadInstances workloadinstances.Index
	// services indexes the services derived from ServiceEntry configs.
	services serviceStore
	// To make sure the eds update run in serial to prevent stale ones can override new ones
	// when edsUpdate is called concurrently.
	// If all share one lock, then all the threads can have an obvious performance downgrade.
	edsQueue queue.Instance
	// workloadHandlers are invoked on workload instance events.
	workloadHandlers []func(*model.WorkloadInstance, model.Event)
	// callback function used to get the networkID according to workload ip and labels.
	networkIDCallback func(IP string, labels labels.Instance) network.ID
	// Indicates whether this controller is for workload entries.
	workloadEntryController bool
	model.NoopAmbientIndexes
	model.NetworkGatewaysHandler
}
// Option is a functional option for configuring a Controller.
type Option func(*Controller)

// WithClusterID sets the cluster ID this controller reports its instances under.
func WithClusterID(clusterID cluster.ID) Option {
	return func(c *Controller) {
		c.clusterID = clusterID
	}
}
// WithNetworkIDCb sets the callback used to resolve a workload's network ID from its
// endpoint IP and labels.
func WithNetworkIDCb(cb func(endpointIP string, labels labels.Instance) network.ID) Option {
	return func(c *Controller) {
		c.networkIDCallback = cb
	}
}
// NewController creates a new ServiceEntry discovery service. It registers handlers
// for both ServiceEntry and WorkloadEntry events when a config controller is provided.
func NewController(configController model.ConfigStoreController, xdsUpdater model.XDSUpdater,
	options ...Option,
) *Controller {
	s := newController(configController, xdsUpdater, options...)
	if configController != nil {
		configController.RegisterEventHandler(gvk.ServiceEntry, s.serviceEntryHandler)
		configController.RegisterEventHandler(gvk.WorkloadEntry, s.workloadEntryHandler)
	}
	return s
}
// NewWorkloadEntryController creates a new WorkloadEntry discovery service. Unlike
// NewController, it only registers the WorkloadEntry event handler and disables
// ServiceEntry processing.
func NewWorkloadEntryController(configController model.ConfigStoreController, xdsUpdater model.XDSUpdater,
	options ...Option,
) *Controller {
	s := newController(configController, xdsUpdater, options...)
	// Disable service entry processing for workload entry controller.
	s.workloadEntryController = true
	// NOTE: newController already applied all options; the previous second
	// application loop here was redundant.
	if configController != nil {
		configController.RegisterEventHandler(gvk.WorkloadEntry, s.workloadEntryHandler)
	}
	return s
}
// newController constructs the shared Controller state (instance/service stores and the
// serialized EDS queue) and applies the given options. Event-handler registration is
// left to the exported constructors.
func newController(store model.ConfigStore, xdsUpdater model.XDSUpdater, options ...Option) *Controller {
	s := &Controller{
		XdsUpdater: xdsUpdater,
		store:      store,
		serviceInstances: serviceInstancesStore{
			ip2instance:            map[string][]*model.ServiceInstance{},
			instances:              map[instancesKey]map[configKey][]*model.ServiceInstance{},
			instancesBySE:          map[types.NamespacedName]map[configKey][]*model.ServiceInstance{},
			instancesByHostAndPort: sets.New[hostPort](),
		},
		workloadInstances: workloadinstances.NewIndex(),
		services: serviceStore{
			servicesBySE: map[types.NamespacedName][]*model.Service{},
		},
		// One-second retry delay for the serialized EDS update queue.
		edsQueue: queue.NewQueue(time.Second),
	}
	for _, o := range options {
		o(s)
	}
	return s
}
// ConvertServiceEntry convert se from Config.Spec.
// Returns a shallow copy so callers may mutate top-level fields without affecting the store.
// NOTE(review): the type assertion panics if cfg.Spec is not a *networking.ServiceEntry —
// presumably callers only pass ServiceEntry configs; confirm at call sites.
func ConvertServiceEntry(cfg config.Config) *networking.ServiceEntry {
	se := cfg.Spec.(*networking.ServiceEntry)
	if se == nil {
		return nil
	}
	// shallow copy
	copied := &networking.ServiceEntry{}
	protomarshal.ShallowCopy(copied, se)
	return copied
}
// ConvertWorkloadEntry convert wle from Config.Spec and populate the metadata labels into it.
// Returns a shallow copy; the copy's Labels are the merge of spec labels and config
// metadata labels.
func ConvertWorkloadEntry(cfg config.Config) *networking.WorkloadEntry {
	wle := cfg.Spec.(*networking.WorkloadEntry)
	if wle == nil {
		return nil
	}
	// we will merge labels from metadata with spec, with precedence to the metadata
	labels := maps.MergeCopy(wle.Labels, cfg.Labels)
	// shallow copy
	copied := &networking.WorkloadEntry{}
	protomarshal.ShallowCopy(copied, wle)
	copied.Labels = labels
	return copied
}
// workloadEntryHandler defines the handler for workload entries.
// It recomputes the service instances derived from the WorkloadEntry, updates the
// instance indexes under the controller mutex, and then triggers either an EDS-only
// update or a full push depending on the resolution of the selecting ServiceEntries.
func (s *Controller) workloadEntryHandler(old, curr config.Config, event model.Event) {
	log.Debugf("Handle event %s for workload entry %s/%s", event, curr.Namespace, curr.Name)
	var oldWle *networking.WorkloadEntry
	if old.Spec != nil {
		oldWle = ConvertWorkloadEntry(old)
	}
	wle := ConvertWorkloadEntry(curr)
	curr.Spec = wle
	key := configKey{
		kind:      workloadEntryConfigType,
		name:      curr.Name,
		namespace: curr.Namespace,
	}
	// If an entry is unhealthy, we will mark this as a delete instead
	// This ensures we do not track unhealthy endpoints
	if features.WorkloadEntryHealthChecks && !isHealthy(curr) {
		event = model.EventDelete
	}
	wi := s.convertWorkloadEntryToWorkloadInstance(curr, s.Cluster())
	if wi != nil && !wi.DNSServiceEntryOnly {
		// fire off the k8s handlers
		s.NotifyWorkloadInstanceHandlers(wi, event)
	}
	// NOTE(review): wi is only nil-checked here; the loops below read wi.DNSServiceEntryOnly
	// unconditionally — presumably convertWorkloadEntryToWorkloadInstance never returns nil
	// when selecting ServiceEntries exist; confirm.
	// includes instances new updated or unchanged, in other word it is the current state.
	instancesUpdated := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	fullPush := false
	configsUpdated := sets.New[model.ConfigKey]()
	// addConfigs flags a full push when a selecting ServiceEntry uses DNS resolution,
	// since DNS endpoints flow through strict_dns clusters in CDS.
	addConfigs := func(se *networking.ServiceEntry, services []*model.Service) {
		// If serviceentry's resolution is DNS, make a full push
		// TODO: maybe cds?
		if se.Resolution == networking.ServiceEntry_DNS || se.Resolution == networking.ServiceEntry_DNS_ROUND_ROBIN {
			fullPush = true
			for key, value := range getUpdatedConfigs(services) {
				configsUpdated[key] = value
			}
		}
	}
	cfgs := s.store.List(gvk.ServiceEntry, curr.Namespace)
	currSes := getWorkloadServiceEntries(cfgs, wle)
	var oldSes map[types.NamespacedName]*config.Config
	if oldWle != nil {
		if labels.Instance(oldWle.Labels).Equals(curr.Labels) {
			// Labels unchanged: the same ServiceEntries select this workload.
			oldSes = currSes
		} else {
			// labels update should trigger proxy update
			s.XdsUpdater.ProxyUpdate(s.Cluster(), wle.Address)
			oldSes = getWorkloadServiceEntries(cfgs, oldWle)
		}
	}
	// unSelected: ServiceEntries that used to select this workload but no longer do.
	unSelected := difference(oldSes, currSes)
	log.Debugf("workloadEntry %s/%s selected %v, unSelected %v serviceEntry", curr.Namespace, curr.Name, currSes, unSelected)
	s.mutex.Lock()
	// Rebuild instances for every currently-selecting ServiceEntry.
	for namespacedName, cfg := range currSes {
		services := s.services.getServices(namespacedName)
		se := cfg.Spec.(*networking.ServiceEntry)
		if wi.DNSServiceEntryOnly && se.Resolution != networking.ServiceEntry_DNS &&
			se.Resolution != networking.ServiceEntry_DNS_ROUND_ROBIN {
			log.Debugf("skip selecting workload instance %v/%v for DNS service entry %v", wi.Namespace, wi.Name, se.Hosts)
			continue
		}
		instance := s.convertWorkloadEntryToServiceInstances(wle, services, se, &key, s.Cluster())
		instancesUpdated = append(instancesUpdated, instance...)
		if event == model.EventDelete {
			s.serviceInstances.deleteServiceEntryInstances(namespacedName, key)
		} else {
			s.serviceInstances.updateServiceEntryInstancesPerConfig(namespacedName, key, instance)
		}
		addConfigs(se, services)
	}
	// Remove instances for ServiceEntries that no longer select this workload.
	for _, namespacedName := range unSelected {
		services := s.services.getServices(namespacedName)
		cfg := oldSes[namespacedName]
		se := cfg.Spec.(*networking.ServiceEntry)
		if wi.DNSServiceEntryOnly && se.Resolution != networking.ServiceEntry_DNS &&
			se.Resolution != networking.ServiceEntry_DNS_ROUND_ROBIN {
			log.Debugf("skip selecting workload instance %v/%v for DNS service entry %v", wi.Namespace, wi.Name, se.Hosts)
			continue
		}
		instance := s.convertWorkloadEntryToServiceInstances(wle, services, se, &key, s.Cluster())
		instancesDeleted = append(instancesDeleted, instance...)
		s.serviceInstances.deleteServiceEntryInstances(namespacedName, key)
		addConfigs(se, services)
	}
	s.serviceInstances.deleteInstanceKeys(key, instancesDeleted)
	if event == model.EventDelete {
		s.workloadInstances.Delete(wi)
		s.serviceInstances.deleteInstanceKeys(key, instancesUpdated)
	} else {
		s.workloadInstances.Insert(wi)
		s.serviceInstances.updateInstances(key, instancesUpdated)
	}
	s.mutex.Unlock()
	allInstances := append(instancesUpdated, instancesDeleted...)
	if !fullPush {
		// trigger full xds push to the related sidecar proxy
		if event == model.EventAdd {
			s.XdsUpdater.ProxyUpdate(s.Cluster(), wle.Address)
		}
		s.edsUpdate(allInstances)
		return
	}
	// update eds cache only
	s.edsCacheUpdate(allInstances)
	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         model.NewReasonStats(model.EndpointUpdate),
	}
	// trigger a full push
	s.XdsUpdater.ConfigUpdate(pushReq)
}
// NotifyWorkloadInstanceHandlers invokes every registered workload handler with the
// given instance and event.
func (s *Controller) NotifyWorkloadInstanceHandlers(wi *model.WorkloadInstance, event model.Event) {
	for _, handler := range s.workloadHandlers {
		handler(wi, event)
	}
}
// getUpdatedConfigs returns the ServiceEntry config keys for the given services,
// used to scope a full push.
func getUpdatedConfigs(services []*model.Service) sets.Set[model.ConfigKey] {
	updated := sets.New[model.ConfigKey]()
	for _, svc := range services {
		key := model.ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      string(svc.Hostname),
			Namespace: svc.Attributes.Namespace,
		}
		updated.Insert(key)
	}
	return updated
}
// serviceEntryHandler defines the handler for service entries.
// It diffs the services derived from the entry against the stored state, refreshes the
// instance indexes under the controller mutex, then issues either an EDS-only update or
// a full push (the latter whenever any service config changed or DNS resolution is used).
func (s *Controller) serviceEntryHandler(_, curr config.Config, event model.Event) {
	log.Debugf("Handle event %s for service entry %s/%s", event, curr.Namespace, curr.Name)
	currentServiceEntry := curr.Spec.(*networking.ServiceEntry)
	cs := convertServices(curr)
	configsUpdated := sets.New[model.ConfigKey]()
	key := curr.NamespacedName()
	s.mutex.Lock()
	// If it is add/delete event we should always do a full push. If it is update event, we should do full push,
	// only when services have changed - otherwise, just push endpoint updates.
	var addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs []*model.Service
	switch event {
	case model.EventUpdate:
		addedSvcs, deletedSvcs, updatedSvcs, unchangedSvcs = servicesDiff(s.services.getServices(key), cs)
		s.services.updateServices(key, cs)
	case model.EventDelete:
		deletedSvcs = cs
		s.services.deleteServices(key)
	case model.EventAdd:
		addedSvcs = cs
		s.services.updateServices(key, cs)
	default:
		// this should not happen
		unchangedSvcs = cs
	}
	serviceInstancesByConfig, serviceInstances := s.buildServiceInstances(curr, cs)
	// Drop all previously-indexed instances for this entry before re-adding.
	oldInstances := s.serviceInstances.getServiceEntryInstances(key)
	for configKey, old := range oldInstances {
		s.serviceInstances.deleteInstanceKeys(configKey, old)
	}
	if event == model.EventDelete {
		s.serviceInstances.deleteAllServiceEntryInstances(key)
	} else {
		// Update the indexes with new instances.
		for ckey, value := range serviceInstancesByConfig {
			s.serviceInstances.addInstances(ckey, value)
		}
		s.serviceInstances.updateServiceEntryInstances(key, serviceInstancesByConfig)
	}
	shard := model.ShardKeyFromRegistry(s)
	for _, svc := range addedSvcs {
		s.XdsUpdater.SvcUpdate(shard, string(svc.Hostname), svc.Attributes.Namespace, model.EventAdd)
		configsUpdated.Insert(makeConfigKey(svc))
	}
	for _, svc := range updatedSvcs {
		s.XdsUpdater.SvcUpdate(shard, string(svc.Hostname), svc.Attributes.Namespace, model.EventUpdate)
		configsUpdated.Insert(makeConfigKey(svc))
	}
	// If service entry is deleted, call SvcUpdate to cleanup endpoint shards for services.
	for _, svc := range deletedSvcs {
		instanceKey := instancesKey{namespace: svc.Attributes.Namespace, hostname: svc.Hostname}
		// There can be multiple service entries of same host reside in same namespace.
		// Delete endpoint shards only if there are no service instances.
		if len(s.serviceInstances.getByKey(instanceKey)) == 0 {
			s.XdsUpdater.SvcUpdate(shard, string(svc.Hostname), svc.Attributes.Namespace, model.EventDelete)
		} else {
			// If there are some endpoints remaining for the host, add svc to updatedSvcs to trigger eds cache update
			updatedSvcs = append(updatedSvcs, svc)
		}
		configsUpdated.Insert(makeConfigKey(svc))
	}
	// If a service is updated and is not part of updatedSvcs, that means its endpoints might have changed.
	// If this service entry had endpoints with IPs (i.e. resolution STATIC), then we do EDS update.
	// If the service entry had endpoints with FQDNs (i.e. resolution DNS), then we need to do
	// full push (as fqdn endpoints go via strict_dns clusters in cds).
	if len(unchangedSvcs) > 0 {
		if currentServiceEntry.Resolution == networking.ServiceEntry_DNS || currentServiceEntry.Resolution == networking.ServiceEntry_DNS_ROUND_ROBIN {
			for _, svc := range unchangedSvcs {
				configsUpdated.Insert(makeConfigKey(svc))
			}
		}
	}
	s.mutex.Unlock()
	fullPush := len(configsUpdated) > 0
	// if not full push needed, at least one service unchanged
	if !fullPush {
		s.edsUpdate(serviceInstances)
		return
	}
	// When doing a full push, the non DNS added, updated, unchanged services trigger an eds update
	// so that endpoint shards are updated.
	allServices := make([]*model.Service, 0, len(addedSvcs)+len(updatedSvcs)+len(unchangedSvcs))
	allServices = append(allServices, addedSvcs...)
	allServices = append(allServices, updatedSvcs...)
	allServices = append(allServices, unchangedSvcs...)
	// non dns service instances
	keys := sets.NewWithLength[instancesKey](len(allServices))
	for _, svc := range allServices {
		keys.Insert(instancesKey{hostname: svc.Hostname, namespace: curr.Namespace})
	}
	// Queue the EDS cache update so it runs serially with other EDS work.
	s.queueEdsEvent(keys, s.doEdsCacheUpdate)
	pushReq := &model.PushRequest{
		Full:           true,
		ConfigsUpdated: configsUpdated,
		Reason:         model.NewReasonStats(model.ServiceUpdate),
	}
	s.XdsUpdater.ConfigUpdate(pushReq)
}
// WorkloadInstanceHandler defines the handler for service instances generated by other registries
// (e.g. pods converted into workload instances). It reconciles the controller's
// workload-instance index with the incoming event, recomputes ServiceInstances for
// every ServiceEntry in the same namespace whose workloadSelector (still or newly)
// matches the instance, pushes an EDS update for the changed endpoints, and issues
// a full push when a DNS-resolution ServiceEntry is affected.
func (s *Controller) WorkloadInstanceHandler(wi *model.WorkloadInstance, event model.Event) {
	log.Debugf("Handle event %s for workload instance (%s/%s) in namespace %s", event,
		wi.Kind, wi.Endpoint.Address, wi.Namespace)
	var oldWi *model.WorkloadInstance
	// Key identifying this workload (as a pod-sourced config) in the instance indexes.
	key := configKey{
		kind:      podConfigType,
		name:      wi.Name,
		namespace: wi.Namespace,
	}
	// Used to indicate if this event was fired for a pod->workloadentry conversion
	// and that the event can be ignored due to no relevant change in the workloadentry
	redundantEventForPod := false
	// Used to indicate if the wi labels changed and we need to recheck all instances
	labelsChanged := false

	var addressToDelete string
	s.mutex.Lock()
	// this is from a pod. Store it in separate map so that
	// the refreshIndexes function can use these as well as the store ones.
	switch event {
	case model.EventDelete:
		// Delete returns the previous entry; nil means this workload was never
		// indexed, so the delete carries no new information.
		redundantEventForPod = s.workloadInstances.Delete(wi) == nil
	default: // add or update
		if oldWi = s.workloadInstances.Insert(wi); oldWi != nil {
			if oldWi.Endpoint.Address != wi.Endpoint.Address {
				// The IP moved; remember the old address so its endpoints can be removed below.
				addressToDelete = oldWi.Endpoint.Address
			}
			// Check if the old labels still match the new labels. If they don't then we need to
			// refresh the list of instances for this wi
			if !oldWi.Endpoint.Labels.Equals(wi.Endpoint.Labels) {
				labelsChanged = true
			}
			// If multiple k8s services select the same pod or a service has multiple ports,
			// we may be getting multiple events ignore them as we only care about the Endpoint IP itself.
			if model.WorkloadInstancesEqual(oldWi, wi) {
				// ignore the update as nothing has changed
				redundantEventForPod = true
			}
		}
	}

	if redundantEventForPod {
		s.mutex.Unlock()
		return
	}

	// We will only select entries in the same namespace
	cfgs := s.store.List(gvk.ServiceEntry, wi.Namespace)
	if len(cfgs) == 0 {
		s.mutex.Unlock()
		return
	}

	instances := []*model.ServiceInstance{}
	instancesDeleted := []*model.ServiceInstance{}
	configsUpdated := sets.New[model.ConfigKey]()
	fullPush := false
	for _, cfg := range cfgs {
		se := cfg.Spec.(*networking.ServiceEntry)
		if se.WorkloadSelector == nil || (!labelsChanged && !labels.Instance(se.WorkloadSelector.Labels).Match(wi.Endpoint.Labels)) {
			// If the labels didn't change. And the new SE doesn't match then the old didn't match either and we can skip processing it.
			continue
		}

		// If we are here, then there are 3 possible cases :
		// Case 1 : The new wi is a subset of se
		// Case 2 : The labelsChanged and the new wi is still a subset of se
		// Case 3 : The labelsChanged and the new wi is NOT a subset of se anymore
		seNamespacedName := cfg.NamespacedName()
		services := s.services.getServices(seNamespacedName)
		currInstance := convertWorkloadInstanceToServiceInstance(wi, services, se)

		// We check if the wi is still a subset of se. This would cover Case 1 and Case 2 from above.
		if labels.Instance(se.WorkloadSelector.Labels).Match(wi.Endpoint.Labels) {
			// If the workload instance still matches. We take care of the possible events.
			instances = append(instances, currInstance...)
			if addressToDelete != "" {
				// The IP changed: stage deletions for copies of the new instances
				// carrying the old address.
				for _, i := range currInstance {
					di := i.DeepCopy()
					di.Endpoint.Address = addressToDelete
					instancesDeleted = append(instancesDeleted, di)
				}
				s.serviceInstances.deleteServiceEntryInstances(seNamespacedName, key)
			} else if event == model.EventDelete {
				s.serviceInstances.deleteServiceEntryInstances(seNamespacedName, key)
			} else {
				s.serviceInstances.updateServiceEntryInstancesPerConfig(seNamespacedName, key, currInstance)
			}
			// If serviceentry's resolution is DNS, make a full push
			// TODO: maybe cds?
			if (se.Resolution == networking.ServiceEntry_DNS || se.Resolution == networking.ServiceEntry_DNS_ROUND_ROBIN) &&
				se.WorkloadSelector != nil {
				fullPush = true
				for _, inst := range currInstance {
					configsUpdated[model.ConfigKey{
						Kind:      kind.ServiceEntry,
						Name:      string(inst.Service.Hostname),
						Namespace: cfg.Namespace,
					}] = struct{}{}
				}
			}
		} else if labels.Instance(se.WorkloadSelector.Labels).Match(oldWi.Endpoint.Labels) {
			// If we're here, it means that the labels changed and the new labels don't match the SE anymore (Case 3 from above) and the oldWi did
			// match the SE.
			// Since the instance doesn't match the SE anymore. We remove it from the list.
			oldInstance := convertWorkloadInstanceToServiceInstance(oldWi, services, se)
			instancesDeleted = append(instancesDeleted, oldInstance...)
			s.serviceInstances.deleteServiceEntryInstances(seNamespacedName, key)
		}
	}

	if len(instancesDeleted) > 0 {
		s.serviceInstances.deleteInstanceKeys(key, instancesDeleted)
	}

	if event == model.EventDelete {
		s.serviceInstances.deleteInstanceKeys(key, instances)
	} else {
		s.serviceInstances.updateInstances(key, instances)
	}
	s.mutex.Unlock()

	// EDS update covers both surviving and removed endpoints.
	s.edsUpdate(append(instances, instancesDeleted...))

	// ServiceEntry with WorkloadEntry results in STRICT_DNS cluster with hardcoded endpoints
	// need to update CDS to refresh endpoints
	// https://github.com/istio/istio/issues/39505
	if fullPush {
		log.Debugf("Full push triggered during event %s for workload instance (%s/%s) in namespace %s", event,
			wi.Kind, wi.Endpoint.Address, wi.Namespace)
		pushReq := &model.PushRequest{
			Full:           true,
			ConfigsUpdated: configsUpdated,
			Reason:         model.NewReasonStats(model.EndpointUpdate),
		}
		s.XdsUpdater.ConfigUpdate(pushReq)
	}
}
// Provider returns the registry provider ID. Service entries are always
// sourced from the External (non-Kubernetes) registry.
func (s *Controller) Provider() provider.ID {
	return provider.External
}
// Cluster returns the ID of the cluster this controller is configured for.
func (s *Controller) Cluster() cluster.ID {
	return s.clusterID
}
// AppendServiceHandler adds service resource event handler. Service Entries does not use these handlers.
// The handler is intentionally discarded (note the blank parameter).
func (s *Controller) AppendServiceHandler(_ model.ServiceHandler) {}
// AppendWorkloadHandler adds instance event handler. Service Entries does not use these handlers.
// The handler is stored on the controller for later notification.
func (s *Controller) AppendWorkloadHandler(h func(*model.WorkloadInstance, model.Event)) {
	s.workloadHandlers = append(s.workloadHandlers, h)
}
// Run is used by some controllers to execute background jobs after init is done.
// Here it runs the EDS event queue until stopCh is closed.
func (s *Controller) Run(stopCh <-chan struct{}) {
	s.edsQueue.Run(stopCh)
}
// HasSynced always returns true for SE — this registry never reports
// "not yet synced".
func (s *Controller) HasSynced() bool {
	return true
}
// Services list declarations of all services in the system
func (s *Controller) Services() []*model.Service {
	s.mutex.Lock()
	allServices := s.services.getAllServices()
	out := make([]*model.Service, 0, len(allServices))
	if s.services.allocateNeeded {
		// Lazily (re)run IP auto-allocation; the flag ensures it happens at
		// most once per change to the service set.
		autoAllocateIPs(allServices)
		s.services.allocateNeeded = false
	}
	s.mutex.Unlock()
	for _, svc := range allServices {
		// shallow copy, copy `AutoAllocatedIPv4Address` and `AutoAllocatedIPv6Address`
		// if return the pointer directly, there will be a race with `BuildNameTable`
		// nolint: govet
		shallowSvc := *svc
		out = append(out, &shallowSvc)
	}
	return out
}
// GetService retrieves a service by host name if it exists.
// NOTE: The service entry implementation is used only for tests.
func (s *Controller) GetService(hostname host.Name) *model.Service {
	if s.workloadEntryController {
		return nil
	}
	// TODO(@hzxuzhonghu): only get the specific service instead of converting all the serviceEntries
	for _, candidate := range s.Services() {
		if candidate.Hostname == hostname {
			return candidate
		}
	}
	return nil
}
// ResyncEDS will do a full EDS update. This is needed for some tests where we have many configs loaded without calling
// the config handlers.
// This should probably not be used in production code.
func (s *Controller) ResyncEDS() {
	// Snapshot all known instances under the read lock, then push outside it.
	s.mutex.RLock()
	allInstances := s.serviceInstances.getAll()
	s.mutex.RUnlock()
	s.edsUpdate(allInstances)
	// HACK to workaround Service syncing after WorkloadEntry: https://github.com/istio/istio/issues/45114
	// Replay an Add event for every known workload instance so handlers see them.
	s.workloadInstances.ForEach(func(wi *model.WorkloadInstance) {
		s.NotifyWorkloadInstanceHandlers(wi, model.EventAdd)
	})
}
// edsUpdate queues an EDS push for the services backing the given instances.
// Events are serialized through the EDS queue so that a snapshot taken at t1
// cannot accidentally overwrite one taken at t2 when multiple threads call
// this; the most recent update always wins.
func (s *Controller) edsUpdate(instances []*model.ServiceInstance) {
	// Gather the set of instance keys affected by this update.
	keys := sets.NewWithLength[instancesKey](len(instances))
	for _, inst := range instances {
		keys.Insert(makeInstanceKey(inst))
	}
	s.queueEdsEvent(keys, s.doEdsUpdate)
}
// edsCacheUpdate queues an update of the EDS cache for the services backing
// the given instances. Updates are serialized through the EDS queue so that a
// snapshot taken at t1 cannot accidentally overwrite one taken at t2 when
// multiple threads call this; the most recent update always wins.
func (s *Controller) edsCacheUpdate(instances []*model.ServiceInstance) {
	// Gather the set of instance keys affected by this update.
	keys := sets.NewWithLength[instancesKey](len(instances))
	for _, inst := range instances {
		keys.Insert(makeInstanceKey(inst))
	}
	s.queueEdsEvent(keys, s.doEdsCacheUpdate)
}
// queueEdsEvent processes eds events sequentially for the passed keys and invokes the passed function.
// It blocks the caller until the queued task has executed (or the queue is closed),
// so callers observe up-to-date endpoint state when this returns.
func (s *Controller) queueEdsEvent(keys sets.Set[instancesKey], edsFn func(keys sets.Set[instancesKey])) {
	// wait for the cache update finished
	waitCh := make(chan struct{})
	// trigger update eds endpoint shards
	s.edsQueue.Push(func() error {
		defer close(waitCh)
		edsFn(keys)
		return nil
	})
	select {
	case <-waitCh:
		return
	// To prevent goroutine leak in tests
	// in case the queue is stopped but the task has not been executed..
	case <-s.edsQueue.Closed():
		return
	}
}
// doEdsCacheUpdate invokes XdsUpdater's EDSCacheUpdate to update endpoint shards.
// An empty endpoint map is treated as a delete for every requested key.
func (s *Controller) doEdsCacheUpdate(keys sets.Set[instancesKey]) {
	eps := s.buildEndpoints(keys)
	shardKey := model.ShardKeyFromRegistry(s)
	if len(eps) == 0 {
		// No endpoints remain for any key: clear the shards.
		for key := range keys {
			s.XdsUpdater.EDSCacheUpdate(shardKey, string(key.hostname), key.namespace, nil)
		}
		return
	}
	for key, endpoints := range eps {
		s.XdsUpdater.EDSCacheUpdate(shardKey, string(key.hostname), key.namespace, endpoints)
	}
}
// doEdsUpdate invokes XdsUpdater's eds update to trigger eds push.
// An empty endpoint map is treated as a delete for every requested key.
func (s *Controller) doEdsUpdate(keys sets.Set[instancesKey]) {
	eps := s.buildEndpoints(keys)
	shardKey := model.ShardKeyFromRegistry(s)
	if len(eps) == 0 {
		// No endpoints remain for any key: push the deletion.
		for key := range keys {
			s.XdsUpdater.EDSUpdate(shardKey, string(key.hostname), key.namespace, nil)
		}
		return
	}
	for key, endpoints := range eps {
		s.XdsUpdater.EDSUpdate(shardKey, string(key.hostname), key.namespace, endpoints)
	}
}
// buildEndpoints collects the IstioEndpoints currently known for each of the
// given instance keys, grouped by key. The returned map is nil when no
// instances exist for any key — callers interpret that as a delete.
//
// Consistency fix: the parameter is now typed sets.Set[instancesKey] like
// every sibling (queueEdsEvent, doEdsUpdate, doEdsCacheUpdate) instead of a
// raw map[instancesKey]struct{}. The underlying type is identical, so all
// existing callers remain valid.
func (s *Controller) buildEndpoints(keys sets.Set[instancesKey]) map[instancesKey][]*model.IstioEndpoint {
	var endpoints map[instancesKey][]*model.IstioEndpoint
	allInstances := []*model.ServiceInstance{}
	// Snapshot matching instances under the read lock; conversion below does
	// not need to hold it.
	s.mutex.RLock()
	for key := range keys {
		i := s.serviceInstances.getByKey(key)
		allInstances = append(allInstances, i...)
	}
	s.mutex.RUnlock()

	if len(allInstances) > 0 {
		endpoints = make(map[instancesKey][]*model.IstioEndpoint)
		for _, instance := range allInstances {
			key := makeInstanceKey(instance)
			endpoints[key] = append(endpoints[key], instance.Endpoint)
		}
	}
	return endpoints
}
// GetProxyServiceTargets lists service targets co-located with a given proxy
// NOTE: The service objects in these instances do not have the auto allocated IP set.
func (s *Controller) GetProxyServiceTargets(node *model.Proxy) []model.ServiceTarget {
	targets := make([]model.ServiceTarget, 0)
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	for _, addr := range node.IPAddresses {
		for _, instance := range s.serviceInstances.getByIP(addr) {
			// Match Kubernetes semantics: Services never cross namespace
			// boundaries, so only accept instances from the proxy's own
			// namespace (or any namespace when the proxy did not report one).
			// This also prevents other namespaces from injecting instances
			// into namespaces they do not control.
			if node.Metadata.Namespace != "" && instance.Service.Attributes.Namespace != node.Metadata.Namespace {
				continue
			}
			targets = append(targets, model.ServiceInstanceToTarget(instance))
		}
	}
	return targets
}
// GetProxyWorkloadLabels returns the labels of the first instance that shares
// an IP with the proxy within the proxy's own namespace (Services do not
// cross namespace boundaries). All such instances carry the same labels, so
// the first match suffices; nil is returned when nothing matches.
func (s *Controller) GetProxyWorkloadLabels(proxy *model.Proxy) labels.Instance {
	s.mutex.RLock()
	defer s.mutex.RUnlock()
	for _, addr := range proxy.IPAddresses {
		for _, instance := range s.serviceInstances.getByIP(addr) {
			sameNamespace := instance.Service.Attributes.Namespace == proxy.Metadata.Namespace
			if proxy.Metadata.Namespace == "" || sameNamespace {
				return instance.Endpoint.Labels
			}
		}
	}
	return nil
}
// NetworkGateways returns no gateways for the ServiceEntry registry.
func (s *Controller) NetworkGateways() []model.NetworkGateway {
	// TODO implement mesh networks loading logic from kube controller if needed
	return nil
}
// MCSServices returns nil: multi-cluster services are not provided by the
// ServiceEntry registry.
func (s *Controller) MCSServices() []model.MCSServiceInfo {
	return nil
}
// servicesDiff compares the previous service list os against the new list ns
// and partitions the result into (added, deleted, updated, unchanged).
func servicesDiff(os []*model.Service, ns []*model.Service) ([]*model.Service, []*model.Service, []*model.Service, []*model.Service) {
	var added, deleted, updated, unchanged []*model.Service
	// Index the old services by hostname; entries still present after the
	// scan below are the deleted ones.
	remaining := make(map[host.Name]*model.Service, len(os))
	for _, svc := range os {
		remaining[svc.Hostname] = svc
	}
	for _, svc := range ns {
		prev, found := remaining[svc.Hostname]
		switch {
		case !found:
			added = append(added, svc)
		case svc.Equals(prev):
			unchanged = append(unchanged, svc)
		default:
			updated = append(updated, svc)
		}
		delete(remaining, svc.Hostname)
	}
	deleted = maps.Values(remaining)
	return added, deleted, updated, unchanged
}
// Automatically allocates IPs for service entry services WITHOUT an
// address field if the hostname is not a wildcard, or when resolution
// is not NONE. The IPs are allocated from the reserved Class E subnet
// (240.240.0.0/16) that is not reachable outside the pod or reserved
// Benchmarking IP range (2001:2::/48) in RFC5180. When DNS
// capture is enabled, Envoy will resolve the DNS to these IPs. The
// listeners for TCP services will also be set up on these IPs. The
// IPs allocated to a service entry may differ from istiod to istiod
// but it does not matter because these IPs only affect the listener
// IPs on a given proxy managed by a given istiod.
//
// NOTE: If DNS capture is not enabled by the proxy, the automatically
// allocated IP addresses do not take effect.
//
// The current algorithm to allocate IPs is deterministic across all istiods.
func autoAllocateIPs(services []*model.Service) []*model.Service {
	hashedServices := make([]*model.Service, maxIPs)
	hash := fnv.New32a()
	// First iterate through the range of services and determine its position by hash
	// so that we can deterministically allocate an IP.
	// We use "Double Hashing" for collision detection.
	// The hash algorithm is
	// - h1(k) = Sum32 hash of the service key (namespace + "/" + hostname)
	// - Check if we have an empty slot for h1(x) % MAXIPS. Use it if available.
	// - If there is a collision, apply second hash i.e. h2(x) = PRIME - (Key % PRIME)
	//   where PRIME is the max prime number below MAXIPS.
	// - Calculate new hash iteratively till we find an empty slot with (h1(k) + i*h2(k)) % MAXIPS
	j := 0
	for _, svc := range services {
		// we can allocate IPs only if
		// 1. the service has resolution set to static/dns. We cannot allocate
		//    for NONE because we will not know the original DST IP that the application requested.
		// 2. the address is not set (0.0.0.0)
		// 3. the hostname is not a wildcard
		if svc.DefaultAddress == constants.UnspecifiedIP && !svc.Hostname.IsWildCarded() &&
			svc.Resolution != model.Passthrough {
			if j >= maxIPs {
				log.Errorf("out of IPs to allocate for service entries. maxips:= %d", maxIPs)
				break
			}
			// First hash is calculated by hashing the service key i.e. (namespace + "/" + hostname).
			hash.Write([]byte(makeServiceKey(svc)))
			s := hash.Sum32()
			firstHash := s % uint32(maxIPs)
			// Check if there is a service with this hash first. If there is no service
			// at this location - then we can safely assign this position for this service.
			if hashedServices[firstHash] == nil {
				hashedServices[firstHash] = svc
			} else {
				// This means we have a collision. Resolve collision by "DoubleHashing".
				i := uint32(1)
				secondHash := uint32(prime) - (s % uint32(prime))
				for {
					// FIX: probe modulo maxIPs (not maxIPs-1), matching both
					// firstHash above and the documented (h1(k) + i*h2(k)) % MAXIPS.
					// The previous "% (maxIPs-1)" made the final table slot
					// unreachable by probing.
					nh := (s + i*secondHash) % uint32(maxIPs)
					if hashedServices[nh] == nil {
						hashedServices[nh] = svc
						break
					}
					i++
					// Safety bound: if the probe sequence cycles without finding
					// a free slot, bail out instead of looping forever. The
					// service simply gets no slot (and hence no allocated IP).
					if i > uint32(maxIPs) {
						log.Errorf("could not find a free IP slot for service %s", makeServiceKey(svc))
						break
					}
				}
			}
			hash.Reset()
			j++
		}
	}
	// Walk the table in slot order and hand out sequential octet pairs;
	// identical service keys (same namespace/hostname) reuse the same IP.
	x := 0
	hnMap := make(map[string]octetPair)
	for _, svc := range hashedServices {
		if svc == nil {
			// There is no service in the slot. Just increment x and move forward.
			x++
			continue
		}
		n := makeServiceKey(svc)
		if v, ok := hnMap[n]; ok {
			log.Debugf("Reuse IP for domain %s", n)
			setAutoAllocatedIPs(svc, v)
		} else {
			var thirdOctect, fourthOctect int
			// To avoid allocating 240.240.(i).255, if X % 255 is 0, increment X.
			// For example, when X=510, the resulting IP would be 240.240.2.0 (invalid)
			// So we bump X to 511, so that the resulting IP is 240.240.2.1
			x++
			if x%255 == 0 {
				x++
			}
			thirdOctect = x / 255
			fourthOctect = x % 255
			pair := octetPair{thirdOctect, fourthOctect}
			setAutoAllocatedIPs(svc, pair)
			hnMap[n] = pair
		}
	}
	return services
}
// makeServiceKey returns the "namespace/hostname" key identifying a service
// during IP auto-allocation.
func makeServiceKey(svc *model.Service) string {
	return svc.Attributes.Namespace + "/" + svc.Hostname.String()
}
// setAutoAllocatedIPs assigns the service's auto-allocated addresses derived
// from the given octet pair: IPv4 from 240.240.0.0/16, IPv6 from the RFC 5180
// benchmarking range 2001:2::/48.
func setAutoAllocatedIPs(svc *model.Service, octets octetPair) {
	a := octets.thirdOctet
	b := octets.fourthOctet
	svc.AutoAllocatedIPv4Address = fmt.Sprintf("240.240.%d.%d", a, b)
	if a == 0 {
		svc.AutoAllocatedIPv6Address = fmt.Sprintf("2001:2::f0f0:%x", b)
	} else {
		// FIX: zero-pad each octet to two hex digits. The previous "%x%x"
		// collided for distinct pairs: (a=1,b=0x23) and (a=0x12,b=3) both
		// rendered "123", and (a=0,b=0x12) collided with (a=1,b=2) as "12" —
		// giving different services the same auto-allocated IPv6 address.
		// With "%02x%02x" the a>0 branch always yields a hextet >= 0x0100,
		// which can never collide with the a==0 branch (hextet <= 0xfe).
		svc.AutoAllocatedIPv6Address = fmt.Sprintf("2001:2::f0f0:%02x%02x", a, b)
	}
}
// makeConfigKey builds the ConfigKey for the ServiceEntry config backing the
// given service, keyed by hostname and namespace.
func makeConfigKey(svc *model.Service) model.ConfigKey {
	return model.ConfigKey{
		Kind:      kind.ServiceEntry,
		Name:      string(svc.Hostname),
		Namespace: svc.Attributes.Namespace,
	}
}
// isHealthy checks that the provided WorkloadEntry is healthy. If health checks are not enabled,
// it is assumed to always be healthy
func isHealthy(cfg config.Config) bool {
	if !parseHealthAnnotation(cfg.Annotations[status.WorkloadEntryHealthCheckAnnotation]) {
		// If health check is not enabled, assume its healthy
		return true
	}
	// Health checking is enabled: trust the Healthy status condition. We
	// default to false if the condition is not set, so newly created
	// WorkloadEntries stay unhealthy until a probe proves otherwise.
	return status.GetBoolConditionFromSpec(cfg, status.ConditionHealthy, false)
}
// parseHealthAnnotation interprets a health-check annotation value as a
// boolean. Empty or malformed values are treated as false (disabled).
func parseHealthAnnotation(s string) bool {
	enabled, err := strconv.ParseBool(s)
	return err == nil && enabled
}
// buildServiceInstances returns the ServiceInstances for the given ServiceEntry
// config, both grouped by the configKey of the workload that produced them and
// as a flat list. Selector-based entries derive instances from matching
// workload instances (pods / workload entries); entries without a selector
// derive them from the entry itself.
func (s *Controller) buildServiceInstances(
	curr config.Config,
	services []*model.Service,
) (map[configKey][]*model.ServiceInstance, []*model.ServiceInstance) {
	currentServiceEntry := curr.Spec.(*networking.ServiceEntry)
	var serviceInstances []*model.ServiceInstance
	serviceInstancesByConfig := map[configKey][]*model.ServiceInstance{}
	// for service entry with labels
	if currentServiceEntry.WorkloadSelector != nil {
		selector := workloadinstances.ByServiceSelector(curr.Namespace, currentServiceEntry.WorkloadSelector.Labels)
		workloadInstances := workloadinstances.FindAllInIndex(s.workloadInstances, selector)
		for _, wi := range workloadInstances {
			// Instances flagged DNSServiceEntryOnly are only selectable by
			// DNS-resolution entries; skip them for other resolutions.
			if wi.DNSServiceEntryOnly && currentServiceEntry.Resolution != networking.ServiceEntry_DNS &&
				currentServiceEntry.Resolution != networking.ServiceEntry_DNS_ROUND_ROBIN {
				log.Debugf("skip selecting workload instance %v/%v for DNS service entry %v", wi.Namespace, wi.Name,
					currentServiceEntry.Hosts)
				continue
			}
			instances := convertWorkloadInstanceToServiceInstance(wi, services, currentServiceEntry)
			serviceInstances = append(serviceInstances, instances...)
			// Key the produced instances by their originating workload,
			// distinguishing pods from workload entries.
			ckey := configKey{namespace: wi.Namespace, name: wi.Name}
			if wi.Kind == model.PodKind {
				ckey.kind = podConfigType
			} else {
				ckey.kind = workloadEntryConfigType
			}
			serviceInstancesByConfig[ckey] = instances
		}
	} else {
		// No selector: instances come from the service entry itself (inline
		// endpoints, or synthesized hosts for DNS entries without endpoints).
		serviceInstances = s.convertServiceEntryToInstances(curr, services)
		ckey := configKey{
			kind:      serviceEntryConfigType,
			name:      curr.Name,
			namespace: curr.Namespace,
		}
		serviceInstancesByConfig[ckey] = serviceInstances
	}
	return serviceInstancesByConfig, serviceInstances
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"net/netip"
"strings"
"time"
"istio.io/api/label"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/kube/labels"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/spiffe"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/sets"
)
// convertPort maps a ServiceEntry port onto the internal model.Port,
// parsing the protocol string into a protocol.Instance.
func convertPort(port *networking.ServicePort) *model.Port {
	return &model.Port{
		Name:     port.Name,
		Port:     int(port.Number),
		Protocol: protocol.Parse(port.Protocol),
	}
}
// HostAddress pairs a service entry host with one of its addresses
// (an IP, a normalized CIDR, or the unspecified IP when none is declared).
type HostAddress struct {
	host    string // service entry hostname
	address string // IP/CIDR literal, or constants.UnspecifiedIP
}
// ServiceToServiceEntry converts from internal Service representation to ServiceEntry
// This does not include endpoints - they'll be represented as EndpointSlice or EDS.
//
// See convertServices() for the reverse conversion, used by Istio to handle ServiceEntry configs.
// See kube.ConvertService for the conversion from K8S to internal Service.
func ServiceToServiceEntry(svc *model.Service, proxy *model.Proxy) *config.Config {
	se := &networking.ServiceEntry{
		// Host is fully qualified: name, namespace, domainSuffix
		Hosts: []string{string(svc.Hostname)},

		// Internal Service and K8S Service have a single Address.
		// ServiceEntry can represent multiple - but we are not using that. SE may be merged.
		// Will be 0.0.0.0 if not specified as ClusterIP or ClusterIP==None. In such case resolution is Passthrough.
		Addresses: svc.GetAddresses(proxy),

		// Location: 0,

		// Internal resolution:
		//  - Passthrough - for ClusterIP=None and no ExternalName
		//  - ClientSideLB - regular ClusterIP clusters (VIP, resolved via EDS)
		//  - DNSLB - if ExternalName is specified. Also meshExternal is set.

		// This is based on alpha.istio.io/canonical-serviceaccounts and
		// alpha.istio.io/kubernetes-serviceaccounts.
		SubjectAltNames: svc.ServiceAccounts,
	}

	if len(svc.Attributes.LabelSelectors) > 0 {
		se.WorkloadSelector = &networking.WorkloadSelector{Labels: svc.Attributes.LabelSelectors}
	}

	// Based on networking.istio.io/exportTo annotation
	for k := range svc.Attributes.ExportTo {
		// k is Private or Public
		se.ExportTo = append(se.ExportTo, string(k))
	}
	if svc.MeshExternal {
		se.Location = networking.ServiceEntry_MESH_EXTERNAL // 0 - default
	} else {
		se.Location = networking.ServiceEntry_MESH_INTERNAL
	}

	// Reverse in convertServices. Note that enum values are different
	// TODO: make the enum match, should be safe (as long as they're used as enum)
	var resolution networking.ServiceEntry_Resolution
	switch svc.Resolution {
	case model.Passthrough: // 2
		resolution = networking.ServiceEntry_NONE // 0
	case model.DNSLB: // 1
		resolution = networking.ServiceEntry_DNS // 2
	case model.DNSRoundRobinLB: // 3
		resolution = networking.ServiceEntry_DNS_ROUND_ROBIN // 3
	case model.ClientSideLB: // 0
		resolution = networking.ServiceEntry_STATIC // 1
	}
	se.Resolution = resolution

	// Port is mapped from ServicePort
	for _, p := range svc.Ports {
		se.Ports = append(se.Ports, &networking.ServicePort{
			Number: uint32(p.Port),
			Name:   p.Name,
			// Protocol is converted to protocol.Instance - reverse conversion will use the name.
			Protocol: string(p.Protocol),
			// TODO: target port
		})
	}

	cfg := &config.Config{
		Meta: config.Meta{
			// Fix: reference the gvk package directly. The previous local
			// variable `gvk := gvk.ServiceEntry` shadowed the imported package
			// for the rest of the function.
			GroupVersionKind:  gvk.ServiceEntry,
			Name:              "synthetic-" + svc.Attributes.Name,
			Namespace:         svc.Attributes.Namespace,
			CreationTimestamp: svc.CreationTime,
			ResourceVersion:   svc.ResourceVersion,
		},
		Spec: se,
	}

	// TODO: WorkloadSelector

	// TODO: preserve ServiceRegistry. The reverse conversion sets it to 'external'
	// TODO: preserve UID ? It seems MCP didn't preserve it - but that code path was not used much.

	// TODO: ClusterExternalPorts map - for NodePort services, with "traffic.istio.io/nodeSelector" ann
	// It's a per-cluster map

	// TODO: ClusterExternalAddresses - for LB types, per cluster. Populated from K8S, missing
	// in SE. Used for multi-network support.
	return cfg
}
// convertServices transforms a ServiceEntry config to a list of internal Service objects.
// One Service is produced per (host, address) combination; hosts without any
// declared address use the unspecified IP (0.0.0.0).
func convertServices(cfg config.Config) []*model.Service {
	serviceEntry := cfg.Spec.(*networking.ServiceEntry)
	creationTime := cfg.CreationTimestamp

	// Map the ServiceEntry resolution enum onto the internal model enum.
	var resolution model.Resolution
	switch serviceEntry.Resolution {
	case networking.ServiceEntry_NONE:
		resolution = model.Passthrough
	case networking.ServiceEntry_DNS:
		resolution = model.DNSLB
	case networking.ServiceEntry_DNS_ROUND_ROBIN:
		resolution = model.DNSRoundRobinLB
	case networking.ServiceEntry_STATIC:
		resolution = model.ClientSideLB
	}

	svcPorts := make(model.PortList, 0, len(serviceEntry.Ports))
	for _, port := range serviceEntry.Ports {
		svcPorts = append(svcPorts, convertPort(port))
	}

	var exportTo sets.Set[visibility.Instance]
	if len(serviceEntry.ExportTo) > 0 {
		exportTo = sets.NewWithLength[visibility.Instance](len(serviceEntry.ExportTo))
		for _, e := range serviceEntry.ExportTo {
			exportTo.Insert(visibility.Instance(e))
		}
	}

	var labelSelectors map[string]string
	if serviceEntry.WorkloadSelector != nil {
		labelSelectors = serviceEntry.WorkloadSelector.Labels
	}
	// Expand every host against every declared address. Addresses may be plain
	// IPs or CIDR prefixes; a full-length prefix (e.g. /32) is normalized back
	// to a plain IP. Unparseable addresses are silently dropped.
	hostAddresses := []*HostAddress{}
	for _, hostname := range serviceEntry.Hosts {
		if len(serviceEntry.Addresses) > 0 {
			for _, address := range serviceEntry.Addresses {
				// Check if address is an IP first because that is the most common case.
				if netutil.IsValidIPAddress(address) {
					hostAddresses = append(hostAddresses, &HostAddress{hostname, address})
				} else if cidr, cidrErr := netip.ParsePrefix(address); cidrErr == nil {
					newAddress := address
					if cidr.Bits() == cidr.Addr().BitLen() {
						// /32 mask. Remove the /32 and make it a normal IP address
						newAddress = cidr.Addr().String()
					}
					hostAddresses = append(hostAddresses, &HostAddress{hostname, newAddress})
				}
			}
		} else {
			hostAddresses = append(hostAddresses, &HostAddress{hostname, constants.UnspecifiedIP})
		}
	}

	return buildServices(hostAddresses, cfg.Name, cfg.Namespace, svcPorts, serviceEntry.Location, resolution,
		exportTo, labelSelectors, serviceEntry.SubjectAltNames, creationTime, cfg.Labels)
}
// buildServices constructs one internal Service per host/address pair with
// the shared attributes (ports, resolution, export scope, selectors, service
// accounts, labels) applied to each.
func buildServices(hostAddresses []*HostAddress, name, namespace string, ports model.PortList, location networking.ServiceEntry_Location,
	resolution model.Resolution, exportTo sets.Set[visibility.Instance], selectors map[string]string, saccounts []string,
	ctime time.Time, labels map[string]string,
) []*model.Service {
	meshExternal := location == networking.ServiceEntry_MESH_EXTERNAL
	// Mesh-external entries may additionally need the canonical service
	// name/revision labels (feature-gated).
	svcLabels := labels
	if features.CanonicalServiceForMeshExternalServiceEntry && meshExternal {
		svcLabels = ensureCanonicalServiceLabels(name, labels)
	}
	services := make([]*model.Service, 0, len(hostAddresses))
	for _, ha := range hostAddresses {
		services = append(services, &model.Service{
			CreationTime:   ctime,
			MeshExternal:   meshExternal,
			Hostname:       host.Name(ha.host),
			DefaultAddress: ha.address,
			Ports:          ports,
			Resolution:     resolution,
			Attributes: model.ServiceAttributes{
				ServiceRegistry: provider.External,
				Name:            ha.host,
				Namespace:       namespace,
				Labels:          svcLabels,
				ExportTo:        exportTo,
				LabelSelectors:  selectors,
			},
			ServiceAccounts: saccounts,
		})
	}
	return services
}
// ensureCanonicalServiceLabels makes sure the canonical service name and
// revision labels are both present, deriving missing values from the existing
// labels and the config name.
// NOTE(review): when srcLabels is non-nil the map is mutated in place, so
// callers passing a shared label map (e.g. the config's labels) will observe
// the additions — confirm that side effect is intended before reusing this.
func ensureCanonicalServiceLabels(name string, srcLabels map[string]string) map[string]string {
	if srcLabels == nil {
		srcLabels = make(map[string]string)
	}
	_, svcLabelFound := srcLabels[model.IstioCanonicalServiceLabelName]
	_, revLabelFound := srcLabels[model.IstioCanonicalServiceRevisionLabelName]
	if svcLabelFound && revLabelFound {
		// Both labels already set: nothing to derive.
		return srcLabels
	}

	srcLabels[model.IstioCanonicalServiceLabelName], srcLabels[model.IstioCanonicalServiceRevisionLabelName] = labels.CanonicalService(srcLabels, name)
	return srcLabels
}
// convertEndpoint builds a single ServiceInstance for a WorkloadEntry backing
// the given service/port pair, resolving the endpoint port, TLS mode,
// locality, network, and (optionally) the SPIFFE service-account identity.
func (s *Controller) convertEndpoint(service *model.Service, servicePort *networking.ServicePort,
	wle *networking.WorkloadEntry, configKey *configKey, clusterID cluster.ID,
) *model.ServiceInstance {
	var instancePort uint32
	addr := wle.GetAddress()
	// priority level: unixAddress > we.ports > se.port.targetPort > se.port.number
	if strings.HasPrefix(addr, model.UnixAddressPrefix) {
		// Unix domain socket endpoint: strip the prefix and use port 0.
		instancePort = 0
		addr = strings.TrimPrefix(addr, model.UnixAddressPrefix)
	} else if port, ok := wle.Ports[servicePort.Name]; ok && port > 0 {
		instancePort = port
	} else if servicePort.TargetPort > 0 {
		instancePort = servicePort.TargetPort
	} else {
		// final fallback is to the service port value
		instancePort = servicePort.Number
	}

	tlsMode := getTLSModeFromWorkloadEntry(wle)
	sa := ""
	if wle.ServiceAccount != "" {
		sa = spiffe.MustGenSpiffeURI(service.Attributes.Namespace, wle.ServiceAccount)
	}
	networkID := s.workloadEntryNetwork(wle)
	// Locality comes from the entry itself, falling back to the locality label.
	locality := wle.Locality
	if locality == "" && len(wle.Labels[model.LocalityLabel]) > 0 {
		locality = model.GetLocalityLabel(wle.Labels[model.LocalityLabel])
	}
	labels := labelutil.AugmentLabels(wle.Labels, clusterID, locality, "", networkID)
	return &model.ServiceInstance{
		Endpoint: &model.IstioEndpoint{
			Address:         addr,
			EndpointPort:    instancePort,
			ServicePortName: servicePort.Name,
			Network:         network.ID(wle.Network),
			Locality: model.Locality{
				Label:     locality,
				ClusterID: clusterID,
			},
			LbWeight:       wle.Weight,
			Labels:         labels,
			TLSMode:        tlsMode,
			ServiceAccount: sa,
			// Workload entry config name is used as workload name, which will appear in metric label.
			// After VM auto registry is introduced, workload group annotation should be used for workload name.
			WorkloadName: configKey.name,
			Namespace:    configKey.namespace,
		},
		Service:     service,
		ServicePort: convertPort(servicePort),
	}
}
// convertWorkloadEntryToServiceInstances translates a WorkloadEntry into ServiceEndpoints. This logic is largely the
// same as the ServiceEntry convertServiceEntryToInstances: one instance is
// produced for every (service, service-entry port) combination.
func (s *Controller) convertWorkloadEntryToServiceInstances(wle *networking.WorkloadEntry, services []*model.Service,
	se *networking.ServiceEntry, configKey *configKey, clusterID cluster.ID,
) []*model.ServiceInstance {
	instances := make([]*model.ServiceInstance, 0, len(services)*len(se.Ports))
	for _, svc := range services {
		for _, port := range se.Ports {
			instances = append(instances, s.convertEndpoint(svc, port, wle, configKey, clusterID))
		}
	}
	return instances
}
// convertServiceEntryToInstances builds the ServiceInstances declared by a
// ServiceEntry config. For DNS-resolution entries with no inline endpoints and
// no workload selector, one endpoint is synthesized per service host;
// otherwise each inline endpoint is converted per service and port.
func (s *Controller) convertServiceEntryToInstances(cfg config.Config, services []*model.Service) []*model.ServiceInstance {
	out := make([]*model.ServiceInstance, 0)
	serviceEntry := cfg.Spec.(*networking.ServiceEntry)
	if serviceEntry == nil {
		return nil
	}
	if services == nil {
		// Callers may pass pre-converted services to avoid re-conversion.
		services = convertServices(cfg)
	}
	for _, service := range services {
		for _, serviceEntryPort := range serviceEntry.Ports {
			if len(serviceEntry.Endpoints) == 0 && serviceEntry.WorkloadSelector == nil &&
				(serviceEntry.Resolution == networking.ServiceEntry_DNS || serviceEntry.Resolution == networking.ServiceEntry_DNS_ROUND_ROBIN) {
				// Note: only convert the hostname to service instance if WorkloadSelector is not set
				// when service entry has discovery type DNS and no endpoints
				// we create endpoints from service's host
				// Do not use serviceentry.hosts as a service entry is converted into
				// multiple services (one for each host)
				endpointPort := serviceEntryPort.Number
				if serviceEntryPort.TargetPort > 0 {
					endpointPort = serviceEntryPort.TargetPort
				}
				out = append(out, &model.ServiceInstance{
					Endpoint: &model.IstioEndpoint{
						Address:         string(service.Hostname),
						EndpointPort:    endpointPort,
						ServicePortName: serviceEntryPort.Name,
						Labels:          nil,
						TLSMode:         model.DisabledTLSModeLabel,
					},
					Service:     service,
					ServicePort: convertPort(serviceEntryPort),
				})
			} else {
				for _, endpoint := range serviceEntry.Endpoints {
					out = append(out, s.convertEndpoint(service, serviceEntryPort, endpoint, &configKey{}, s.clusterID))
				}
			}
		}
	}
	return out
}
// getTLSModeFromWorkloadEntry derives the TLS mode of a workload entry:
// the security.istio.io/tlsMode label wins when present; otherwise Istio
// mutual TLS is assumed when a service account is set, and TLS is disabled
// in all remaining cases.
func getTLSModeFromWorkloadEntry(wle *networking.WorkloadEntry) string {
	if mode, ok := wle.Labels[label.SecurityTlsMode.Name]; ok {
		return mode
	}
	if wle.ServiceAccount != "" {
		return model.IstioMutualTLSModeLabel
	}
	return model.DisabledTLSModeLabel
}
// The workload instance has pointer to the service and its service port.
// We need to create our own but we can retain the endpoint already created.
func convertWorkloadInstanceToServiceInstance(workloadInstance *model.WorkloadInstance, serviceEntryServices []*model.Service,
	serviceEntry *networking.ServiceEntry,
) []*model.ServiceInstance {
	out := make([]*model.ServiceInstance, 0, len(serviceEntryServices)*len(serviceEntry.Ports))
	for _, svc := range serviceEntryServices {
		for _, sePort := range serviceEntry.Ports {
			// Resolve the endpoint port (same rules as the workloadentry handler):
			//  1. the entry in the workload instance's port map with a matching name
			//  2. the targetPort declared on the ServiceEntry port
			//  3. the ServiceEntry port number itself
			var epPort uint32
			switch {
			case workloadInstance.PortMap[sePort.Name] > 0:
				epPort = workloadInstance.PortMap[sePort.Name]
			case sePort.TargetPort > 0:
				epPort = sePort.TargetPort
			default:
				epPort = sePort.Number
			}
			// Reuse the already-built endpoint, adjusting only the port binding.
			ep := workloadInstance.Endpoint.ShallowCopy()
			ep.ServicePortName = sePort.Name
			ep.EndpointPort = epPort
			ep.ComputeEnvoyEndpoint(nil)
			out = append(out, &model.ServiceInstance{
				Endpoint:    ep,
				Service:     svc,
				ServicePort: convertPort(sePort),
			})
		}
	}
	return out
}
// Convenience function to convert a workloadEntry into a WorkloadInstance object encoding the endpoint (without service
// port names) and the namespace - k8s will consume this workload instance when selecting workload entries
func (s *Controller) convertWorkloadEntryToWorkloadInstance(cfg config.Config, clusterID cluster.ID) *model.WorkloadInstance {
	we := ConvertWorkloadEntry(cfg)
	addr := we.GetAddress()
	// Addresses that k8s cannot select as endpoints are flagged so they are
	// only used for DNS-resolution service entries.
	dnsServiceEntryOnly := false
	if strings.HasPrefix(addr, model.UnixAddressPrefix) {
		// k8s can't use uds for service objects
		dnsServiceEntryOnly = true
	}
	if addr != "" && !netutil.IsValidIPAddress(addr) {
		// k8s can't use workloads with hostnames in the address field.
		dnsServiceEntryOnly = true
	}
	tlsMode := getTLSModeFromWorkloadEntry(we)
	// SPIFFE identity is only generated when a service account is declared.
	sa := ""
	if we.ServiceAccount != "" {
		sa = spiffe.MustGenSpiffeURI(cfg.Namespace, we.ServiceAccount)
	}
	networkID := s.workloadEntryNetwork(we)
	// Explicit locality wins; otherwise fall back to the locality label.
	locality := we.Locality
	if locality == "" && len(we.Labels[model.LocalityLabel]) > 0 {
		locality = model.GetLocalityLabel(we.Labels[model.LocalityLabel])
	}
	// Augment labels with topology (cluster/region/zone/subzone/network) info.
	labels := labelutil.AugmentLabels(we.Labels, clusterID, locality, "", networkID)
	return &model.WorkloadInstance{
		Endpoint: &model.IstioEndpoint{
			Address: addr,
			// Not setting ports here as its done by k8s controller
			Network: network.ID(we.Network),
			Locality: model.Locality{
				Label:     locality,
				ClusterID: clusterID,
			},
			LbWeight:  we.Weight,
			Namespace: cfg.Namespace,
			// Workload entry config name is used as workload name, which will appear in metric label.
			// After VM auto registry is introduced, workload group annotation should be used for workload name.
			WorkloadName:   cfg.Name,
			Labels:         labels,
			TLSMode:        tlsMode,
			ServiceAccount: sa,
		},
		PortMap:             we.Ports,
		Namespace:           cfg.Namespace,
		Name:                cfg.Name,
		Kind:                model.WorkloadEntryKind,
		DNSServiceEntryOnly: dnsServiceEntryOnly,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/schema/gvk"
)
// NamespaceDiscoveryHandler is to handle namespace selected or deselected because of discoverySelectors change,
// rather than namespace add/update/delete event triggered from namespace informer.
// It replays the WorkloadEntry (and, when this controller also owns ServiceEntry
// handling, the ServiceEntry) configs of that namespace through their handlers.
func (s *Controller) NamespaceDiscoveryHandler(namespace string, event model.Event) {
	switch event {
	case model.EventDelete:
		log.Debugf("Handle event namespace %s deselected", namespace)
	default:
		log.Debugf("Handle event namespace %s selected", namespace)
	}
	for _, cfg := range s.store.List(gvk.WorkloadEntry, namespace) {
		s.workloadEntryHandler(cfg, cfg, event)
	}
	if !s.workloadEntryController {
		for _, cfg := range s.store.List(gvk.ServiceEntry, namespace) {
			s.serviceEntryHandler(cfg, cfg, event)
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/util/sets"
)
// hostPort identifies a (hostname, service port) pair; used as a set key to
// detect duplicate instances for DNS round-robin services.
type hostPort struct {
	host string
	port int
}
// stores all the service instances from SE, WLE and pods
type serviceInstancesStore struct {
	// ip2instance maps an endpoint IP to all service instances with that address.
	ip2instance map[string][]*model.ServiceInstance
	// service instances by hostname -> config
	instances map[instancesKey]map[configKey][]*model.ServiceInstance
	// instances only for serviceentry
	instancesBySE map[types.NamespacedName]map[configKey][]*model.ServiceInstance
	// instancesByHostAndPort tells whether the host has instances.
	// This is used to validate that we only have one instance for DNS_ROUNDROBIN_LB.
	instancesByHostAndPort sets.Set[hostPort]
}
// getByIP returns all service instances whose endpoint address equals ip
// (nil if none are known).
func (s *serviceInstancesStore) getByIP(ip string) []*model.ServiceInstance {
	return s.ip2instance[ip]
}
// getAll returns every service instance in the store, in no particular order.
// The result is always non-nil.
func (s *serviceInstancesStore) getAll() []*model.ServiceInstance {
	out := []*model.ServiceInstance{}
	for _, byIP := range s.ip2instance {
		out = append(out, byIP...)
	}
	return out
}
// getByKey returns all service instances registered under the given
// hostname/namespace key, flattened across configs. Always non-nil.
func (s *serviceInstancesStore) getByKey(key instancesKey) []*model.ServiceInstance {
	out := []*model.ServiceInstance{}
	for _, byConfig := range s.instances[key] {
		out = append(out, byConfig...)
	}
	return out
}
// deleteInstanceKeys deletes all instances with the given configKey and instanceKey
// Note: as a convenience, this takes a []ServiceInstance instead of []instanceKey, as most callers have this format
// However, this function only operates on the instance keys
func (s *serviceInstancesStore) deleteInstanceKeys(key configKey, instances []*model.ServiceInstance) {
	for _, i := range instances {
		ikey := makeInstanceKey(i)
		s.instancesByHostAndPort.Delete(hostPort{ikey.hostname.String(), i.ServicePort.Port})
		// Capture the previously stored instances BEFORE removing the config
		// entry, so their (possibly different) IPs and ports can be cleaned up.
		oldInstances := s.instances[ikey][key]
		delete(s.instances[ikey], key)
		// Drop the hostname bucket entirely once no config references it.
		if len(s.instances[ikey]) == 0 {
			delete(s.instances, ikey)
		}
		delete(s.ip2instance, i.Endpoint.Address)
		// Cleanup stale IPs, if the IPs changed
		for _, oi := range oldInstances {
			s.instancesByHostAndPort.Delete(hostPort{ikey.hostname.String(), oi.ServicePort.Port})
			delete(s.ip2instance, oi.Endpoint.Address)
		}
	}
}
// addInstances add the instances to the store, indexing them by hostname,
// config, endpoint IP and (host, port). For DNS round-robin services, only
// the first instance per (host, port) is accepted.
func (s *serviceInstancesStore) addInstances(key configKey, instances []*model.ServiceInstance) {
	for _, instance := range instances {
		ikey := makeInstanceKey(instance)
		// NOTE: renamed from `hostPort` to avoid shadowing the hostPort type.
		hp := hostPort{ikey.hostname.String(), instance.ServicePort.Port}
		// For DNSRoundRobinLB resolution type, check if service instances already exist and do not add
		// if it already exist. This can happen if two Service Entries are created with same host name,
		// resolution as DNS_ROUND_ROBIN and with same/different endpoints.
		if instance.Service.Resolution == model.DNSRoundRobinLB &&
			s.instancesByHostAndPort.Contains(hp) {
			log.Debugf("skipping service %s from service entry %s with DnsRoundRobinLB. A service entry with the same host "+
				"already exists. Only one locality lb end point is allowed for DnsRoundRobinLB services.",
				ikey.hostname, key.name+"/"+key.namespace)
			continue
		}
		if _, f := s.instances[ikey]; !f {
			s.instances[ikey] = map[configKey][]*model.ServiceInstance{}
		}
		s.instancesByHostAndPort.Insert(hp)
		s.instances[ikey][key] = append(s.instances[ikey][key], instance)
		// Only instances with a concrete address are reverse-indexed by IP.
		if instance.Endpoint.Address != "" {
			s.ip2instance[instance.Endpoint.Address] = append(s.ip2instance[instance.Endpoint.Address], instance)
		}
	}
}
// updateInstances replaces the stored instances for the given config key:
// stale index entries are removed first, then the new instances are added.
func (s *serviceInstancesStore) updateInstances(key configKey, instances []*model.ServiceInstance) {
	// first delete
	s.deleteInstanceKeys(key, instances)
	// second add
	s.addInstances(key, instances)
}
// getServiceEntryInstances returns the per-config instances recorded for the
// given ServiceEntry (nil if none).
func (s *serviceInstancesStore) getServiceEntryInstances(key types.NamespacedName) map[configKey][]*model.ServiceInstance {
	return s.instancesBySE[key]
}
// updateServiceEntryInstances replaces the entire per-config instance map for
// the given ServiceEntry.
func (s *serviceInstancesStore) updateServiceEntryInstances(key types.NamespacedName, instances map[configKey][]*model.ServiceInstance) {
	s.instancesBySE[key] = instances
}
// updateServiceEntryInstancesPerConfig records the instances contributed by a
// single config (e.g. one workload entry) to the given ServiceEntry, creating
// the inner map on first use.
func (s *serviceInstancesStore) updateServiceEntryInstancesPerConfig(key types.NamespacedName, cKey configKey, instances []*model.ServiceInstance) {
	byConfig := s.instancesBySE[key]
	if byConfig == nil {
		byConfig = map[configKey][]*model.ServiceInstance{}
		s.instancesBySE[key] = byConfig
	}
	byConfig[cKey] = instances
}
// deleteServiceEntryInstances removes the instances a single config contributed
// to the given ServiceEntry, dropping the ServiceEntry bucket once it is empty.
func (s *serviceInstancesStore) deleteServiceEntryInstances(key types.NamespacedName, cKey configKey) {
	byConfig := s.instancesBySE[key]
	delete(byConfig, cKey)
	if len(byConfig) == 0 {
		delete(s.instancesBySE, key)
	}
}
// deleteAllServiceEntryInstances drops every instance recorded for the given
// ServiceEntry, regardless of contributing config.
func (s *serviceInstancesStore) deleteAllServiceEntryInstances(key types.NamespacedName) {
	delete(s.instancesBySE, key)
}
// stores all the services converted from serviceEntries
type serviceStore struct {
	// services keeps track of all services - mainly used to return from Services() to avoid reconversion.
	servicesBySE map[types.NamespacedName][]*model.Service
	// allocateNeeded is set whenever services change, signaling that auto-IP
	// allocation must run again.
	allocateNeeded bool
}
// getAllServices return all the services, sorted by creation time.
func (s *serviceStore) getAllServices() []*model.Service {
	var all []*model.Service
	for _, svcs := range s.servicesBySE {
		all = append(all, svcs...)
	}
	return model.SortServicesByCreationTime(all)
}
// getServices returns the services converted from the given ServiceEntry
// (nil if none).
func (s *serviceStore) getServices(key types.NamespacedName) []*model.Service {
	return s.servicesBySE[key]
}
// deleteServices removes all services associated with the given ServiceEntry.
func (s *serviceStore) deleteServices(key types.NamespacedName) {
	delete(s.servicesBySE, key)
}
// updateServices replaces the services for the given ServiceEntry and marks
// the store as needing IP re-allocation.
func (s *serviceStore) updateServices(key types.NamespacedName, services []*model.Service) {
	s.servicesBySE[key] = services
	s.allocateNeeded = true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"k8s.io/apimachinery/pkg/types"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
)
// getWorkloadServiceEntries returns, keyed by namespaced name, the subset of
// the given ServiceEntries whose workload selector matches the labels of the
// given WorkloadEntry.
func getWorkloadServiceEntries(ses []config.Config, wle *networking.WorkloadEntry) map[types.NamespacedName]*config.Config {
	matched := make(map[types.NamespacedName]*config.Config)
	for i := range ses {
		se := ses[i].Spec.(*networking.ServiceEntry)
		if se.WorkloadSelector == nil {
			continue
		}
		if !labels.Instance(se.WorkloadSelector.Labels).Match(wle.Labels) {
			continue
		}
		// Take the address of the slice element itself (not of a loop copy)
		// so the map references stable storage.
		matched[ses[i].NamespacedName()] = &ses[i]
	}
	return matched
}
// difference returns the keys that are in `old` but not in `curr`.
// For example:
//
//	old  = {a1, a2, a3}
//	curr = {a1, a2, a4, a5}
//	difference(old, curr) = {a3}
func difference(old, curr map[types.NamespacedName]*config.Config) []types.NamespacedName {
	var removed []types.NamespacedName
	for key := range old {
		if _, stillPresent := curr[key]; stillPresent {
			continue
		}
		removed = append(removed, key)
	}
	return removed
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/network"
)
// workloadEntryNetwork returns the mesh network for the workload entry.
// The entry's own Network field wins; otherwise the controller's network ID
// callback is consulted. Empty string if neither yields a network (or the
// receiver is nil).
func (s *Controller) workloadEntryNetwork(wle *networking.WorkloadEntry) network.ID {
	switch {
	case s == nil:
		return ""
	case wle.Network != "":
		// 1. first check the wle.Network
		return network.ID(wle.Network)
	case s.networkIDCallback != nil:
		// 2. fall back to the configured callback
		return s.networkIDCallback(wle.Address, wle.Labels)
	default:
		return ""
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package label
import (
"strings"
"istio.io/api/label"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/network"
)
// copied from https://github.com/kubernetes/api/blob/master/core/v1/well_known_labels.go
// It is to remove dependency on k8s.io/api/core/v1
const (
	LabelHostname     = "kubernetes.io/hostname"
	LabelTopologyZone = "topology.kubernetes.io/zone"
	// NOTE: subzone is an Istio concept, not a Kubernetes well-known label,
	// hence the topology.istio.io domain.
	LabelTopologySubzone = "topology.istio.io/subzone"
	LabelTopologyRegion  = "topology.kubernetes.io/region"
)
// AugmentLabels adds additional labels to the those provided: topology
// region/zone/subzone (parsed from locality), cluster, hostname (k8s node)
// and network. Empty inputs add no label. The input map is not modified.
func AugmentLabels(in labels.Instance, clusterID cluster.ID, locality, k8sNode string, networkID network.ID) labels.Instance {
	region, zone, subzone := SplitLocalityLabel(locality)
	// Copy the input, reserving room for up to six extra labels.
	out := make(labels.Instance, len(in)+6)
	for key, value := range in {
		out[key] = value
	}
	if region != "" {
		out[LabelTopologyRegion] = region
	}
	if zone != "" {
		out[LabelTopologyZone] = zone
	}
	if subzone != "" {
		out[label.TopologySubzone.Name] = subzone
	}
	if clusterID != "" {
		out[label.TopologyCluster.Name] = clusterID.String()
	}
	if k8sNode != "" {
		out[LabelHostname] = k8sNode
	}
	if networkID != "" {
		out[label.TopologyNetwork.Name] = networkID.String()
	}
	return out
}
// SplitLocalityLabel splits a locality label into region, zone and subzone strings.
// Missing segments come back empty; any segments beyond the third are ignored.
func SplitLocalityLabel(locality string) (region, zone, subzone string) {
	var rest string
	var more bool
	region, rest, more = strings.Cut(locality, "/")
	if !more {
		return region, "", ""
	}
	zone, rest, more = strings.Cut(rest, "/")
	if !more {
		return region, zone, ""
	}
	subzone, _, _ = strings.Cut(rest, "/")
	return region, zone, subzone
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workloadinstances
import (
"sync"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/util/sets"
)
// Index represents an index over workload instances from workload entries.
//
// Indexes are thread-safe.
type Index interface {
	// Insert adds/updates given workload instance to the index.
	//
	// Returns previous value in the index, or nil otherwise.
	Insert(*model.WorkloadInstance) *model.WorkloadInstance
	// Delete removes given workload instance from the index.
	//
	// Returns value removed from the index, or nil otherwise.
	Delete(*model.WorkloadInstance) *model.WorkloadInstance
	// GetByIP returns a list of all workload instances associated with a
	// given IP address. The list is ordered by namespace/name.
	//
	// There are several use cases where multiple workload instances might
	// have the same IP address:
	// 1) there are multiple Istio Proxies running on a single host, e.g.
	// in 'router' mode or even in 'sidecar' mode.
	// 2) workload instances have the same IP but different networks
	GetByIP(string) []*model.WorkloadInstance
	// Empty returns whether the index is empty.
	Empty() bool
	// ForEach iterates over all workload instances in the index.
	ForEach(func(*model.WorkloadInstance))
}
// indexKey returns index key for a given workload instance: "namespace/name".
func indexKey(wi *model.WorkloadInstance) string {
	return wi.Namespace + "/" + wi.Name
}
// NewIndex returns a new Index instance keyed by namespace/name, with an
// auxiliary reverse index from IP to keys.
func NewIndex() Index {
	return &index{
		keyFunc:       indexKey,
		keyToInstance: make(map[string]*model.WorkloadInstance),
		ipToKeys:      make(MultiValueMap),
	}
}
// index implements Index. All fields are guarded by mu.
type index struct {
	mu sync.RWMutex
	// key function
	keyFunc func(*model.WorkloadInstance) string
	// map of namespace/name -> workload instance
	keyToInstance map[string]*model.WorkloadInstance
	// map of ip -> set of namespace/name
	ipToKeys MultiValueMap
}
// Insert implements Index. It stores the instance under its namespace/name
// key, keeps the IP reverse index in sync, and returns the value previously
// stored under that key (nil if none).
func (i *index) Insert(wi *model.WorkloadInstance) *model.WorkloadInstance {
	i.mu.Lock()
	defer i.mu.Unlock()
	key := i.keyFunc(wi)
	prev := i.keyToInstance[key]
	// If the instance's address changed, drop the stale IP -> key mapping first.
	if prev != nil && prev.Endpoint.Address != wi.Endpoint.Address {
		i.ipToKeys.Delete(prev.Endpoint.Address, key)
	}
	i.keyToInstance[key] = wi
	if addr := wi.Endpoint.Address; addr != "" {
		i.ipToKeys.Insert(addr, key)
	}
	return prev
}
// Delete implements Index. It removes the instance stored under the given
// instance's namespace/name key along with its IP reverse mappings (both the
// stored instance's address and the argument's, in case they differ), and
// returns the removed value (nil if none).
func (i *index) Delete(wi *model.WorkloadInstance) *model.WorkloadInstance {
	i.mu.Lock()
	defer i.mu.Unlock()
	key := i.keyFunc(wi)
	prev := i.keyToInstance[key]
	if prev != nil {
		i.ipToKeys.Delete(prev.Endpoint.Address, key)
	}
	i.ipToKeys.Delete(wi.Endpoint.Address, key)
	delete(i.keyToInstance, key)
	return prev
}
// GetByIP implements Index. Results are ordered by namespace/name key.
func (i *index) GetByIP(ip string) []*model.WorkloadInstance {
	i.mu.RLock()
	defer i.mu.RUnlock()
	keys := i.ipToKeys[ip]
	if len(keys) == 0 {
		return nil
	}
	out := make([]*model.WorkloadInstance, 0, len(keys))
	for _, k := range sets.SortedList(keys) {
		wi, ok := i.keyToInstance[k]
		if !ok {
			continue
		}
		out = append(out, wi)
	}
	return out
}
// Empty implements Index. It reports whether no instances are stored.
func (i *index) Empty() bool {
	i.mu.RLock()
	defer i.mu.RUnlock()
	return len(i.keyToInstance) == 0
}
// ForEach iterates over all workload instances in the index.
// The read lock is held for the whole iteration, so fn must not call back
// into methods of this index that take the lock.
func (i *index) ForEach(fn func(*model.WorkloadInstance)) {
	i.mu.RLock()
	defer i.mu.RUnlock()
	for _, instance := range i.keyToInstance {
		fn(instance)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workloadinstances
import (
"istio.io/istio/pkg/util/sets"
)
// MultiValueMap represents a map where each key might be associated with
// multiple values (a map from string key to a set of string values).
type MultiValueMap map[string]sets.String
// Insert adds given (key, value) pair into the map, creating the value set
// on first use. Returns the map to allow chaining.
func (m MultiValueMap) Insert(key, value string) MultiValueMap {
	sets.InsertOrNew(m, key, value)
	return m
}
// Delete removes given (key, value) pair out of the map, dropping the key
// entirely once its value set is empty. Returns the map to allow chaining.
func (m MultiValueMap) Delete(key, value string) MultiValueMap {
	sets.DeleteCleanupLast(m, key, value)
	return m
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workloadinstances
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/labels"
)
// ByServiceSelector returns a predicate that matches workload instances
// of a given service: the instance must live in the given namespace and its
// endpoint labels must satisfy the selector.
func ByServiceSelector(namespace string, selector labels.Instance) func(*model.WorkloadInstance) bool {
	return func(wi *model.WorkloadInstance) bool {
		if wi.Namespace != namespace {
			return false
		}
		return selector.Match(wi.Endpoint.Labels)
	}
}
// FindAllInIndex returns a list of workload instances in the index
// that match given predicate.
//
// The returned list is not ordered.
func FindAllInIndex(index Index, predicate func(*model.WorkloadInstance) bool) []*model.WorkloadInstance {
	var matches []*model.WorkloadInstance
	index.ForEach(func(wi *model.WorkloadInstance) {
		if !predicate(wi) {
			return
		}
		matches = append(matches, wi)
	})
	return matches
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package workloadinstances
import (
"strings"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/slices"
)
// FindInstance returns the first workload instance matching given predicate,
// or nil when no instance matches.
func FindInstance(instances []*model.WorkloadInstance, predicate func(*model.WorkloadInstance) bool) *model.WorkloadInstance {
	for _, wi := range instances {
		if !predicate(wi) {
			continue
		}
		return wi
	}
	return nil
}
// InstanceNameForProxy returns a name of the workload instance that
// corresponds to a given proxy, if any. The proxy ID must have exactly the
// form "<name>.<namespace>" with the namespace matching the proxy's config
// namespace; otherwise the zero NamespacedName is returned.
func InstanceNameForProxy(proxy *model.Proxy) types.NamespacedName {
	name, ns, ok := strings.Cut(proxy.ID, ".")
	// Reject IDs without a dot, with more than one dot, or whose namespace
	// part does not match the proxy's config namespace.
	if !ok || strings.Contains(ns, ".") || ns != proxy.ConfigNamespace {
		return types.NamespacedName{}
	}
	return types.NamespacedName{Name: name, Namespace: ns}
}
// GetInstanceForProxy returns a workload instance that
// corresponds to a given proxy, if any.
//
// Resolution order when multiple instances share the IP:
//  1. the instance whose namespace/name matches the proxy ID,
//  2. an instance in the proxy's config namespace,
//  3. the first instance (ordered by namespace/name).
func GetInstanceForProxy(index Index, proxy *model.Proxy, proxyIP string) *model.WorkloadInstance {
	// Only consider IPs the proxy actually claims to own.
	if !slices.Contains(proxy.IPAddresses, proxyIP) {
		return nil
	}
	instances := index.GetByIP(proxyIP) // list is ordered by namespace/name
	if len(instances) == 0 {
		return nil
	}
	if len(instances) == 1 { // dominant use case
		// NOTE: for the sake of backwards compatibility, we don't enforce
		// instance.Namespace == proxy.ConfigNamespace
		return instances[0]
	}
	// try to find workload instance with the same name as proxy
	proxyName := InstanceNameForProxy(proxy)
	if proxyName != (types.NamespacedName{}) {
		instance := FindInstance(instances, func(wi *model.WorkloadInstance) bool {
			return wi.Name == proxyName.Name && wi.Namespace == proxyName.Namespace
		})
		if instance != nil {
			return instance
		}
	}
	// try to find workload instance in the same namespace as proxy
	instance := FindInstance(instances, func(wi *model.WorkloadInstance) bool {
		// TODO: take auto-registration group into account once it's included into workload instance
		return wi.Namespace == proxy.ConfigNamespace
	})
	if instance != nil {
		return instance
	}
	// fall back to choosing one of the workload instances
	// NOTE: for the sake of backwards compatibility, we don't enforce
	// instance.Namespace == proxy.ConfigNamespace
	return instances[0]
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdsfake
import (
"sort"
"strings"
"time"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/test"
)
// NewFakeXDS creates a XdsUpdater reporting events via a channel.
// The channel is buffered (100); events beyond the buffer are dropped.
func NewFakeXDS() *Updater {
	return &Updater{
		Events: make(chan Event, 100),
	}
}
// NewWithDelegate creates a XdsUpdater reporting events via a channel,
// additionally forwarding every call to the given delegate.
func NewWithDelegate(delegate model.XDSUpdater) *Updater {
	return &Updater{
		Events:   make(chan Event, 100),
		Delegate: delegate,
	}
}
// Updater is used to test the registry.
type Updater struct {
	// Events tracks notifications received by the updater
	Events chan Event
	// Delegate, when non-nil, receives every call after the event is recorded.
	Delegate model.XDSUpdater
}

// Compile-time check that Updater satisfies model.XDSUpdater.
var _ model.XDSUpdater = &Updater{}
// ConfigUpdate records an "xds" (or "xds full") event whose ID is the sorted,
// comma-joined list of updated config names, then forwards to the delegate.
// The event send is non-blocking: when the channel is full the event is dropped.
func (fx *Updater) ConfigUpdate(req *model.PushRequest) {
	// The original checked req for nil once but then dereferenced it
	// unconditionally (req.Full, req.Reason), panicking on a nil request.
	// Guard all accesses consistently instead.
	event := "xds"
	var id string
	var reason model.ReasonStats
	if req != nil {
		names := make([]string, 0, len(req.ConfigsUpdated))
		for key := range req.ConfigsUpdated {
			names = append(names, key.Name)
		}
		sort.Strings(names)
		id = strings.Join(names, ",")
		if req.Full {
			event += " full"
		}
		reason = req.Reason
	}
	select {
	case fx.Events <- Event{Type: event, ID: id, Reason: reason}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.ConfigUpdate(req)
	}
}
// ProxyUpdate records a "proxy" event keyed by the proxy IP (dropped if the
// channel is full), then forwards to the delegate.
func (fx *Updater) ProxyUpdate(c cluster.ID, ip string) {
	select {
	case fx.Events <- Event{Type: "proxy", ID: ip}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.ProxyUpdate(c, ip)
	}
}
// Event is used to watch XdsEvents
type Event struct {
// Type of the event
Type string
// The id of the event
ID string
Reason model.ReasonStats
Namespace string
// The endpoints associated with an EDS push if any
Endpoints []*model.IstioEndpoint
// EndpointCount, used in matches only
EndpointCount int
}
// EDSUpdate records an "eds" event carrying the pushed endpoints (dropped if
// the channel is full), then forwards to the delegate.
func (fx *Updater) EDSUpdate(c model.ShardKey, hostname string, ns string, entry []*model.IstioEndpoint) {
	select {
	case fx.Events <- Event{Type: "eds", ID: hostname, Endpoints: entry, Namespace: ns}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.EDSUpdate(c, hostname, ns, entry)
	}
}
// EDSCacheUpdate records an "eds cache" event carrying the cached endpoints
// (dropped if the channel is full), then forwards to the delegate.
func (fx *Updater) EDSCacheUpdate(c model.ShardKey, hostname, ns string, entry []*model.IstioEndpoint) {
	select {
	case fx.Events <- Event{Type: "eds cache", ID: hostname, Endpoints: entry, Namespace: ns}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.EDSCacheUpdate(c, hostname, ns, entry)
	}
}
// SvcUpdate is called when a service port mapping definition is updated.
// This interface is WIP - labels, annotations and other changes to service may be
// updated to force a EDS and CDS recomputation and incremental push, as it doesn't affect
// LDS/RDS.
// It records a "service" event (dropped if the channel is full), then
// forwards to the delegate.
func (fx *Updater) SvcUpdate(c model.ShardKey, hostname string, ns string, ev model.Event) {
	select {
	case fx.Events <- Event{Type: "service", ID: hostname, Namespace: ns}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.SvcUpdate(c, hostname, ns, ev)
	}
}
// RemoveShard records a "removeShard" event keyed by the shard key (dropped
// if the channel is full), then forwards to the delegate.
func (fx *Updater) RemoveShard(shardKey model.ShardKey) {
	select {
	case fx.Events <- Event{Type: "removeShard", ID: shardKey.String()}:
	default:
	}
	if fx.Delegate != nil {
		fx.Delegate.RemoveShard(shardKey)
	}
}
// WaitOrFail waits up to five seconds for an event of the given type,
// skipping (and logging) any events of other types. The test fails on timeout.
func (fx *Updater) WaitOrFail(t test.Failer, et string) *Event {
	t.Helper()
	timeout := time.NewTimer(time.Second * 5)
	defer timeout.Stop()
	for {
		select {
		case ev := <-fx.Events:
			if ev.Type != et {
				log.Infof("skipping event %q want %q", ev.Type, et)
				continue
			}
			return &ev
		case <-timeout.C:
			t.Fatalf("timed out waiting for %v", et)
		}
	}
}
// MatchOrFail expects the provided events to arrive, skipping unmatched events
func (fx *Updater) MatchOrFail(t test.Failer, events ...Event) {
	t.Helper()
	fx.matchOrFail(t, false, events...)
}
// StrictMatchOrFail expects the provided events to arrive, and nothing else
func (fx *Updater) StrictMatchOrFail(t test.Failer, events ...Event) {
	t.Helper()
	fx.matchOrFail(t, true, events...)
}
// matchOrFail waits (up to five seconds) until every expected event has been
// matched by a received event. An expected event matches when its Type equals
// the received Type and each non-zero field (ID, Namespace, EndpointCount)
// also matches. In strict mode an unmatched received event fails the test;
// otherwise it is logged and skipped.
func (fx *Updater) matchOrFail(t test.Failer, strict bool, events ...Event) {
	t.Helper()
	delay := time.NewTimer(time.Second * 5)
	defer delay.Stop()
	for {
		// Done once every expected event has been consumed.
		if len(events) == 0 {
			return
		}
		select {
		case e := <-fx.Events:
			found := false
			for i, want := range events {
				if e.Type == want.Type &&
					(want.ID == "" || e.ID == want.ID) &&
					(want.Namespace == "" || want.Namespace == e.Namespace) &&
					(want.EndpointCount == 0 || want.EndpointCount == len(e.Endpoints)) {
					// Matched - delete event from desired
					events = slices.Delete(events, i)
					found = true
					break
				}
			}
			if !found {
				if strict {
					t.Fatalf("unexpected event %q/%v", e.Type, e.ID)
				} else {
					log.Infof("skipping event %q/%v", e.Type, e.ID)
				}
			}
			continue
		case <-delay.C:
			t.Fatalf("timed out waiting for %v", events)
		}
	}
}
// Clear drains any pending events without blocking.
func (fx *Updater) Clear() {
	for {
		select {
		case <-fx.Events:
		default:
			return
		}
	}
}
// AssertEmpty ensures there are no events in the channel. With dur == 0 only
// an immediate, non-blocking check is made; otherwise any event arriving
// within dur fails the test.
func (fx *Updater) AssertEmpty(t test.Failer, dur time.Duration) {
	if dur == 0 {
		select {
		case e := <-fx.Events:
			t.Fatalf("got unexpected event %+v", e)
		default:
		}
		return
	}
	select {
	case e := <-fx.Events:
		t.Fatalf("got unexpected event %+v", e)
	case <-time.After(dur):
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distribution
import (
"strconv"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/ledger"
)
// tryLedgerPut records the generation of obj in the distribution ledger.
// Failures are logged but not returned: status will simply be out of date
// until a later update succeeds. The underlying error is included in the log
// so the cause is diagnosable (the original message silently dropped it).
func tryLedgerPut(configLedger ledger.Ledger, obj config.Config) {
	key := obj.Key()
	if _, err := configLedger.Put(key, strconv.FormatInt(obj.Generation, 10)); err != nil {
		scope.Errorf("Failed to update %s in ledger, status will be out of date: %v", key, err)
	}
}
// tryLedgerDelete removes obj from the distribution ledger. Failures are
// logged (including the underlying error, which the original message
// dropped) but not returned; status will be out of date until reconciled.
func tryLedgerDelete(configLedger ledger.Ledger, obj config.Config) {
	key := obj.Key()
	if err := configLedger.Delete(key); err != nil {
		scope.Errorf("Failed to delete %s in ledger, status will be out of date: %v", key, err)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distribution
import (
"gopkg.in/yaml.v2"
)
// Report is a distribution report produced by a status follower (a single
// istiod instance) and consumed by the status leader.
type Report struct {
	// Reporter identifies the istiod instance (pod name) producing this report.
	Reporter string `json:"reporter"`
	// DataPlaneCount is the number of dataplane connections this instance sees.
	DataPlaneCount int `json:"dataPlaneCount"`
	// InProgressResources maps a resource identifier (see status.Resource.String)
	// to the number of dataplanes that have acked its tracked version.
	InProgressResources map[string]int `json:"inProgressResources"`
}
// ReportFromYaml deserializes a distribution Report from its YAML encoding.
func ReportFromYaml(content []byte) (Report, error) {
	var report Report
	if err := yaml.Unmarshal(content, &report); err != nil {
		return report, err
	}
	return report, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distribution
import (
"context"
"fmt"
"sync"
"time"
"gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/utils/clock"
"istio.io/istio/pilot/pkg/status"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/ledger"
"istio.io/istio/pkg/util/sets"
)
// GenStatusReporterMapKey builds the key used by the Reporter status maps,
// combining a connection ID with the xDS type being tracked.
func GenStatusReporterMapKey(conID string, distributionType xds.EventType) string {
	return conID + "~" + distributionType
}
// inProgressEntry tracks a resource whose distribution is still being
// monitored, plus how many consecutive reports have shown it fully acked.
type inProgressEntry struct {
	// the resource, including resourceVersion, we are currently tracking
	status.Resource
	// the number of reports we have written with this resource at 100%
	completedIterations int
}

// Reporter is the status follower: it records which config nonce each
// connected dataplane has acked and periodically publishes a distribution
// Report to a per-pod ConfigMap for the status leader to aggregate.
type Reporter struct {
	// mu guards status, reverseStatus and inProgressResources.
	mu sync.RWMutex
	// map from connection id to latest nonce
	status map[string]string
	// map from nonce to connection ids for which it is current
	// using map[string]struct to approximate a hashset
	reverseStatus       map[string]sets.String
	inProgressResources map[string]*inProgressEntry
	// client writes the report ConfigMap.
	client v1.ConfigMapInterface
	// cm is the ConfigMap carrying this pod's distribution report.
	cm *corev1.ConfigMap
	// UpdateInterval is how often the report is (re)written.
	UpdateInterval time.Duration
	PodName        string
	// clock is injectable for tests; defaults to the real clock in Init.
	clock  clock.Clock
	ledger ledger.Ledger
	// distributionEventQueue buffers dataplane ack events off the hot ads path.
	distributionEventQueue chan distributionEvent
	// controller, when set, is notified of config deletions.
	controller *Controller
}

// Reporter implements the xds distribution-status cache interface.
var _ xds.DistributionStatusCache = &Reporter{}

const (
	// labelKey marks report ConfigMaps so the leader's informer can select them.
	labelKey = "internal.istio.io/distribution-report"
	// dataField is the ConfigMap data key holding the YAML-encoded Report.
	dataField = "distribution-report"
)
// Init starts all the read only features of the reporter, used for nonce generation
// and responding to istioctl wait.
func (r *Reporter) Init(ledger ledger.Ledger, stop <-chan struct{}) {
	r.ledger = ledger
	if r.clock == nil {
		// Default to the real clock; tests may inject a fake before Init.
		r.clock = clock.RealClock{}
	}
	r.status = map[string]string{}
	r.reverseStatus = map[string]sets.String{}
	r.inProgressResources = map[string]*inProgressEntry{}
	// Large buffer so the ads path rarely has to drop events.
	r.distributionEventQueue = make(chan distributionEvent, 100_000)
	go r.readFromEventQueue(stop)
}
// Start starts the reporter, which watches dataplane ack's and resource changes so that it can update status leader
// with distribution information.
func (r *Reporter) Start(clientSet kubernetes.Interface, namespace string, podname string, stop <-chan struct{}) {
	scope.Info("Starting status follower controller")
	r.client = clientSet.CoreV1().ConfigMaps(namespace)
	// The report is published to a per-pod ConfigMap, labeled so the leader's
	// informer selects it.
	r.cm = &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:   r.PodName + "-distribution",
			Labels: map[string]string{labelKey: "true"},
		},
		Data: make(map[string]string),
	}
	t := r.clock.Tick(r.UpdateInterval)
	ctx := status.NewIstioContext(stop)
	// Make this pod the owner of the ConfigMap so Kubernetes garbage-collects
	// it if the pod disappears without running the cleanup below.
	x, err := clientSet.CoreV1().Pods(namespace).Get(ctx, podname, metav1.GetOptions{})
	if err != nil {
		scope.Errorf("can't identify pod %s context: %s", podname, err)
	} else {
		r.cm.OwnerReferences = []metav1.OwnerReference{
			*metav1.NewControllerRef(x, corev1.SchemeGroupVersion.WithKind("Pod")),
		}
	}
	// Report-writer loop: write on every tick; on shutdown, delete the
	// ConfigMap and close the event queue.
	go func() {
		for {
			select {
			case <-ctx.Done():
				if r.cm != nil {
					// TODO: is the use of a cancelled context here a problem? Maybe set a short timeout context?
					if err := r.client.Delete(context.Background(), r.cm.Name, metav1.DeleteOptions{}); err != nil {
						scope.Errorf("failed to properly clean up distribution report: %v", err)
					}
				}
				close(r.distributionEventQueue)
				return
			case <-t:
				// TODO, check if report is necessary? May already be handled by client
				r.writeReport(ctx)
			}
		}
	}()
}
// buildReport builds a distribution report to send to the status leader, and
// returns the list of resources that appear fully distributed (acked by at
// least DataPlaneCount dataplanes) so the caller can stop tracking them.
func (r *Reporter) buildReport() (Report, []status.Resource) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	var finishedResources []status.Resource
	out := Report{
		Reporter:            r.PodName,
		DataPlaneCount:      len(r.status),
		InProgressResources: map[string]int{},
	}
	// for every resource in flight
	for _, ipr := range r.inProgressResources {
		res := ipr.Resource
		key := res.String()
		// for every version (nonce) of the config currently in play
		for nonce, dataplanes := range r.reverseStatus {
			// check to see if this version of the config contains this version of the resource
			// it might be more optimal to provide for a full dump of the config at a certain version?
			dpVersion, err := r.ledger.GetPreviousValue(nonce, res.ToModelKey())
			if err == nil && dpVersion == res.Generation {
				// Sum acks for this resource across all nonces that carry the
				// tracked generation.
				if _, ok := out.InProgressResources[key]; !ok {
					out.InProgressResources[key] = len(dataplanes)
				} else {
					out.InProgressResources[key] += len(dataplanes)
				}
			} else if err != nil {
				scope.Errorf("Encountered error retrieving version %s of key %s from Store: %v", nonce, key, err)
				continue
			} else if nonce == r.ledger.RootHash() {
				scope.Warnf("Cache appears to be missing latest version of %s", key)
			}
			if out.InProgressResources[key] >= out.DataPlaneCount {
				// if this resource is done reconciling, let's not worry about it anymore
				finishedResources = append(finishedResources, res)
				// deleting it here doesn't work because we have a read lock and are inside an iterator.
				// TODO: this will leak when a resource never reaches 100% before it is replaced.
				// TODO: do deletes propagate through this thing?
			}
		}
	}
	return out, finishedResources
}
// removeCompletedResource stops tracking resources that have reached 100%
// distribution. For efficiency, we don't want to be checking on resources
// that have already reached 100% distribution; a resource is only dropped
// after staying complete for roughly one minute's worth of report
// iterations, to avoid flapping.
func (r *Reporter) removeCompletedResource(completedResources []status.Resource) {
	r.mu.Lock()
	defer r.mu.Unlock()
	// Number of report iterations in one minute. Guard against a zero
	// UpdateInterval, which would otherwise panic with a division by zero.
	var iterationsPerMinute int64
	if ms := r.UpdateInterval.Milliseconds(); ms > 0 {
		iterationsPerMinute = time.Minute.Milliseconds() / ms
	}
	for _, item := range completedResources {
		// Compute the key once and look the entry up once (the original did
		// three lookups per item).
		key := item.ToModelKey()
		entry, ok := r.inProgressResources[key]
		if !ok {
			// Cache miss: resource no longer tracked (e.g. already deleted).
			// Skip to avoid a nil pointer dereference.
			continue
		}
		total := entry.completedIterations + 1
		if int64(total) > iterationsPerMinute {
			// Complete for long enough; stop tracking it. Deleting here is
			// safe: we range over the input slice, not the map.
			// TODO: cleanup completedResources
			delete(r.inProgressResources, key)
		} else {
			entry.completedIterations = total
		}
	}
}
// AddInProgressResource must be called every time a resource change is detected by pilot. This allows us to lookup
// only the resources we expect to be in flight, not the ones that have already distributed
func (r *Reporter) AddInProgressResource(res config.Config) {
	tryLedgerPut(r.ledger, res)
	tracked := status.ResourceFromModelConfig(res)
	if tracked == (status.Resource{}) {
		// No schema could be resolved for this config type; nothing to track.
		scope.Errorf("Unable to locate schema for %v, will not update status.", res)
		return
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	// completedIterations starts at its zero value.
	r.inProgressResources[tracked.ToModelKey()] = &inProgressEntry{Resource: tracked}
}
// DeleteInProgressResource stops tracking res: it is removed from the
// ledger, the leader controller (if wired) is told about the deletion, and
// the resource is dropped from the in-progress set.
func (r *Reporter) DeleteInProgressResource(res config.Config) {
	tryLedgerDelete(r.ledger, res)
	if c := r.controller; c != nil {
		c.configDeleted(res)
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.inProgressResources, res.Key())
}
// writeReport generates a distribution report and writes it to a ConfigMap
// for the leader to read.
func (r *Reporter) writeReport(ctx context.Context) {
	report, finished := r.buildReport()
	// Pruning fully-distributed resources can proceed concurrently with the write.
	go r.removeCompletedResource(finished)
	// write to kubernetes here.
	out, err := yaml.Marshal(report)
	if err != nil {
		scope.Errorf("Error serializing Distribution Report: %v", err)
		return
	}
	r.cm.Data[dataField] = string(out)
	// TODO: short circuit this write in the leader
	if _, err := CreateOrUpdateConfigMap(ctx, r.cm, r.client); err != nil {
		scope.Errorf("Error writing Distribution Report: %v", err)
	}
}
// CreateOrUpdateConfigMap is lifted with few modifications from kubeadm's apiclient.
// It attempts to create cm, falling back to an update when the ConfigMap
// already exists. Any other create error, or an update error, is returned.
func CreateOrUpdateConfigMap(ctx context.Context, cm *corev1.ConfigMap, client v1.ConfigMapInterface) (res *corev1.ConfigMap, err error) {
	if res, err = client.Create(ctx, cm, metav1.CreateOptions{}); err != nil {
		if !kerrors.IsAlreadyExists(err) {
			scope.Errorf("%v", err)
			return nil, fmt.Errorf("unable to create ConfigMap: %w", err)
		}
		// Use the caller's ctx rather than context.TODO() so the update
		// honors the same cancellation/deadline as the create did.
		if res, err = client.Update(ctx, cm, metav1.UpdateOptions{}); err != nil {
			return nil, fmt.Errorf("unable to update ConfigMap: %w", err)
		}
	}
	return res, nil
}
// distributionEvent is a single dataplane ack, queued off the ads hot path
// and applied to the status maps by readFromEventQueue.
type distributionEvent struct {
	// conID is the dataplane connection that acked.
	conID string
	// distributionType is the xDS type that was acked.
	distributionType xds.EventType
	// nonce is the acked nonce; its version prefix identifies the config version.
	nonce string
}
// QueryLastNonce returns the most recent nonce (version prefix) acked by the
// given connection for the given xDS type, or "" if none was recorded.
func (r *Reporter) QueryLastNonce(conID string, distributionType xds.EventType) (noncePrefix string) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	return r.status[GenStatusReporterMapKey(conID, distributionType)]
}
// RegisterEvent records that a dataplane has acknowledged a new version of the config.
// Theoretically, we could use the ads connections themselves to harvest this data,
// but the mutex there is pretty hot, and it seems best to trade memory for time.
func (r *Reporter) RegisterEvent(conID string, distributionType xds.EventType, nonce string) {
	if nonce == "" {
		return
	}
	// Skip unsupported event types. This ensures we do not leak memory for types
	// which may not be handled properly. For example, a type not in AllEventTypes
	// will not be properly unregistered.
	if _, tracked := xds.AllTrackingEventTypes[distributionType]; !tracked {
		return
	}
	ev := distributionEvent{nonce: nonce, distributionType: distributionType, conID: conID}
	select {
	case r.distributionEventQueue <- ev:
	default:
		// Drop rather than block the ads path; status will be temporarily wrong.
		scope.Errorf("Distribution Event Queue overwhelmed, status will be invalid.")
	}
}
// readFromEventQueue drains the distribution event queue, applying each
// event to the status maps, until stop is closed.
func (r *Reporter) readFromEventQueue(stop <-chan struct{}) {
	for {
		select {
		case <-stop:
			return
		case ev := <-r.distributionEventQueue:
			// TODO might need to batch this to prevent lock contention
			r.processEvent(ev.conID, ev.distributionType, ev.nonce)
		}
	}
}
// processEvent applies a single dataplane ack to the status and
// reverseStatus maps, replacing any previous entry for this connection/type.
// It takes the write lock.
func (r *Reporter) processEvent(conID string, distributionType xds.EventType, nonce string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	key := GenStatusReporterMapKey(conID, distributionType)
	r.deleteKeyFromReverseMap(key)
	var version string
	// Only the version prefix of the nonce identifies the config version.
	// Guard with xds.VersionLen rather than the previous hard-coded 12: if
	// VersionLen were ever larger than 12, a nonce of intermediate length
	// would have caused an out-of-range slice panic.
	if len(nonce) > xds.VersionLen {
		version = nonce[:xds.VersionLen]
	} else {
		version = nonce
	}
	// touch
	r.status[key] = version
	sets.InsertOrNew(r.reverseStatus, version, key)
}
// deleteKeyFromReverseMap keeps the reverseStatus map in step with status:
// if key currently maps to a nonce, key is removed from that nonce's set
// (and the set is dropped when it becomes empty).
// Callers must hold the write lock.
func (r *Reporter) deleteKeyFromReverseMap(key string) {
	old, ok := r.status[key]
	if !ok {
		return
	}
	sets.DeleteCleanupLast(r.reverseStatus, old, key)
}
// RegisterDisconnect : when a dataplane disconnects, we should no longer count it, nor expect it to ack config.
func (r *Reporter) RegisterDisconnect(conID string, types sets.Set[xds.EventType]) {
	r.mu.Lock()
	defer r.mu.Unlock()
	for eventType := range types {
		key := GenStatusReporterMapKey(conID, eventType)
		r.deleteKeyFromReverseMap(key)
		delete(r.status, key)
	}
}
// SetController wires the status leader controller into the reporter so it
// can be notified of config deletions (see DeleteInProgressResource).
func (r *Reporter) SetController(controller *Controller) {
	r.controller = controller
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package distribution
import (
"fmt"
"strings"
"sync"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/utils/clock"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/status"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/log"
)
// scope is the logging scope used throughout the distribution status package.
var scope = log.RegisterScope("status",
	"CRD distribution status debugging")
// Progress tracks how many dataplane instances have acked a config change
// out of the total known instances.
type Progress struct {
	AckedInstances int
	TotalInstances int
}

// PlusEquals accumulates p2 into p, field by field.
func (p *Progress) PlusEquals(p2 Progress) {
	p.AckedInstances += p2.AckedInstances
	p.TotalInstances += p2.TotalInstances
}
// Controller is the status leader: it aggregates distribution Reports from
// all followers (via report ConfigMaps) and writes distribution status onto
// the affected Istio config resources.
type Controller struct {
	configStore model.ConfigStore
	// mu guards CurrentState and ObservationTime.
	mu sync.RWMutex
	// CurrentState maps each tracked resource to per-reporter Progress.
	CurrentState map[status.Resource]map[string]Progress
	// ObservationTime records when each reporter was last heard from.
	ObservationTime map[string]time.Time
	// UpdateInterval is how often aggregated status is written.
	UpdateInterval time.Duration
	dynamicClient  dynamic.Interface
	clock          clock.Clock
	// workers performs the actual status writes.
	workers *status.Controller
	// StaleInterval is how long a silent reporter's data is trusted.
	StaleInterval time.Duration
	cmInformer    cache.SharedIndexInformer
	cmHandle      cache.ResourceEventHandlerRegistration
}
// NewController constructs the status leader controller, wiring a status
// worker (via m) and an informer over the followers' report ConfigMaps in
// the given namespace. It exits the process (Fatalf) if a dynamic client
// cannot be built from restConfig.
func NewController(restConfig *rest.Config, namespace string, cs model.ConfigStore, m *status.Manager) *Controller {
	c := &Controller{
		CurrentState:    make(map[status.Resource]map[string]Progress),
		ObservationTime: make(map[string]time.Time),
		UpdateInterval:  200 * time.Millisecond,
		StaleInterval:   time.Minute,
		clock:           clock.RealClock{},
		configStore:     cs,
		// The update function merges the aggregated Progress into the
		// resource's "Reconciled" condition; nil statuses pass through.
		workers: m.CreateIstioStatusController(func(status *v1alpha1.IstioStatus, context any) *v1alpha1.IstioStatus {
			if status == nil {
				return nil
			}
			distributionState := context.(Progress)
			if needsReconcile, desiredStatus := ReconcileStatuses(status, distributionState); needsReconcile {
				return desiredStatus
			}
			return status
		}),
	}
	// client-go defaults to 5 QPS, with 10 Boost, which is insufficient for updating status on all the config
	// in the mesh. These values can be configured using environment variables for tuning (see pilot/pkg/features)
	restConfig.QPS = float32(features.StatusQPS)
	restConfig.Burst = features.StatusBurst
	var err error
	if c.dynamicClient, err = dynamic.NewForConfig(restConfig); err != nil {
		scope.Fatalf("Could not connect to kubernetes: %s", err)
	}
	// configmap informer, restricted to ConfigMaps carrying the
	// distribution-report label written by the followers.
	i := informers.NewSharedInformerFactoryWithOptions(kubernetes.NewForConfigOrDie(restConfig), 1*time.Minute,
		informers.WithNamespace(namespace),
		informers.WithTweakListOptions(func(listOptions *metav1.ListOptions) {
			listOptions.LabelSelector = labels.Set(map[string]string{labelKey: "true"}).AsSelector().String()
		})).
		Core().V1().ConfigMaps()
	c.cmInformer = i.Informer()
	c.cmHandle, _ = c.cmInformer.AddEventHandler(&DistroReportHandler{dc: c})
	return c
}
// Start runs the status leader loop: the report informer is started, and on
// every UpdateInterval tick aggregated status is written and stale reporters
// are pruned, until stop is closed.
func (c *Controller) Start(stop <-chan struct{}) {
	scope.Info("Starting status leader controller")
	// this will list all existing configmaps, as well as updates, right?
	go c.cmInformer.Run(stop)
	// create Status Writer
	tick := c.clock.Tick(c.UpdateInterval)
	for {
		select {
		case <-stop:
			_ = c.cmInformer.RemoveEventHandler(c.cmHandle)
			return
		case <-tick:
			if stale := c.writeAllStatus(); len(stale) > 0 {
				c.removeStaleReporters(stale)
			}
		}
	}
}
// handleReport merges a follower's distribution report into CurrentState and
// records when the reporter was last heard from.
func (c *Controller) handleReport(d Report) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for resstr := range d.InProgressResources {
		res := status.ResourceFromString(resstr)
		if res == nil {
			// Malformed resource identifier (ResourceFromString logs the
			// details and returns nil); skip it rather than panic on a nil
			// pointer dereference, as the previous unconditional *res did.
			continue
		}
		if _, ok := c.CurrentState[*res]; !ok {
			c.CurrentState[*res] = make(map[string]Progress)
		}
		c.CurrentState[*res][d.Reporter] = Progress{d.InProgressResources[resstr], d.DataPlaneCount}
	}
	c.ObservationTime[d.Reporter] = c.clock.Now()
}
// writeAllStatus aggregates per-reporter Progress for every tracked istio.io
// resource and enqueues a status write for each. Reporters not heard from
// within StaleInterval are excluded and returned so the caller can prune them.
func (c *Controller) writeAllStatus() (staleReporters []string) {
	defer c.mu.RUnlock()
	c.mu.RLock()
	for config, fractions := range c.CurrentState {
		if !strings.HasSuffix(config.Group, "istio.io") {
			// don't try to write status for non-istio types
			continue
		}
		var distributionState Progress
		for reporter, w := range fractions {
			// check for stale data here
			if c.clock.Since(c.ObservationTime[reporter]) > c.StaleInterval {
				scope.Warnf("Status reporter %s has not been heard from since %v, deleting report.",
					reporter, c.ObservationTime[reporter])
				staleReporters = append(staleReporters, reporter)
			} else {
				distributionState.PlusEquals(w)
			}
		}
		if distributionState.TotalInstances > 0 { // this is necessary when all reports are stale.
			c.queueWriteStatus(config, distributionState)
		}
	}
	return
}
// removeStaleReporters deletes the given reporters' entries from every
// resource's progress map, so their stale data no longer contributes.
func (c *Controller) removeStaleReporters(staleReporters []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, fractions := range c.CurrentState {
		for _, reporter := range staleReporters {
			// fractions is a map value, so deleting mutates CurrentState in place.
			delete(fractions, reporter)
		}
	}
}
// queueWriteStatus hands the aggregated Progress for config to the status
// worker pool for an eventual API-server write.
func (c *Controller) queueWriteStatus(config status.Resource, state Progress) {
	c.workers.EnqueueStatusUpdateResource(state, config)
}
// configDeleted drops any pending status work for a deleted config resource.
func (c *Controller) configDeleted(res config.Config) {
	r := status.ResourceFromModelConfig(res)
	c.workers.Delete(r)
}
// boolToConditionStatus maps a bool onto the Kubernetes condition status
// strings "True" / "False".
func boolToConditionStatus(b bool) string {
	if !b {
		return "False"
	}
	return "True"
}
// ReconcileStatuses computes the desired "Reconciled" condition from the
// distribution Progress and merges it into a deep copy of current. It
// returns whether a write is needed (the condition is new, or its Message or
// Status changed) along with the updated status.
// NOTE(review): LastProbeTime and LastTransitionTime are refreshed on every
// call, even when Status did not transition — presumably intentional here,
// but verify against the API conventions for conditions.
func ReconcileStatuses(current *v1alpha1.IstioStatus, desired Progress) (bool, *v1alpha1.IstioStatus) {
	needsReconcile := false
	desiredCondition := v1alpha1.IstioCondition{
		Type:               "Reconciled",
		Status:             boolToConditionStatus(desired.AckedInstances == desired.TotalInstances),
		LastProbeTime:      timestamppb.Now(),
		LastTransitionTime: timestamppb.Now(),
		Message:            fmt.Sprintf("%d/%d proxies up to date.", desired.AckedInstances, desired.TotalInstances),
	}
	// Work on a copy so callers' status is not mutated in place.
	current = current.DeepCopy()
	var currentCondition *v1alpha1.IstioCondition
	conditionIndex := -1
	for i, c := range current.Conditions {
		if c.Type == "Reconciled" {
			currentCondition = current.Conditions[i]
			conditionIndex = i
			break
		}
	}
	if currentCondition == nil ||
		currentCondition.Message != desiredCondition.Message ||
		currentCondition.Status != desiredCondition.Status {
		needsReconcile = true
	}
	// Replace the existing condition, or append if it was absent.
	if conditionIndex > -1 {
		current.Conditions[conditionIndex] = &desiredCondition
	} else {
		current.Conditions = append(current.Conditions, &desiredCondition)
	}
	return needsReconcile, current
}
// DistroReportHandler feeds report-ConfigMap informer events into the leader
// controller.
type DistroReportHandler struct {
	dc *Controller
}

// OnAdd handles a newly observed report ConfigMap.
func (drh *DistroReportHandler) OnAdd(obj any, _ bool) {
	drh.HandleNew(obj)
}

// OnUpdate handles an updated report ConfigMap; only the new state matters.
func (drh *DistroReportHandler) OnUpdate(oldObj, newObj any) {
	drh.HandleNew(newObj)
}
// HandleNew parses the distribution report embedded in a ConfigMap and
// forwards it to the leader controller. Non-ConfigMap objects and malformed
// reports are logged and discarded.
func (drh *DistroReportHandler) HandleNew(obj any) {
	cm, ok := obj.(*v1.ConfigMap)
	if !ok {
		scope.Warnf("expected configmap, but received %v, discarding", obj)
		return
	}
	rptStr := cm.Data[dataField]
	scope.Debugf("using report: %s", rptStr)
	dr, err := ReportFromYaml([]byte(rptStr))
	if err != nil {
		scope.Warnf("received malformed distributionReport %s, discarding: %v", cm.Name, err)
		return
	}
	drh.dc.handleReport(dr)
}
// OnDelete is intentionally a no-op: report ConfigMaps are pruned via the
// staleness mechanism rather than informer delete events.
func (drh *DistroReportHandler) OnDelete(obj any) {
	// TODO: what do we do here? will these ever be deleted?
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
)
// Manager allows multiple controllers to provide input into configuration
// status without needlessly doubling the number of writes, or overwriting
// one another. Each status controller calls newController, passing in
// an arbitrary status modification function, and then calls EnqueueStatusUpdate
// when an individual resource is ready to be updated with the relevant data.
type Manager struct {
	// TODO: is Resource the right abstraction?
	// store reads current config and receives the status writes.
	store model.ConfigStore
	// workers serializes status writes per resource.
	workers WorkerQueue
}
// NewManager builds a status Manager over store, backed by a worker pool of
// up to StatusMaxWorkers goroutines.
func NewManager(store model.ConfigStore) *Manager {
	// write persists the composed status back onto the config object.
	write := func(m *config.Config, istatus any) {
		scope.Debugf("writing status for resource %s/%s", m.Namespace, m.Name)
		m.Status = istatus.(GenerationProvider).Unwrap()
		if _, err := store.UpdateStatus(*m); err != nil {
			// TODO: need better error handling
			scope.Errorf("Encountered unexpected error updating status for %v, will try again later: %s", m, err)
		}
	}
	// retrieve fetches the latest config for a resource, or nil when its GVR
	// cannot be mapped to a known GVK.
	retrieve := func(resource Resource) *config.Config {
		scope.Debugf("retrieving config for status update: %s/%s", resource.Namespace, resource.Name)
		k, ok := gvk.FromGVR(resource.GroupVersionResource)
		if !ok {
			scope.Warnf("GVR %v could not be identified", resource.GroupVersionResource)
			return nil
		}
		return store.Get(k, resource.Name, resource.Namespace)
	}
	return &Manager{
		store:   store,
		workers: NewWorkerPool(write, retrieve, uint(features.StatusMaxWorkers)),
	}
}
// Start runs the status worker pool until stop is closed.
func (m *Manager) Start(stop <-chan struct{}) {
	scope.Info("Starting status manager")
	// Bridge the stop channel into a cancellable context for the workers.
	ctx := NewIstioContext(stop)
	m.workers.Run(ctx)
}
// CreateGenericController provides an interface for a status update function to be
// called in series with other controllers, minimizing the number of actual
// api server writes sent from various status controllers. The UpdateFunc
// must take the target resource status and arbitrary context information as
// parameters, and return the updated status value. Multiple controllers
// will be called in series, so the input status may not have been written
// to the API server yet, and the output status may be modified by other
// controllers before it is written to the server.
func (m *Manager) CreateGenericController(fn UpdateFunc) *Controller {
	return &Controller{fn: fn, workers: m.workers}
}
// CreateIstioStatusController registers a controller whose update function
// works directly on *v1alpha1.IstioStatus, adapting it to the generic
// GenerationProvider-based UpdateFunc contract.
func (m *Manager) CreateIstioStatusController(fn func(status *v1alpha1.IstioStatus, context any) *v1alpha1.IstioStatus) *Controller {
	wrapper := func(status any, context any) GenerationProvider {
		// Unwrap the incoming status (nil stays nil) before calling fn.
		var input *v1alpha1.IstioStatus
		if status != nil {
			input = status.(*IstioGenerationProvider).IstioStatus
		}
		return &IstioGenerationProvider{fn(input, context)}
	}
	return &Controller{fn: wrapper, workers: m.workers}
}
// UpdateFunc composes a controller's contribution into a resource's status.
// It receives the (possibly already modified) status and controller-specific
// context, and returns the updated status.
type UpdateFunc func(status any, context any) GenerationProvider

// Controller is one registered status contributor, bound to the shared
// worker queue.
type Controller struct {
	fn      UpdateFunc
	workers WorkerQueue
}

// EnqueueStatusUpdateResource informs the manager that this controller would like to
// update the status of target, using the information in context. Once the status
// workers are ready to perform this update, the controller's UpdateFunc
// will be called with target and context as input.
func (c *Controller) EnqueueStatusUpdateResource(context any, target Resource) {
	// TODO: buffer this with channel
	c.workers.Push(target, c, context)
}

// Delete drops any pending status work for r.
func (c *Controller) Delete(r Resource) {
	c.workers.Delete(r)
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package status
import (
"context"
"fmt"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/log"
)
var scope = log.RegisterScope("status",
"status controller for istio")
// ResourceFromString parses the six-segment identifier produced by
// Resource.String (group/version/resource/namespace/name/generation).
// It returns nil (after logging) when s does not have exactly six segments.
func ResourceFromString(s string) *Resource {
	parts := strings.Split(s, "/")
	if len(parts) != 6 {
		scope.Errorf("cannot unmarshal %s into resource identifier", s)
		return nil
	}
	gvr := schema.GroupVersionResource{
		Group:    parts[0],
		Version:  parts[1],
		Resource: parts[2],
	}
	return &Resource{
		GroupVersionResource: gvr,
		Namespace:            parts[3],
		Name:                 parts[4],
		Generation:           parts[5],
	}
}
// TODO: maybe replace with a kubernetes resource identifier, if that's a thing
// Resource identifies a specific generation of a specific config resource.
type Resource struct {
	schema.GroupVersionResource
	Namespace string
	Name      string
	// Generation is the resource generation, stored as its decimal string form.
	Generation string
}

// String renders the identifier as six slash-separated segments; it is the
// inverse of ResourceFromString.
func (r Resource) String() string {
	return strings.Join([]string{r.Group, r.Version, r.GroupVersionResource.Resource, r.Namespace, r.Name, r.Generation}, "/")
}
// ToModelKey renders r as a pilot model config key (which uses kind, not
// resource), resolving the kind through the collections schema.
// NOTE(review): the lookup's ok result is ignored — for an unknown GVR,
// found is the zero schema; presumably all callers use registered types, but
// verify that assumption.
func (r *Resource) ToModelKey() string {
	// we have a resource here, but model keys use kind. Use the schema to find the correct kind.
	found, _ := collections.All.FindByGroupVersionResource(r.GroupVersionResource)
	return config.Key(
		found.Group(), found.Version(), found.Kind(),
		r.Name, r.Namespace)
}
// ResourceFromMetadata builds a Resource identifier from analysis resource
// metadata.
func ResourceFromMetadata(i resource.Metadata) Resource {
	r := Resource{GroupVersionResource: i.Schema.GroupVersionResource()}
	r.Namespace = i.FullName.Namespace.String()
	r.Name = i.FullName.Name.String()
	r.Generation = strconv.FormatInt(i.Generation, 10)
	return r
}
// ResourceFromModelConfig builds a Resource identifier from a pilot config
// object. It returns the zero Resource when the config's GVK has no known GVR.
func ResourceFromModelConfig(c config.Config) Resource {
	gvr, found := gvk.ToGVR(c.GroupVersionKind)
	if !found {
		return Resource{}
	}
	return Resource{
		GroupVersionResource: gvr,
		Namespace:            c.Namespace,
		Name:                 c.Name,
		Generation:           strconv.FormatInt(c.Generation, 10),
	}
}
// GetTypedStatus casts in to *v1alpha1.IstioStatus, or returns an error when
// it holds any other type.
func GetTypedStatus(in any) (out *v1alpha1.IstioStatus, err error) {
	ret, ok := in.(*v1alpha1.IstioStatus)
	if !ok {
		return nil, fmt.Errorf("cannot cast %T: %v to IstioStatus", in, in)
	}
	return ret, nil
}
// GetOGProvider wraps in as a GenerationProvider; it errors when in is not a
// non-nil *v1alpha1.IstioStatus.
func GetOGProvider(in any) (out GenerationProvider, err error) {
	ret, ok := in.(*v1alpha1.IstioStatus)
	if !ok || ret == nil {
		return nil, fmt.Errorf("cannot cast %T: %v to GenerationProvider", in, in)
	}
	return &IstioGenerationProvider{ret}, nil
}
// NewIstioContext returns a context that is cancelled when stop is closed.
func NewIstioContext(stop <-chan struct{}) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		defer cancel()
		<-stop
	}()
	return ctx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package status
import (
"context"
"strconv"
"sync"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/util/sets"
)
// Task to be performed.
type Task func(entry cacheEntry)

// WorkerQueue implements an expandable goroutine pool which executes at most one concurrent routine per target
// resource. Multiple calls to Push() will not schedule multiple executions per target resource, but will ensure that
// the single execution uses the latest value.
type WorkerQueue interface {
	// Push a task.
	Push(target Resource, controller *Controller, context any)
	// Run the loop until a signal on the context
	Run(ctx context.Context)
	// Delete a task
	Delete(target Resource)
}

// cacheEntry is the queued unit of work for one resource.
type cacheEntry struct {
	// the cacheVale represents the latest version of the resource, including ResourceVersion
	cacheResource Resource
	// the perControllerStatus represents the latest version of the ResourceStatus
	perControllerStatus map[*Controller]any
}

// lockResource is a Resource stripped of its Generation, used as the map key
// that ensures at most one worker operates on a resource at a time.
type lockResource struct {
	schema.GroupVersionResource
	Namespace string
	Name      string
}
// convert strips the Generation from a Resource, yielding the lock/cache key.
func convert(i Resource) lockResource {
	key := lockResource{GroupVersionResource: i.GroupVersionResource}
	key.Namespace = i.Namespace
	key.Name = i.Name
	return key
}
// WorkQueue holds pending status-update tasks, deduplicated per resource.
type WorkQueue struct {
	// tasks which are not currently executing but need to run
	tasks []lockResource
	// a lock to govern access to data in the cache
	lock sync.Mutex
	// for each task, a cacheEntry which can be updated before the task is run so that execution will have latest values
	cache map[lockResource]cacheEntry
	// OnPush, when non-nil, is invoked (outside the lock) after every Push.
	OnPush func()
}
// Push enqueues a status update for target. If the resource is already
// queued, only this controller's latest progress is refreshed (no duplicate
// task is added); otherwise a fresh cache entry and task are created.
func (wq *WorkQueue) Push(target Resource, ctl *Controller, progress any) {
	wq.lock.Lock()
	key := convert(target)
	if entry, queued := wq.cache[key]; queued {
		entry.perControllerStatus[ctl] = progress
		wq.cache[key] = entry
	} else {
		wq.cache[key] = cacheEntry{
			cacheResource:       target,
			perControllerStatus: map[*Controller]any{ctl: progress},
		}
		wq.tasks = append(wq.tasks, key)
	}
	wq.lock.Unlock()
	// Invoke the callback outside the lock.
	if wq.OnPush != nil {
		wq.OnPush()
	}
}
// Pop returns the first item in the queue not in exclusion, along with it's latest progress
func (wq *WorkQueue) Pop(exclusion sets.Set[lockResource]) (target Resource, progress map[*Controller]any) {
	wq.lock.Lock()
	defer wq.lock.Unlock()
	for i := 0; i < len(wq.tasks); i++ {
		if !exclusion.Contains(wq.tasks[i]) {
			// remove from tasks
			t, ok := wq.cache[wq.tasks[i]]
			wq.tasks = append(wq.tasks[:i], wq.tasks[i+1:]...)
			if !ok {
				// The cache entry was deleted after the task was queued;
				// return the zero Resource, which callers treat as "nothing
				// to do" (they do not scan further in this call).
				return Resource{}, nil
			}
			return t.cacheResource, t.perControllerStatus
		}
	}
	// No task outside the exclusion set.
	return Resource{}, nil
}
// Length reports the number of queued (not currently executing) tasks.
func (wq *WorkQueue) Length() int {
	wq.lock.Lock()
	n := len(wq.tasks)
	wq.lock.Unlock()
	return n
}
// Delete removes target's cache entry. Note the corresponding key may still
// sit in tasks; Pop detects the missing cache entry and yields the zero
// Resource for it.
func (wq *WorkQueue) Delete(target Resource) {
	wq.lock.Lock()
	defer wq.lock.Unlock()
	delete(wq.cache, convert(target))
}
// WorkerPool executes queued status updates with an elastic set of worker
// goroutines, never running two workers on the same resource concurrently.
type WorkerPool struct {
	q WorkQueue
	// indicates the queue is closing
	closing bool
	// the function which will be run for each task in queue
	write func(*config.Config, any)
	// the function to retrieve the initial status
	get func(Resource) *config.Config
	// current worker routine count
	workerCount uint
	// maximum worker routine count
	maxWorkers uint
	// currentlyWorking holds resources a worker is operating on right now.
	currentlyWorking sets.Set[lockResource]
	// lock guards closing, workerCount and currentlyWorking.
	lock sync.Mutex
}
// NewWorkerPool constructs a WorkerQueue backed by at most maxWorkers
// goroutines. write persists a composed status; get retrieves the current
// config for a resource.
func NewWorkerPool(write func(*config.Config, any), get func(Resource) *config.Config, maxWorkers uint) WorkerQueue {
	wp := &WorkerPool{
		write:            write,
		get:              get,
		maxWorkers:       maxWorkers,
		currentlyWorking: sets.New[lockResource](),
	}
	// OnPush stays nil: workers are spawned explicitly in Push.
	wp.q = WorkQueue{
		tasks: make([]lockResource, 0),
		cache: make(map[lockResource]cacheEntry),
	}
	return wp
}
// Delete drops any pending work for target.
func (wp *WorkerPool) Delete(target Resource) {
	wp.q.Delete(target)
}

// Push enqueues a status update and spawns a worker if capacity allows.
func (wp *WorkerPool) Push(target Resource, controller *Controller, context any) {
	wp.q.Push(target, controller, context)
	wp.maybeAddWorker()
}
// Run arranges for the pool to stop accepting work once ctx is cancelled;
// workers observe the closing flag and drain out on their next iteration.
func (wp *WorkerPool) Run(ctx context.Context) {
	context.AfterFunc(ctx, func() {
		wp.lock.Lock()
		wp.closing = true
		wp.lock.Unlock()
	})
}
// maybeAddWorker adds a worker unless we are at maxWorkers. Workers exit when there are no more tasks, except for the
// last worker, which stays alive indefinitely.
func (wp *WorkerPool) maybeAddWorker() {
	wp.lock.Lock()
	if wp.workerCount >= wp.maxWorkers || wp.q.Length() == 0 {
		wp.lock.Unlock()
		return
	}
	wp.workerCount++
	wp.lock.Unlock()
	go func() {
		for {
			wp.lock.Lock()
			if wp.closing || wp.q.Length() == 0 {
				// Pool is closing or nothing queued: this worker retires.
				wp.workerCount--
				wp.lock.Unlock()
				return
			}
			// Skip resources another worker is currently handling.
			target, perControllerWork := wp.q.Pop(wp.currentlyWorking)
			if target == (Resource{}) {
				// continue or return?
				// could have been deleted, or could be no items in queue not currently worked on. need a way to differentiate.
				wp.lock.Unlock()
				continue
			}
			wp.q.Delete(target)
			wp.currentlyWorking.Insert(convert(target))
			wp.lock.Unlock()
			// work should be done without holding the lock
			cfg := wp.get(target)
			if cfg != nil {
				// Check that generation matches
				if strconv.FormatInt(cfg.Generation, 10) == target.Generation {
					x, err := GetOGProvider(cfg.Status)
					if err == nil {
						// Not all controllers user generation, so we can ignore errors
						x.SetObservedGeneration(cfg.Generation)
					}
					// Compose every controller's contribution into one write.
					for c, i := range perControllerWork {
						// TODO: this does not guarantee controller order. perhaps it should?
						x = c.fn(x, i)
					}
					wp.write(cfg, x)
				}
			}
			wp.lock.Lock()
			wp.currentlyWorking.Delete(convert(target))
			wp.lock.Unlock()
		}
	}()
}
// GenerationProvider abstracts status objects that can record the generation
// of the spec they were computed from.
type GenerationProvider interface {
    // SetObservedGeneration records the generation of the config that produced this status.
    SetObservedGeneration(int64)
    // Unwrap returns the underlying status object to be written back.
    Unwrap() any
}

// IstioGenerationProvider adapts v1alpha1.IstioStatus to GenerationProvider.
type IstioGenerationProvider struct {
    *v1alpha1.IstioStatus
}

// SetObservedGeneration records the generation of the config that produced this status.
func (i *IstioGenerationProvider) SetObservedGeneration(in int64) {
    i.ObservedGeneration = in
}

// Unwrap returns the wrapped IstioStatus.
func (i *IstioGenerationProvider) Unwrap() any {
    return i.IstioStatus
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trustbundle
import (
"crypto/x509"
"encoding/pem"
"fmt"
"sort"
"strings"
"sync"
"time"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
)
// Source is all possible sources of MeshConfig
type Source int

const (
    SourceIstioCA Source = iota
    SourceMeshConfig
    SourceIstioRA
    sourceSpiffeEndpoints

    RemoteDefaultPollPeriod = 30 * time.Minute
)

// String returns a human-readable name for the trust anchor source.
func (s Source) String() string {
    names := [...]string{
        SourceIstioCA:         "IstioCA",
        SourceMeshConfig:      "MeshConfig",
        SourceIstioRA:         "IstioRA",
        sourceSpiffeEndpoints: "SpiffeEndpoints",
    }
    if s < 0 || int(s) >= len(names) {
        return "Unknown"
    }
    return names[s]
}
// TrustAnchorConfig is the set of PEM-encoded CA certificates supplied by one source.
type TrustAnchorConfig struct {
    Certs []string
}

// TrustAnchorUpdate is a TrustAnchorConfig tagged with the source requesting the update.
type TrustAnchorUpdate struct {
    TrustAnchorConfig
    Source Source
}
// TrustBundle merges trust anchors from multiple sources into a single sorted,
// de-duplicated list of CA certificates.
type TrustBundle struct {
    // sourceConfig holds the latest anchors per source; guarded by mutex.
    sourceConfig map[Source]TrustAnchorConfig
    mutex sync.RWMutex
    // mergedCerts is the sorted union of all sources' certs; guarded by mutex.
    mergedCerts []string
    // updatecb, if set, is invoked after each successful anchor update.
    updatecb func()
    // endpointMutex guards endpoints.
    endpointMutex sync.RWMutex
    // endpoints is the list of remote SPIFFE bundle endpoints to poll.
    endpoints []string
    // endpointUpdateChan signals that endpoints changed and a re-fetch is needed.
    endpointUpdateChan chan struct{}
    // remoteCaCertPool validates TLS connections to remote SPIFFE endpoints.
    remoteCaCertPool *x509.CertPool
}

var (
    trustBundleLog = log.RegisterScope("trustBundle", "Workload mTLS trust bundle logs")
    remoteTimeout = 10 * time.Second
)
// NewTrustBundle returns a TrustBundle with empty anchors registered for every
// known source. When remoteCaCertPool is nil, the system certificate pool is
// used for validating remote SPIFFE bundle endpoints (an error fetching it is
// logged, leaving the pool nil).
func NewTrustBundle(remoteCaCertPool *x509.CertPool) *TrustBundle {
    tb := &TrustBundle{
        sourceConfig: map[Source]TrustAnchorConfig{
            SourceIstioCA:         {Certs: []string{}},
            SourceMeshConfig:      {Certs: []string{}},
            SourceIstioRA:         {Certs: []string{}},
            sourceSpiffeEndpoints: {Certs: []string{}},
        },
        mergedCerts:        []string{},
        updatecb:           nil,
        endpointUpdateChan: make(chan struct{}, 1),
        endpoints:          []string{},
    }
    if remoteCaCertPool != nil {
        tb.remoteCaCertPool = remoteCaCertPool
        return tb
    }
    pool, err := x509.SystemCertPool()
    if err != nil {
        trustBundleLog.Errorf("failed to initialize remote Cert pool: %v", err)
    }
    tb.remoteCaCertPool = pool
    return tb
}
// UpdateCb registers a callback invoked after each successful trust anchor update.
// NOTE(review): updatecb is written here without holding tb.mutex while
// UpdateTrustAnchor reads it unguarded; presumably callers register the
// callback once at startup before updates begin — confirm.
func (tb *TrustBundle) UpdateCb(updatecb func()) {
    tb.updatecb = updatecb
}
// GetTrustBundle : Retrieves all the trustAnchors for current Spiffee Trust Domain
// A defensive copy is returned so callers cannot mutate the internal slice.
func (tb *TrustBundle) GetTrustBundle() []string {
    tb.mutex.RLock()
    defer tb.mutex.RUnlock()
    snapshot := make([]string, len(tb.mergedCerts))
    copy(snapshot, tb.mergedCerts)
    return snapshot
}
// verifyTrustAnchor checks that trustAnchor is a PEM-encoded X.509 certificate
// with the CA flag set, returning a descriptive error otherwise.
func verifyTrustAnchor(trustAnchor string) error {
    block, _ := pem.Decode([]byte(trustAnchor))
    if block == nil {
        return fmt.Errorf("failed to decode pem certificate")
    }
    cert, err := x509.ParseCertificate(block.Bytes)
    switch {
    case err != nil:
        return fmt.Errorf("failed to parse X.509 certificate: %v", err)
    case !cert.IsCA:
        return fmt.Errorf("certificate is not a CA certificate")
    default:
        return nil
    }
}
// mergeInternal recomputes mergedCerts as the sorted, de-duplicated union of
// the certificates from every configured source.
func (tb *TrustBundle) mergeInternal() {
    seen := map[string]struct{}{}
    var merged []string
    tb.mutex.Lock()
    defer tb.mutex.Unlock()
    for _, src := range tb.sourceConfig {
        for _, cert := range src.Certs {
            if _, dup := seen[cert]; dup {
                continue
            }
            seen[cert] = struct{}{}
            merged = append(merged, cert)
        }
    }
    sort.Strings(merged)
    tb.mergedCerts = merged
}
// UpdateTrustAnchor : External Function to merge a TrustAnchor config with the existing TrustBundle.
// It validates every certificate, replaces the source's config, re-merges the
// bundle, and fires the registered update callback. Returns an error for an
// unknown source or an invalid certificate; a no-op update returns nil.
//
// Fix: dropped the redundant `var ok bool` / `var err error` pre-declarations —
// both were immediately superseded by short variable declarations below,
// inviting accidental shadowing.
func (tb *TrustBundle) UpdateTrustAnchor(anchorConfig *TrustAnchorUpdate) error {
    tb.mutex.RLock()
    cachedConfig, ok := tb.sourceConfig[anchorConfig.Source]
    tb.mutex.RUnlock()
    if !ok {
        return fmt.Errorf("invalid source of TrustBundle configuration %v", anchorConfig.Source)
    }
    // Check if anything needs to be changed at all
    if slices.Equal(anchorConfig.Certs, cachedConfig.Certs) {
        trustBundleLog.Debugf("no change to trustAnchor configuration after recent update")
        return nil
    }
    // Reject the entire update if any anchor is not a valid CA certificate.
    for _, cert := range anchorConfig.Certs {
        if err := verifyTrustAnchor(cert); err != nil {
            return err
        }
    }
    tb.mutex.Lock()
    tb.sourceConfig[anchorConfig.Source] = anchorConfig.TrustAnchorConfig
    tb.mutex.Unlock()
    tb.mergeInternal()
    trustBundleLog.Infof("updating Source %v with certs %v",
        anchorConfig.Source,
        strings.Join(anchorConfig.TrustAnchorConfig.Certs, "\n"))
    if tb.updatecb != nil {
        tb.updatecb()
    }
    return nil
}
// updateRemoteEndpoint replaces the set of SPIFFE bundle endpoints and, when it
// actually changed, signals the processing loop to re-fetch remote trust anchors.
func (tb *TrustBundle) updateRemoteEndpoint(spiffeEndpoints []string) {
    tb.endpointMutex.RLock()
    remoteEndpoints := tb.endpoints
    tb.endpointMutex.RUnlock()
    // Order-sensitive comparison: the same endpoints in a different order count as a change.
    if slices.Equal(spiffeEndpoints, remoteEndpoints) {
        return
    }
    trustBundleLog.Infof("updated remote endpoints :%v", spiffeEndpoints)
    tb.endpointMutex.Lock()
    tb.endpoints = spiffeEndpoints
    tb.endpointMutex.Unlock()
    // Blocking send on a 1-buffered channel: may wait until
    // ProcessRemoteTrustAnchors drains a previously pending signal.
    tb.endpointUpdateChan <- struct{}{}
}
// AddMeshConfigUpdate : Update trustAnchor configurations from meshConfig
// Inline PEM certificates become the SourceMeshConfig anchors; SPIFFE bundle
// URLs become the remote endpoint set. A nil config is a no-op.
func (tb *TrustBundle) AddMeshConfigUpdate(cfg *meshconfig.MeshConfig) error {
    if cfg == nil {
        return nil
    }
    certs := []string{}
    endpoints := []string{}
    for _, pemCert := range cfg.GetCaCertificates() {
        if cert := pemCert.GetPem(); cert != "" {
            certs = append(certs, cert)
        } else if url := pemCert.GetSpiffeBundleUrl(); url != "" {
            endpoints = append(endpoints, url)
        }
    }
    update := &TrustAnchorUpdate{
        TrustAnchorConfig: TrustAnchorConfig{Certs: certs},
        Source:            SourceMeshConfig,
    }
    if err := tb.UpdateTrustAnchor(update); err != nil {
        trustBundleLog.Errorf("failed to update meshConfig PEM trustAnchors: %v", err)
        return err
    }
    tb.updateRemoteEndpoint(endpoints)
    return nil
}
// fetchRemoteTrustAnchors retrieves root certificates from every configured
// SPIFFE bundle endpoint for the current trust domain and stores them under
// sourceSpiffeEndpoints. Per-endpoint failures are logged and skipped.
func (tb *TrustBundle) fetchRemoteTrustAnchors() {
    tb.endpointMutex.RLock()
    endpoints := tb.endpoints
    tb.endpointMutex.RUnlock()
    trustDomain := spiffe.GetTrustDomain()
    remoteCerts := []string{}
    for _, endpoint := range endpoints {
        anchors, err := spiffe.RetrieveSpiffeBundleRootCerts(
            map[string]string{trustDomain: endpoint}, tb.remoteCaCertPool, remoteTimeout)
        if err != nil {
            trustBundleLog.Errorf("unable to fetch trust Anchors from endpoint %s: %s", endpoint, err)
            continue
        }
        for _, cert := range anchors[trustDomain] {
            certStr := string(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))
            trustBundleLog.Debugf("from endpoint %v, fetched trust anchor cert: %v", endpoint, certStr)
            remoteCerts = append(remoteCerts, certStr)
        }
    }
    if err := tb.UpdateTrustAnchor(&TrustAnchorUpdate{
        TrustAnchorConfig: TrustAnchorConfig{Certs: remoteCerts},
        Source:            sourceSpiffeEndpoints,
    }); err != nil {
        trustBundleLog.Errorf("failed to update meshConfig Spiffe trustAnchors: %v", err)
    }
}
// ProcessRemoteTrustAnchors re-fetches remote trust anchors every pollInterval
// and on demand whenever updateRemoteEndpoint signals a change; it runs until
// stop is closed. Intended to run in its own goroutine.
func (tb *TrustBundle) ProcessRemoteTrustAnchors(stop <-chan struct{}, pollInterval time.Duration) {
    ticker := time.NewTicker(pollInterval)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            trustBundleLog.Infof("waking up to perform periodic checks")
            tb.fetchRemoteTrustAnchors()
        case <-stop:
            trustBundleLog.Infof("stop processing endpoint trustAnchor updates")
            return
        case <-tb.endpointUpdateChan:
            tb.fetchRemoteTrustAnchors()
            trustBundleLog.Infof("processing endpoint trustAnchor Updates for config change")
        }
    }
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package informermetric
import (
"sync"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
)
var (
    clusterLabel = monitoring.CreateLabel("cluster")
    // errorMetric counts informer watch errors, labeled by cluster.
    // Fix: the help text previously read "Total number of errorMetric syncing
    // controllers." — an accidental identifier substitution in the description.
    errorMetric = monitoring.NewSum(
        "controller_sync_errors_total",
        "Total number of errors syncing controllers.",
    )
    // mu guards handlers.
    mu sync.RWMutex
    // handlers caches one watch error handler per cluster.
    handlers = map[cluster.ID]cache.WatchErrorHandler{}
)
// ErrorHandlerForCluster fetches or creates an ErrorHandler that emits a metric
// and logs when a watch error occurs. For use with SetWatchErrorHandler on SharedInformer.
func ErrorHandlerForCluster(clusterID cluster.ID) cache.WatchErrorHandler {
    // Fast path: handler already registered.
    mu.RLock()
    handler, ok := handlers[clusterID]
    mu.RUnlock()
    if ok {
        return handler
    }
    mu.Lock()
    defer mu.Unlock()
    // Fix: re-check under the write lock. Previously two goroutines racing past
    // the RUnlock above would each build a handler, with the second silently
    // overwriting the first in the map — so the first caller's returned handler
    // differed from the cached one.
    if handler, ok := handlers[clusterID]; ok {
        return handler
    }
    clusterMetric := errorMetric.With(clusterLabel.Value(clusterID.String()))
    h := func(_ *cache.Reflector, err error) {
        clusterMetric.Increment()
        log.Errorf("watch error in cluster %s: %v", clusterID, err)
    }
    handlers[clusterID] = h
    return h
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package network
import (
"context"
"errors"
"fmt"
"net"
"net/netip"
"strconv"
"time"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/sleep"
)
// Network-related utility functions
const (
    // waitInterval is the polling period while waiting for private IPs to appear.
    waitInterval = 100 * time.Millisecond
    // waitTimeout bounds the total wait when the caller's context carries no deadline.
    waitTimeout = 2 * time.Minute
)

// ip family enum
type IPFamilyType int

const (
    // NOTE(review): these are untyped iota constants rather than typed
    // IPFamilyType values — confirm before tightening the types.
    IPv4 = iota
    IPv6
    UNKNOWN
)

// lookupIPAddrType mirrors net.Resolver.LookupNetIP's shape, letting tests stub DNS resolution.
type lookupIPAddrType = func(ctx context.Context, addr string) ([]netip.Addr, error)

// ErrResolveNoAddress error occurs when IP address resolution is attempted,
// but no address was provided.
var ErrResolveNoAddress = fmt.Errorf("no address specified")
// GetPrivateIPs blocks until private IP addresses are available, or a timeout is reached.
// If ctx has no deadline, a default timeout (waitTimeout) is applied. The bool
// result reports whether the returned addresses are complete (no interface was
// still unconfigured).
func GetPrivateIPs(ctx context.Context) ([]string, bool) {
    if _, hasDeadline := ctx.Deadline(); !hasDeadline {
        var cancel context.CancelFunc
        ctx, cancel = context.WithTimeout(ctx, waitTimeout)
        defer cancel()
    }
    for ctx.Err() == nil {
        if addrs, ok := GetPrivateIPsIfAvailable(); ok {
            return addrs, true
        }
        // Not ready yet; back off briefly before polling again.
        sleep.UntilContext(ctx, waitInterval)
    }
    // Deadline hit: return whatever is available right now.
    return GetPrivateIPsIfAvailable()
}
// GetPrivateIPsIfAvailable returns all the private IP addresses assigned to
// up, non-loopback interfaces. The bool result is false when at least one
// address was still unspecified (e.g. 0.0.0.0), signaling callers to retry.
func GetPrivateIPsIfAvailable() ([]string, bool) {
    ready := true
    ips := make([]string, 0)
    ifaces, _ := net.Interfaces()
    for _, iface := range ifaces {
        // Skip interfaces that are down or loopback.
        if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 {
            continue
        }
        addrs, _ := iface.Addrs()
        for _, addr := range addrs {
            var ip net.IP
            switch v := addr.(type) {
            case *net.IPNet:
                ip = v.IP
            case *net.IPAddr:
                ip = v.IP
            default:
                continue
            }
            parsed, valid := netip.AddrFromSlice(ip)
            if !valid {
                continue
            }
            // unwrap the IPv4-mapped IPv6 address
            candidate := parsed.Unmap()
            if !candidate.IsValid() || candidate.IsLoopback() ||
                candidate.IsLinkLocalUnicast() || candidate.IsLinkLocalMulticast() {
                continue
            }
            if candidate.IsUnspecified() {
                // Interface exists but has no address assigned yet.
                ready = false
                continue
            }
            ips = append(ips, candidate.String())
        }
    }
    return ips, ready
}
// ResolveAddr resolves an authority address to an IP address. Incoming
// addr can be an IP address or hostname. If addr is an IPv6 address, the IP
// part must be enclosed in square brackets.
//
// LookupIPAddr() may return multiple IP addresses, of which this function returns
// the first IPv4 entry. To use this function in an IPv6 only environment, either
// provide an IPv6 address or ensure the hostname resolves to only IPv6 addresses.
//
// Fixes:
//   - the port was parsed inside the per-address loop; an unparsable port made
//     every iteration `continue`, silently returning ("", nil). It is now
//     parsed once up front and reported as an error.
//   - a successful lookup with zero addresses previously wrapped a nil error
//     with %w; that case now gets its own message.
func ResolveAddr(addr string, lookupIPAddr ...lookupIPAddrType) (string, error) {
    if addr == "" {
        return "", ErrResolveNoAddress
    }
    host, port, err := net.SplitHostPort(addr)
    if err != nil {
        return "", err
    }
    portNum, err := strconv.ParseUint(port, 10, 16)
    if err != nil {
        return "", fmt.Errorf("invalid port %q in address %q: %w", port, addr, err)
    }
    log.Infof("Attempting to lookup address: %s", host)
    defer log.Infof("Finished lookup of address: %s", host)
    // lookup the udp address with a timeout of 15 seconds.
    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
    defer cancel()
    var addrs []netip.Addr
    var lookupErr error
    if (len(lookupIPAddr) > 0) && (lookupIPAddr[0] != nil) {
        // if there are more than one lookup function, ignore all but first
        addrs, lookupErr = lookupIPAddr[0](ctx, host)
    } else {
        addrs, lookupErr = net.DefaultResolver.LookupNetIP(ctx, "ip", host)
    }
    if lookupErr != nil {
        return "", fmt.Errorf("lookup failed for IP address: %w", lookupErr)
    }
    if len(addrs) == 0 {
        return "", fmt.Errorf("lookup returned no IP addresses for host %q", host)
    }
    var resolvedAddr string
    for _, a := range addrs {
        // unwrap the IPv4-mapped IPv6 address
        unwrapAddr := a.Unmap()
        if !unwrapAddr.IsValid() {
            continue
        }
        resolvedAddr = netip.AddrPortFrom(unwrapAddr, uint16(portNum)).String()
        // Stop at the first IPv4 entry; otherwise the last valid address wins.
        if unwrapAddr.Is4() {
            break
        }
    }
    log.Infof("Addr resolved to: %s", resolvedAddr)
    return resolvedAddr, nil
}
// AllIPv6 checks the addresses slice and returns true if all addresses
// are valid IPv6 address, for all other cases it returns false.
func AllIPv6(ipAddrs []string) bool {
    for _, ipAddr := range ipAddrs {
        addr, err := netip.ParseAddr(ipAddr)
        if err != nil {
            // Should not happen, invalid IP in proxy's IPAddresses slice should have been caught earlier,
            // skip it to prevent a panic.
            continue
        }
        if addr.Is4() {
            return false
        }
    }
    return true
}
// AllIPv4 checks the addresses slice and returns true if all addresses
// are valid IPv4 address, for all other cases it returns false.
func AllIPv4(ipAddrs []string) bool {
    for _, ipAddr := range ipAddrs {
        addr, err := netip.ParseAddr(ipAddr)
        if err != nil {
            // Should not happen, invalid IP in proxy's IPAddresses slice should have been caught earlier,
            // skip it to prevent a panic.
            continue
        }
        if !addr.Is4() && addr.Is6() {
            return false
        }
    }
    return true
}
// CheckIPFamilyTypeForFirstIPs checks the ip family type for the first ip addresses
// It returns UNKNOWN with an error when the slice is empty or the first entry
// does not parse as an IP address.
func CheckIPFamilyTypeForFirstIPs(ipAddrs []string) (IPFamilyType, error) {
    if len(ipAddrs) == 0 {
        return UNKNOWN, errors.New("the ipAddr slice is empty")
    }
    netIP, err := netip.ParseAddr(ipAddrs[0])
    if err != nil {
        return UNKNOWN, err
    }
    // NOTE(review): a link-local unicast IPv6 address (fe80::/10) deliberately
    // falls through to the IPv4 result — confirm this is the intended
    // classification for callers.
    if netIP.Is6() && !netIP.IsLinkLocalUnicast() {
        return IPv6, nil
    }
    return IPv4, nil
}
// GlobalUnicastIP returns the first global unicast address in the passed in addresses.
// Unparsable entries are skipped; an empty string is returned when none match.
func GlobalUnicastIP(ipAddrs []string) string {
    for _, ipAddr := range ipAddrs {
        addr, err := netip.ParseAddr(ipAddr)
        if err != nil {
            // Should not happen, invalid IP in proxy's IPAddresses slice should have been caught earlier,
            // skip it to prevent a panic.
            continue
        }
        if addr.IsGlobalUnicast() {
            return addr.String()
        }
    }
    return ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protoconv
import (
"fmt"
udpa "github.com/cncf/xds/go/udpa/type/v1"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/structpb"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/log"
)
// MessageToAnyWithError converts from proto message to proto Any
// The Any is assembled by hand (rather than via anypb helpers) so that marshal
// can take the deterministic / vtprotobuf fast path.
func MessageToAnyWithError(msg proto.Message) (*anypb.Any, error) {
    b, err := marshal(msg)
    if err != nil {
        return nil, err
    }
    return &anypb.Any{
        // nolint: staticcheck
        TypeUrl: "type.googleapis.com/" + string(msg.ProtoReflect().Descriptor().FullName()),
        Value: b,
    }, nil
}
// marshal serializes msg deterministically, preferring the generated
// vtprotobuf fast path when the feature flag is on and the message supports it.
func marshal(msg proto.Message) ([]byte, error) {
    if features.EnableVtprotobuf {
        if vt, ok := msg.(vtStrictMarshal); ok {
            // Attempt to use more efficient implementation
            // "Strict" is the equivalent to Deterministic=true below
            return vt.MarshalVTStrict()
        }
    }
    // If not available, fallback to normal implementation
    return proto.MarshalOptions{Deterministic: true}.Marshal(msg)
}
// MessageToAny converts from proto message to proto Any.
// On marshaling failure it logs the error and returns nil.
// Fix: use log.Errorf directly instead of log.Error(fmt.Sprintf(...)),
// matching the logging idiom used elsewhere in this codebase.
func MessageToAny(msg proto.Message) *anypb.Any {
    out, err := MessageToAnyWithError(msg)
    if err != nil {
        log.Errorf("error marshaling Any %s: %v", prototext.Format(msg), err)
        return nil
    }
    return out
}
// TypedStructWithFields builds a udpa TypedStruct carrying the given fields,
// wrapped in an Any. A struct conversion failure is logged and the TypedStruct
// is still emitted with a nil Value (preserving prior best-effort behavior).
// Fix: use log.Errorf directly instead of log.Error(fmt.Sprintf(...)).
func TypedStructWithFields(typeURL string, fields map[string]interface{}) *anypb.Any {
    value, err := structpb.NewStruct(fields)
    if err != nil {
        log.Errorf("error marshaling struct %s: %v", typeURL, err)
    }
    return MessageToAny(&udpa.TypedStruct{
        TypeUrl: typeURL,
        Value:   value,
    })
}
// SilentlyUnmarshalAny is UnmarshalAny with the error discarded: it returns
// nil when the Any cannot be unmarshaled into T.
func SilentlyUnmarshalAny[T any](a *anypb.Any) *T {
    if res, err := UnmarshalAny[T](a); err == nil {
        return res
    }
    return nil
}
// UnmarshalAny unpacks a into a freshly allocated T, which must be a proto
// message type. Fix: wrap the underlying error with %w (was %v) so callers can
// inspect it with errors.Is/errors.As; the message text is unchanged.
func UnmarshalAny[T any](a *anypb.Any) (*T, error) {
    dst := any(new(T)).(proto.Message)
    if err := a.UnmarshalTo(dst); err != nil {
        return nil, fmt.Errorf("failed to unmarshal to %T: %w", dst, err)
    }
    return any(dst).(*T), nil
}
// vtStrictMarshal is satisfied by messages with vtprotobuf-generated code;
// MarshalVTStrict is the deterministic variant of the generated marshaler.
// https://github.com/planetscale/vtprotobuf#available-features
type vtStrictMarshal interface {
    MarshalVTStrict() ([]byte, error)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package runtime
import (
"runtime"
"istio.io/istio/pkg/log"
)
// LogPanic logs the caller tree when a panic occurs.
// r is the value recovered from the panic; it is logged both verbosely (%#v)
// and plainly (%v) together with a bounded stack trace.
func LogPanic(r any) {
    // Same as stdlib http server code. Manually allocate stack trace buffer size
    // to prevent excessively large logs
    const size = 64 << 10
    stacktrace := make([]byte, size)
    // runtime.Stack reports how many bytes were written; trim to that length.
    stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
    log.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace)
}
// HandleCrash catches the crash and calls additional handlers.
// It must be invoked via defer (recover only works directly inside a deferred
// function); each handler receives the recovered value. The panic is swallowed,
// not re-raised.
func HandleCrash(handlers ...func(any)) {
    if r := recover(); r != nil {
        for _, handler := range handlers {
            handler(r)
        }
    }
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"strconv"
"strings"
"sync/atomic"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
uatomic "go.uber.org/atomic"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/autoregistration"
"istio.io/istio/pilot/pkg/features"
istiogrpc "istio.io/istio/pilot/pkg/grpc"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/env"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
var (
    log = istiolog.RegisterScope("ads", "ads debugging")
    // Tracks connections, increment on each new connection.
    connectionNumber = int64(0)
)

// Used only when running in KNative, to handle the load balancing behavior.
var firstRequest = uatomic.NewBool(true)

// knativeEnv is non-empty only when running inside KNative (K_REVISION is set).
var knativeEnv = env.Register("K_REVISION", "",
    "KNative revision, set if running in knative").Get()

// DiscoveryStream is a server interface for XDS.
type DiscoveryStream = discovery.AggregatedDiscoveryService_StreamAggregatedResourcesServer

// DeltaDiscoveryStream is a server interface for Delta XDS.
type DeltaDiscoveryStream = discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesServer

// DiscoveryClient is a client interface for XDS.
type DiscoveryClient = discovery.AggregatedDiscoveryService_StreamAggregatedResourcesClient

// DeltaDiscoveryClient is a client interface for Delta XDS.
type DeltaDiscoveryClient = discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesClient
// Connection holds information about connected client.
type Connection struct {
    // peerAddr is the address of the client, from network layer.
    peerAddr string
    // Time of connection, for debugging
    connectedAt time.Time
    // conID uniquely identifies this connection and is used as a key in the
    // connection table. Currently based on the node name and a counter.
    conID string
    // proxy is the client to which this connection is established.
    proxy *model.Proxy
    // Sending on this channel results in a push.
    pushChannel chan *Event
    // Both ADS and SDS streams implement this interface
    stream DiscoveryStream
    // deltaStream is used for Delta XDS. Only one of deltaStream or stream will be set
    deltaStream DeltaDiscoveryStream
    // Original node metadata, to avoid unmarshal/marshal.
    // This is included in internal events.
    node *core.Node
    // initialized channel will be closed when proxy is initialized. Pushes, or anything accessing
    // the proxy, should not be started until this channel is closed.
    initialized chan struct{}
    // stop can be used to end the connection manually via debug endpoints. Only to be used for testing.
    stop chan struct{}
    // reqChan is used to receive discovery requests for this connection.
    reqChan chan *discovery.DiscoveryRequest
    // deltaReqChan receives delta discovery requests (delta streams only).
    deltaReqChan chan *discovery.DeltaDiscoveryRequest
    // errorChan is used to process error during discovery request processing.
    errorChan chan error
}
// ID returns the unique identifier of this connection.
func (conn *Connection) ID() string {
    return conn.conID
}

// Proxy returns the proxy (client) this connection serves.
func (conn *Connection) Proxy() *model.Proxy {
    return conn.proxy
}

// ConnectedAt returns the time the connection was established.
func (conn *Connection) ConnectedAt() time.Time {
    return conn.connectedAt
}

// Stop terminates the connection by closing its stop channel.
// Only for use by debug endpoints/tests; calling it twice panics on double close.
func (conn *Connection) Stop() {
    close(conn.stop)
}
// Event represents a config or registry event that results in a push.
type Event struct {
    // pushRequest PushRequest to use for the push.
    pushRequest *model.PushRequest
    // function to call once a push is finished. This must be called or future changes may be blocked.
    done func()
}
// newConnection returns a Connection for a freshly accepted ADS stream.
// reqChan and errorChan are buffered (size 1) so receive() can hand off a
// request or error without immediately blocking on the main loop.
func newConnection(peerAddr string, stream DiscoveryStream) *Connection {
    conn := &Connection{
        peerAddr:    peerAddr,
        connectedAt: time.Now(),
        stream:      stream,
        pushChannel: make(chan *Event),
        initialized: make(chan struct{}),
        stop:        make(chan struct{}),
        reqChan:     make(chan *discovery.DiscoveryRequest, 1),
        errorChan:   make(chan error, 1),
    }
    return conn
}
// receive runs the read side of an ADS stream: it blocks in Recv(), performs
// one-time connection initialization on the first real request, and forwards
// subsequent requests to con.reqChan for the main Stream loop. identities are
// the authenticated peer identities used to authorize the connection.
func (s *DiscoveryServer) receive(con *Connection, identities []string) {
    defer func() {
        close(con.errorChan)
        close(con.reqChan)
        // Close the initialized channel, if it's not already closed, to prevent blocking the stream.
        select {
        case <-con.initialized:
        default:
            close(con.initialized)
        }
    }()
    firstRequest := true
    for {
        req, err := con.stream.Recv()
        if err != nil {
            if istiogrpc.IsExpectedGRPCError(err) {
                log.Infof("ADS: %q %s terminated", con.peerAddr, con.conID)
                return
            }
            con.errorChan <- err
            log.Errorf("ADS: %q %s terminated with error: %v", con.peerAddr, con.conID, err)
            totalXDSInternalErrors.Increment()
            return
        }
        // This should be only set for the first request. The node id may not be set - for example malicious clients.
        if firstRequest {
            // probe happens before envoy sends first xDS request
            if req.TypeUrl == v3.HealthInfoType {
                log.Warnf("ADS: %q %s send health check probe before normal xDS request", con.peerAddr, con.conID)
                continue
            }
            firstRequest = false
            if req.Node == nil || req.Node.Id == "" {
                con.errorChan <- status.New(codes.InvalidArgument, "missing node information").Err()
                return
            }
            if err := s.initConnection(req.Node, con, identities); err != nil {
                con.errorChan <- err
                return
            }
            // Deferred so the connection is removed only when this read loop exits.
            defer s.closeConnection(con)
            log.Infof("ADS: new connection for node:%s", con.conID)
        }
        select {
        case con.reqChan <- req:
        case <-con.stream.Context().Done():
            log.Infof("ADS: %q %s terminated with stream closed", con.peerAddr, con.conID)
            return
        }
    }
}
// processRequest handles one discovery request. This is currently called from the 'main' thread, which also
// handles 'push' requests and close - the code will eventually call the 'push' code, and it needs more mutex
// protection. Original code avoided the mutexes by doing both 'push' and 'process requests' in same thread.
func (s *DiscoveryServer) processRequest(req *discovery.DiscoveryRequest, con *Connection) error {
    stype := v3.GetShortType(req.TypeUrl)
    log.Debugf("ADS:%s: REQ %s resources:%d nonce:%s version:%s ", stype,
        con.conID, len(req.ResourceNames), req.ResponseNonce, req.VersionInfo)
    // Health checks are handled separately and never create watchers.
    if req.TypeUrl == v3.HealthInfoType {
        s.handleWorkloadHealthcheck(con.proxy, req)
        return nil
    }
    // For now, don't let xDS piggyback debug requests start watchers.
    if strings.HasPrefix(req.TypeUrl, v3.DebugType) {
        return s.pushXds(con,
            &model.WatchedResource{TypeUrl: req.TypeUrl, ResourceNames: req.ResourceNames},
            &model.PushRequest{Full: true, Push: con.proxy.LastPushContext})
    }
    if s.StatusReporter != nil {
        s.StatusReporter.RegisterEvent(con.conID, req.TypeUrl, req.ResponseNonce)
    }
    // shouldRespond applies the xDS ACK/NACK protocol rules; pure ACKs need no response.
    shouldRespond, delta := s.shouldRespond(con, req)
    if !shouldRespond {
        return nil
    }
    request := &model.PushRequest{
        Full: true,
        Push: con.proxy.LastPushContext,
        Reason: model.NewReasonStats(model.ProxyRequest),
        // The usage of LastPushTime (rather than time.Now()), is critical here for correctness; This time
        // is used by the XDS cache to determine if an entry is stale. If we use Now() with an old push context,
        // we may end up overriding active cache entries with stale ones.
        Start: con.proxy.LastPushTime,
        Delta: delta,
    }
    // SidecarScope for the proxy may not have been updated based on this pushContext.
    // It can happen when `processRequest` comes after push context has been updated(s.initPushContext),
    // but proxy's SidecarScope has been updated(s.computeProxyState -> SetSidecarScope) due to optimizations that skip sidecar scope
    // computation.
    if con.proxy.SidecarScope != nil && con.proxy.SidecarScope.Version != request.Push.PushVersion {
        s.computeProxyState(con.proxy, request)
    }
    return s.pushXds(con, con.Watched(req.TypeUrl), request)
}
// StreamAggregatedResources implements the ADS interface. It delegates to
// Stream, which contains the actual connection handling.
func (s *DiscoveryServer) StreamAggregatedResources(stream DiscoveryStream) error {
    return s.Stream(stream)
}
// Stream handles a single ADS connection end to end: readiness and rate-limit
// checks, authentication, spawning the read goroutine (receive), then the main
// request/push loop until the stream ends.
func (s *DiscoveryServer) Stream(stream DiscoveryStream) error {
    if knativeEnv != "" && firstRequest.Load() {
        // How scaling works in knative is the first request is the "loading" request. During
        // loading request, concurrency=1. Once that request is done, concurrency is enabled.
        // However, the XDS stream is long lived, so the first request would block all others. As a
        // result, we should exit the first request immediately; clients will retry.
        firstRequest.Store(false)
        return status.Error(codes.Unavailable, "server warmup not complete; try again")
    }
    // Check if server is ready to accept clients and process new requests.
    // Currently ready means caches have been synced and hence can build
    // clusters correctly. Without this check, InitContext() call below would
    // initialize with empty config, leading to reconnected Envoys losing
    // configuration. This is an additional safety check in addition to adding
    // cachesSynced logic to readiness probe to handle cases where kube-proxy
    // ip tables update latencies.
    // See https://github.com/istio/istio/issues/25495.
    if !s.IsServerReady() {
        return status.Error(codes.Unavailable, "server is not ready to serve discovery information")
    }
    ctx := stream.Context()
    peerAddr := "0.0.0.0"
    if peerInfo, ok := peer.FromContext(ctx); ok {
        peerAddr = peerInfo.Addr.String()
    }
    if err := s.WaitForRequestLimit(stream.Context()); err != nil {
        log.Warnf("ADS: %q exceeded rate limit: %v", peerAddr, err)
        return status.Errorf(codes.ResourceExhausted, "request rate limit exceeded: %v", err)
    }
    ids, err := s.authenticate(ctx)
    if err != nil {
        return status.Error(codes.Unauthenticated, err.Error())
    }
    if ids != nil {
        log.Debugf("Authenticated XDS: %v with identity %v", peerAddr, ids)
    } else {
        log.Debugf("Unauthenticated XDS: %s", peerAddr)
    }
    // InitContext returns immediately if the context was already initialized.
    if err = s.globalPushContext().InitContext(s.Env, nil, nil); err != nil {
        // Error accessing the data - log and close, maybe a different pilot replica
        // has more luck
        log.Warnf("Error reading config %v", err)
        return status.Error(codes.Unavailable, "error reading config")
    }
    con := newConnection(peerAddr, stream)
    // Do not call: defer close(con.pushChannel). The push channel will be garbage collected
    // when the connection is no longer used. Closing the channel can cause subtle race conditions
    // with push. According to the spec: "It's only necessary to close a channel when it is important
    // to tell the receiving goroutines that all data have been sent."
    // Block until either a request is received or a push is triggered.
    // We need 2 go routines because 'read' blocks in Recv().
    go s.receive(con, ids)
    // Wait for the proxy to be fully initialized before we start serving traffic. Because
    // initialization doesn't have dependencies that will block, there is no need to add any timeout
    // here. Prior to this explicit wait, we were implicitly waiting by receive() not sending to
    // reqChannel and the connection not being enqueued for pushes to pushChannel until the
    // initialization is complete.
    <-con.initialized
    for {
        // Go select{} statements are not ordered; the same channel can be chosen many times.
        // For requests, these are higher priority (client may be blocked on startup until these are done)
        // and often very cheap to handle (simple ACK), so we check it first.
        select {
        case req, ok := <-con.reqChan:
            if ok {
                if err := s.processRequest(req, con); err != nil {
                    return err
                }
            } else {
                // Remote side closed connection or error processing the request.
                return <-con.errorChan
            }
        case <-con.stop:
            return nil
        default:
        }
        // If there wasn't already a request, poll for requests and pushes. Note: if we have a huge
        // amount of incoming requests, we may still send some pushes, as we do not `continue` above;
        // however, requests will be handled ~2x as much as pushes. This ensures a wave of requests
        // cannot completely starve pushes. However, this scenario is unlikely.
        select {
        case req, ok := <-con.reqChan:
            if ok {
                if err := s.processRequest(req, con); err != nil {
                    return err
                }
            } else {
                // Remote side closed connection or error processing the request.
                return <-con.errorChan
            }
        case pushEv := <-con.pushChannel:
            err := s.pushConnection(con, pushEv)
            // done() must always run so future pushes for this proxy are not blocked.
            pushEv.done()
            if err != nil {
                return err
            }
        case <-con.stop:
            return nil
        }
    }
}
// emptyResourceDelta is the zero-value delta returned when no resource subscription change applies.
var emptyResourceDelta = model.ResourceDelta{}
// shouldRespond determines whether this request needs to be responded back. It applies the ack/nack rules as per xds protocol
// using WatchedResource for previous state and discovery request for the current state.
// It returns whether to respond, and (for state-of-the-world types) the set of newly
// subscribed resources to push. Locking is deliberately fine-grained: the proxy lock is
// taken and released around each WatchedResources access rather than held throughout.
func (s *DiscoveryServer) shouldRespond(con *Connection, request *discovery.DiscoveryRequest) (bool, model.ResourceDelta) {
	stype := v3.GetShortType(request.TypeUrl)
	// If there is an error in request that means previous response is erroneous.
	// We do not have to respond in that case. In this case request's version info
	// will be different from the version sent. But it is fragile to rely on that.
	if request.ErrorDetail != nil {
		errCode := codes.Code(request.ErrorDetail.Code)
		log.Warnf("ADS:%s: ACK ERROR %s %s:%s", stype, con.conID, errCode.String(), request.ErrorDetail.GetMessage())
		incrementXDSRejects(request.TypeUrl, con.proxy.ID, errCode.String())
		return false, emptyResourceDelta
	}
	if shouldUnsubscribe(request) {
		log.Debugf("ADS:%s: UNSUBSCRIBE %s %s %s", stype, con.conID, request.VersionInfo, request.ResponseNonce)
		con.proxy.Lock()
		delete(con.proxy.WatchedResources, request.TypeUrl)
		con.proxy.Unlock()
		return false, emptyResourceDelta
	}
	con.proxy.RLock()
	previousInfo := con.proxy.WatchedResources[request.TypeUrl]
	con.proxy.RUnlock()
	// This can happen in two cases:
	// 1. When Envoy starts for the first time, it sends an initial Discovery request to Istiod.
	// 2. When Envoy reconnects to a new Istiod that does not have information about this typeUrl
	// i.e. non empty response nonce.
	// We should always respond with the current resource names.
	if request.ResponseNonce == "" || previousInfo == nil {
		log.Debugf("ADS:%s: INIT/RECONNECT %s %s %s", stype, con.conID, request.VersionInfo, request.ResponseNonce)
		con.proxy.Lock()
		con.proxy.WatchedResources[request.TypeUrl] = &model.WatchedResource{TypeUrl: request.TypeUrl, ResourceNames: request.ResourceNames}
		// For all EDS requests that we have already responded with in the same stream let us
		// force the response. It is important to respond to those requests for Envoy to finish
		// warming of those resources(Clusters).
		// This can happen with the following sequence
		// 1. Envoy disconnects and reconnects to Istiod.
		// 2. Envoy sends EDS request and we respond with it.
		// 3. Envoy sends CDS request and we respond with clusters.
		// 4. Envoy detects a change in cluster state and tries to warm those clusters and send EDS request for them.
		// 5. We should respond to the EDS request with Endpoints to let Envoy finish cluster warming.
		// Refer to https://github.com/envoyproxy/envoy/issues/13009 for more details.
		for _, dependent := range warmingDependencies(request.TypeUrl) {
			if dwr, exists := con.proxy.WatchedResources[dependent]; exists {
				dwr.AlwaysRespond = true
			}
		}
		con.proxy.Unlock()
		return true, emptyResourceDelta
	}
	// If there is mismatch in the nonce, that is a case of expired/stale nonce.
	// A nonce becomes stale following a newer nonce being sent to Envoy.
	// previousInfo.NonceSent can be empty if we previously had shouldRespond=true but didn't send any resources.
	if request.ResponseNonce != previousInfo.NonceSent {
		if features.EnableUnsafeAssertions && previousInfo.NonceSent == "" {
			// Assert we do not end up in an invalid state
			log.Fatalf("ADS:%s: REQ %s Expired nonce received %s, but we never sent any nonce", stype,
				con.conID, request.ResponseNonce)
		}
		log.Debugf("ADS:%s: REQ %s Expired nonce received %s, sent %s", stype,
			con.conID, request.ResponseNonce, previousInfo.NonceSent)
		xdsExpiredNonce.With(typeTag.Value(v3.GetMetricType(request.TypeUrl))).Increment()
		return false, emptyResourceDelta
	}
	// If it comes here, that means nonce match.
	// Record the ACK and the currently-requested resource names; capture-and-clear
	// AlwaysRespond under the lock so a forced warming response happens exactly once.
	con.proxy.Lock()
	previousResources := con.proxy.WatchedResources[request.TypeUrl].ResourceNames
	con.proxy.WatchedResources[request.TypeUrl].NonceAcked = request.ResponseNonce
	con.proxy.WatchedResources[request.TypeUrl].ResourceNames = request.ResourceNames
	alwaysRespond := previousInfo.AlwaysRespond
	previousInfo.AlwaysRespond = false
	con.proxy.Unlock()
	// Envoy can send two DiscoveryRequests with same version and nonce.
	// when it detects a new resource. We should respond if they change.
	prev := sets.New(previousResources...)
	cur := sets.New(request.ResourceNames...)
	removed := prev.Difference(cur)
	added := cur.Difference(prev)
	// We should always respond "alwaysRespond" marked requests to let Envoy finish warming
	// even though Nonce match and it looks like an ACK.
	if alwaysRespond {
		log.Infof("ADS:%s: FORCE RESPONSE %s for warming.", stype, con.conID)
		return true, emptyResourceDelta
	}
	if len(removed) == 0 && len(added) == 0 {
		log.Debugf("ADS:%s: ACK %s %s %s", stype, con.conID, request.VersionInfo, request.ResponseNonce)
		return false, emptyResourceDelta
	}
	log.Debugf("ADS:%s: RESOURCE CHANGE added %v removed %v %s %s %s", stype,
		added, removed, con.conID, request.VersionInfo, request.ResponseNonce)
	// For non wildcard resource, if no new resources are subscribed, it means we do not need to push.
	if !isWildcardTypeURL(request.TypeUrl) && len(added) == 0 {
		return false, emptyResourceDelta
	}
	return true, model.ResourceDelta{
		Subscribed: added,
		// we do not need to set unsubscribed for StoW
	}
}
// shouldUnsubscribe reports whether this request is an unsubscribe: Envoy sent an
// empty resource list for a type that is not wildcard-semantic. This may happen on
// the very first request for a type (e.g. no RDS routes are referenced), in which
// case we simply never add the type to the watched-resource list.
func shouldUnsubscribe(request *discovery.DiscoveryRequest) bool {
	if len(request.ResourceNames) > 0 {
		return false
	}
	return !isWildcardTypeURL(request.TypeUrl)
}
// isWildcardTypeURL checks whether a given type uses wildcard subscription semantics.
// https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#how-the-client-specifies-what-resources-to-return
// Per the xDS spec only Cluster and Listener are wildcard; Secret, Endpoint, Route
// and ExtensionConfiguration are explicitly subscribed. Every other (internal) type
// is treated as wildcard.
func isWildcardTypeURL(typeURL string) bool {
	switch typeURL {
	case v3.SecretType, v3.EndpointType, v3.RouteType, v3.ExtensionConfigurationType:
		// By XDS spec, these are not wildcard
		return false
	}
	// v3.ClusterType and v3.ListenerType are wildcard by spec; all of our
	// internal types use wildcard semantics as well.
	return true
}
// warmingDependencies returns the dependent typeURLs that must also be responded
// with so that Envoy can finish warming resources of the given typeURL.
// Currently only CDS has such a dependency: warming a Cluster requires a fresh EDS push.
func warmingDependencies(typeURL string) []string {
	if typeURL == v3.ClusterType {
		return []string{v3.EndpointType}
	}
	return nil
}
// update the node associated with the connection, after receiving a packet from envoy, also adds the connection
// to the tracking map.
// Steps: parse/authorize metadata, register the connection for pushes, then complete
// full proxy initialization. The ordering of addCon vs initializeProxy is deliberate
// (see inline comment); callers must treat any returned error as fatal for the stream.
func (s *DiscoveryServer) initConnection(node *core.Node, con *Connection, identities []string) error {
	// Setup the initial proxy metadata
	proxy, err := s.initProxyMetadata(node)
	if err != nil {
		return err
	}
	// Check if proxy cluster has an alias configured, if yes use that as cluster ID for this proxy.
	if alias, exists := s.ClusterAliases[proxy.Metadata.ClusterID]; exists {
		proxy.Metadata.ClusterID = alias
	}
	// To ensure push context is monotonically increasing, setup LastPushContext before we addCon. This
	// way only new push contexts will be registered for this proxy.
	proxy.LastPushContext = s.globalPushContext()
	// First request so initialize connection id and start tracking it.
	con.conID = connectionID(proxy.ID)
	con.node = node
	con.proxy = proxy
	// Authorize xds clients
	if err := s.authorize(con, identities); err != nil {
		return err
	}
	// Register the connection. this allows pushes to be triggered for the proxy. Note: the timing of
	// this and initializeProxy important. While registering for pushes *after* initialization is complete seems like
	// a better choice, it introduces a race condition; If we complete initialization of a new push
	// context between initializeProxy and addCon, we would not get any pushes triggered for the new
	// push context, leading the proxy to have a stale state until the next full push.
	s.addCon(con.conID, con)
	// Register that initialization is complete. This triggers to calls that it is safe to access the
	// proxy
	defer close(con.initialized)
	// Complete full initialization of the proxy
	if err := s.initializeProxy(con); err != nil {
		s.closeConnection(con)
		return err
	}
	return nil
}
// closeConnection tears down tracking state for a connection. A connection that was
// never fully registered (empty conID) is ignored.
func (s *DiscoveryServer) closeConnection(con *Connection) {
	if con.conID == "" {
		return
	}
	s.removeCon(con.conID)
	if reporter := s.StatusReporter; reporter != nil {
		reporter.RegisterDisconnect(con.conID, AllTrackingEventTypes)
	}
	s.WorkloadEntryController.OnDisconnect(con)
}
// connectionID builds a unique identifier for a connection by appending a
// monotonically increasing sequence number to the node name.
func connectionID(node string) string {
	seq := strconv.FormatInt(atomic.AddInt64(&connectionNumber, 1), 10)
	return node + "-" + seq
}
// ResetConnectionNumberForTest resets the global connection sequence counter to zero.
// Only used for test.
func ResetConnectionNumberForTest() {
	atomic.StoreInt64(&connectionNumber, 0)
}
// initProxyMetadata initializes just the basic metadata of a proxy. This is decoupled from
// initProxyState such that we can perform authorization before attempting expensive computations to
// fully initialize the proxy. Malformed node metadata is reported as InvalidArgument.
func (s *DiscoveryServer) initProxyMetadata(node *core.Node) (*model.Proxy, error) {
	meta, err := model.ParseMetadata(node.Metadata)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	proxy, err := model.ParseServiceNodeWithMetadata(node.Id, meta)
	if err != nil {
		return nil, status.Error(codes.InvalidArgument, err.Error())
	}
	// Update the config namespace associated with this proxy
	proxy.ConfigNamespace = model.GetProxyConfigNamespace(proxy)
	proxy.XdsNode = node
	return proxy, nil
}
// setTopologyLabels sets locality, cluster, network label.
// must be called after `SetWorkloadLabels` and `SetServiceTargets`.
// Locality is preferred from labels (populated by the service registries, which have
// the best access to locality information); the discovery-request locality is only a
// fallback, since only the connected Pilot knows it.
func setTopologyLabels(proxy *model.Proxy) {
	loc := localityFromProxyLabels(proxy)
	if loc == nil {
		// Fall back to the locality Envoy reported in its node metadata.
		nodeLocality := proxy.XdsNode.Locality
		loc = &core.Locality{
			Region:  nodeLocality.GetRegion(),
			Zone:    nodeLocality.GetZone(),
			SubZone: nodeLocality.GetSubZone(),
		}
	}
	proxy.Locality = loc
	// add topology labels to proxy labels
	proxy.Labels = labelutil.AugmentLabels(
		proxy.Labels,
		proxy.Metadata.ClusterID,
		util.LocalityToString(proxy.Locality),
		proxy.GetNodeName(),
		proxy.Metadata.Network,
	)
}
// localityFromProxyLabels derives a locality from the proxy's topology labels.
// If none of the standard topology labels is present, it falls back to the
// (mostly undocumented/internal) locality override label; returns nil when
// no locality information is available from labels at all.
func localityFromProxyLabels(proxy *model.Proxy) *core.Locality {
	region, hasRegion := proxy.Labels[labelutil.LabelTopologyRegion]
	zone, hasZone := proxy.Labels[labelutil.LabelTopologyZone]
	subzone, hasSubzone := proxy.Labels[label.TopologySubzone.Name]
	if hasRegion || hasZone || hasSubzone {
		return &core.Locality{
			Region:  region,
			Zone:    zone,
			SubZone: subzone,
		}
	}
	// No topology labels from the registry; honor the explicit override label if set.
	if override, ok := proxy.Labels[model.LocalityLabel]; ok {
		return util.ConvertLocality(override)
	}
	return nil
}
// initializeProxy completes the initialization of a proxy. It is expected to be called only after
// initProxyMetadata.
// Ordering matters: workload-entry registration happens before proxy state is computed
// (see inline TODO), and WatchedResources is (re)set to an empty map here.
func (s *DiscoveryServer) initializeProxy(con *Connection) error {
	proxy := con.proxy
	// this should be done before we look for service instances, but after we load metadata
	// TODO fix check in kubecontroller treat echo VMs like there isn't a pod
	if err := s.WorkloadEntryController.OnConnect(con); err != nil {
		return err
	}
	s.computeProxyState(proxy, nil)
	// Discover supported IP Versions of proxy so that appropriate config can be delivered.
	proxy.DiscoverIPMode()
	proxy.WatchedResources = map[string]*model.WatchedResource{}
	// Based on node metadata and version, we can associate a different generator.
	if proxy.Metadata.Generator != "" {
		proxy.XdsResourceGenerator = s.Generators[proxy.Metadata.Generator]
	}
	return nil
}
// computeProxyState (re)computes the derived state of a proxy: service targets,
// workload/topology labels, sidecar scope, and gateways. A nil request means first-time
// initialization (recompute everything against the proxy's LastPushContext); otherwise
// the request's push context and changed configs determine how much to recompute.
func (s *DiscoveryServer) computeProxyState(proxy *model.Proxy, request *model.PushRequest) {
	proxy.SetServiceTargets(s.Env.ServiceDiscovery)
	// only recompute workload labels when
	// 1. stream established and proxy first time initialization
	// 2. proxy update
	recomputeLabels := request == nil || request.IsProxyUpdate()
	if recomputeLabels {
		proxy.SetWorkloadLabels(s.Env)
		setTopologyLabels(proxy)
	}
	// Precompute the sidecar scope and merged gateways associated with this proxy.
	// Saves compute cycles in networking code. Though this might be redundant sometimes, we still
	// have to compute this because as part of a config change, a new Sidecar could become
	// applicable to this proxy
	var sidecar, gateway bool
	push := proxy.LastPushContext
	if request == nil {
		sidecar = true
		gateway = true
	} else {
		push = request.Push
		// An empty ConfigsUpdated set means "everything may have changed".
		if len(request.ConfigsUpdated) == 0 {
			sidecar = true
			gateway = true
		}
		// Otherwise, only recompute the scopes affected by the changed config kinds.
		for conf := range request.ConfigsUpdated {
			switch conf.Kind {
			case kind.ServiceEntry, kind.DestinationRule, kind.VirtualService, kind.Sidecar, kind.HTTPRoute, kind.TCPRoute:
				sidecar = true
			case kind.Gateway, kind.KubernetesGateway, kind.GatewayClass, kind.ReferenceGrant:
				gateway = true
			case kind.Ingress:
				sidecar = true
				gateway = true
			}
			if sidecar && gateway {
				break
			}
		}
	}
	// compute the sidecarscope for both proxy type whenever it changes.
	if sidecar {
		proxy.SetSidecarScope(push)
	}
	// only compute gateways for "router" type proxy.
	if gateway && proxy.Type == model.Router {
		proxy.SetGatewaysForProxy(push)
	}
	proxy.LastPushContext = push
	if request != nil {
		proxy.LastPushTime = request.Start
	}
}
// handleWorkloadHealthcheck processes HealthInformation type Url.
// A request without an ErrorDetail marks the workload healthy; otherwise the error
// message is propagated. No-op unless workload-entry health checks are enabled.
func (s *DiscoveryServer) handleWorkloadHealthcheck(proxy *model.Proxy, req *discovery.DiscoveryRequest) {
	if !features.WorkloadEntryHealthChecks {
		return
	}
	event := autoregistration.HealthEvent{Healthy: req.ErrorDetail == nil}
	if req.ErrorDetail != nil {
		event.Message = req.ErrorDetail.Message
	}
	s.WorkloadEntryController.QueueWorkloadEntryHealth(proxy, event)
}
// DeltaAggregatedResources implements the delta ADS endpoint by delegating to StreamDeltas.
// Generators may send only updates/add, with Delete indicated by an empty spec.
// This works if both ends follow this model. For example EDS and the API generator follow this
// pattern.
//
// The delta protocol changes the request, adding unsubscribe/subscribe instead of sending full
// list of resources. On the response it adds 'removed resources' and sends changes for everything.
func (s *DiscoveryServer) DeltaAggregatedResources(stream discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error {
	return s.StreamDeltas(stream)
}
// Compute and send the new configuration for a connection.
// For full pushes the proxy state is recomputed first; if the push request does not
// apply to this proxy the push is skipped (reporting distribution events for full
// pushes only, since incremental pushes carry no new version).
func (s *DiscoveryServer) pushConnection(con *Connection, pushEv *Event) error {
	pushRequest := pushEv.pushRequest
	if pushRequest.Full {
		// Update Proxy with current information.
		s.computeProxyState(con.proxy, pushRequest)
	}
	if !s.ProxyNeedsPush(con.proxy, pushRequest) {
		log.Debugf("Skipping push to %v, no updates required", con.conID)
		if pushRequest.Full {
			// Only report for full versions, incremental pushes do not have a new version.
			reportAllEventsForProxyNoPush(con, s.StatusReporter, pushRequest.Push.LedgerVersion)
		}
		return nil
	}
	// Send pushes to all generators
	// Each Generator is responsible for determining if the push event requires a push
	wrl := con.orderWatchedResources()
	for _, w := range wrl {
		if err := s.pushXds(con, w, pushRequest); err != nil {
			return err
		}
	}
	if pushRequest.Full {
		// Report all events for unwatched resources. Watched resources will be reported in pushXds or on ack.
		reportEventsForUnWatched(con, s.StatusReporter, pushRequest.Push.LedgerVersion)
	}
	proxiesConvergeDelay.Record(time.Since(pushRequest.Start).Seconds())
	return nil
}
// PushOrder defines the order that updates will be pushed in. Any types not listed here will be pushed in random
// order after the types listed here. The ordering mirrors Envoy's warming dependencies
// (clusters before endpoints before listeners, etc.).
var PushOrder = []string{v3.ClusterType, v3.EndpointType, v3.ListenerType, v3.RouteType, v3.SecretType}

// KnownOrderedTypeUrls has typeUrls for which we know the order of push.
var KnownOrderedTypeUrls = sets.New(PushOrder...)
// adsClientCount returns the number of currently connected ADS clients.
func (s *DiscoveryServer) adsClientCount() int {
	s.adsClientsMutex.RLock()
	count := len(s.adsClients)
	s.adsClientsMutex.RUnlock()
	return count
}
// ProxyUpdate enqueues a full push for the single proxy identified by cluster ID and
// primary IP address. If no matching proxy is connected to this istiod instance
// (e.g. it is connected to a different replica), this is a no-op.
func (s *DiscoveryServer) ProxyUpdate(clusterID cluster.ID, ip string) {
	var connection *Connection
	for _, v := range s.Clients() {
		// Guard the index access: proxies are expected to report at least one address,
		// but a client with empty IPAddresses must not panic the control plane.
		if v.proxy.Metadata.ClusterID == clusterID && len(v.proxy.IPAddresses) > 0 && v.proxy.IPAddresses[0] == ip {
			connection = v
			break
		}
	}
	// It is possible that the envoy has not connected to this pilot, maybe connected to another pilot
	if connection == nil {
		return
	}
	if log.DebugEnabled() {
		currentlyPending := s.pushQueue.Pending()
		if currentlyPending != 0 {
			log.Debugf("Starting new push while %v were still pending", currentlyPending)
		}
	}
	s.pushQueue.Enqueue(connection, &model.PushRequest{
		Full:   true,
		Push:   s.globalPushContext(),
		Start:  time.Now(),
		Reason: model.NewReasonStats(model.ProxyUpdate),
	})
}
// AdsPushAll will send updates to all nodes, with a full push.
// Mainly used in Debug interface.
func AdsPushAll(s *DiscoveryServer) {
	req := &model.PushRequest{
		Full:   true,
		Push:   s.globalPushContext(),
		Reason: model.NewReasonStats(model.DebugTrigger),
	}
	s.AdsPushAll(req)
}
// AdsPushAll will send updates to all nodes, for a full config or incremental EDS.
func (s *DiscoveryServer) AdsPushAll(req *model.PushRequest) {
	if req.Full {
		totalService := len(req.Push.GetAllServices())
		log.Infof("XDS: Pushing Services:%d ConnectedEndpoints:%d Version:%s",
			totalService, s.adsClientCount(), req.Push.PushVersion)
		monServices.Record(float64(totalService))
		// Make sure the ConfigsUpdated map exists
		if req.ConfigsUpdated == nil {
			req.ConfigsUpdated = make(sets.Set[model.ConfigKey])
		}
	} else {
		log.Infof("XDS: Incremental Pushing ConnectedEndpoints:%d Version:%s",
			s.adsClientCount(), req.Push.PushVersion)
	}
	s.StartPush(req)
}
// StartPush sends a push event to every connected envoy by enqueueing the request
// for each client on the push queue.
func (s *DiscoveryServer) StartPush(req *model.PushRequest) {
	// Push config changes, iterating over connected envoys.
	if log.DebugEnabled() {
		if pending := s.pushQueue.Pending(); pending != 0 {
			log.Debugf("Starting new push while %v were still pending", pending)
		}
	}
	req.Start = time.Now()
	for _, client := range s.AllClients() {
		s.pushQueue.Enqueue(client, req)
	}
}
// addCon registers a connection in the client map and records the client metric,
// both under the clients mutex.
func (s *DiscoveryServer) addCon(conID string, con *Connection) {
	s.adsClientsMutex.Lock()
	defer s.adsClientsMutex.Unlock()
	recordXDSClients(con.proxy.Metadata.IstioVersion, 1)
	s.adsClients[conID] = con
}
// removeCon deregisters a connection from the client map. Removing an unknown
// connection ID is logged and counted as an internal error.
func (s *DiscoveryServer) removeCon(conID string) {
	s.adsClientsMutex.Lock()
	defer s.adsClientsMutex.Unlock()
	con, exist := s.adsClients[conID]
	if !exist {
		log.Errorf("ADS: Removing connection for non-existing node:%v.", conID)
		totalXDSInternalErrors.Increment()
		return
	}
	delete(s.adsClients, conID)
	recordXDSClients(con.proxy.Metadata.IstioVersion, -1)
}
// send writes a discovery response to the stream, with timeout if configured.
// On success the sent nonce is recorded in the proxy's watched resources (debug
// types excluded); on deadline-exceeded the write-timeout metric is incremented.
func (conn *Connection) send(res *discovery.DiscoveryResponse) error {
	sendHandler := func() error {
		start := time.Now()
		defer func() { recordSendTime(time.Since(start)) }()
		return conn.stream.Send(res)
	}
	err := istiogrpc.Send(conn.stream.Context(), sendHandler)
	if err == nil {
		if res.Nonce != "" && !strings.HasPrefix(res.TypeUrl, v3.DebugType) {
			conn.proxy.Lock()
			if conn.proxy.WatchedResources[res.TypeUrl] == nil {
				conn.proxy.WatchedResources[res.TypeUrl] = &model.WatchedResource{TypeUrl: res.TypeUrl}
			}
			conn.proxy.WatchedResources[res.TypeUrl].NonceSent = res.Nonce
			conn.proxy.Unlock()
		}
	} else if status.Convert(err).Code() == codes.DeadlineExceeded {
		// BUGFIX: the format string previously had one verb ("Timeout writing %s: ")
		// for two arguments, which go vet flags and which rendered the type as
		// "%!(EXTRA ...)". Log both the connection ID and the short type.
		log.Infof("Timeout writing %s: %s", conn.conID, v3.GetShortType(res.TypeUrl))
		xdsResponseWriteTimeouts.Increment()
	}
	return err
}
// NonceAcked returns the last acked nonce for the given type URL, or "" if the
// type is not watched.
// nolint
func (conn *Connection) NonceAcked(typeUrl string) string {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	if wr := conn.proxy.WatchedResources[typeUrl]; wr != nil {
		return wr.NonceAcked
	}
	return ""
}
// NonceSent returns the last sent nonce for the given type URL, or "" if the
// type is not watched.
// nolint
func (conn *Connection) NonceSent(typeUrl string) string {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	if wr := conn.proxy.WatchedResources[typeUrl]; wr != nil {
		return wr.NonceSent
	}
	return ""
}
// Clusters returns the cluster names the proxy has subscribed to via EDS,
// or an empty slice when no EDS watch exists.
func (conn *Connection) Clusters() []string {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	if wr := conn.proxy.WatchedResources[v3.EndpointType]; wr != nil {
		return wr.ResourceNames
	}
	return []string{}
}
// Routes returns the route names the proxy has subscribed to via RDS,
// or an empty slice when no RDS watch exists.
func (conn *Connection) Routes() []string {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	if wr := conn.proxy.WatchedResources[v3.RouteType]; wr != nil {
		return wr.ResourceNames
	}
	return []string{}
}
// Watching reports whether the proxy has a watch registered for the given type URL.
// nolint
func (conn *Connection) Watching(typeUrl string) bool {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	return conn.proxy.WatchedResources[typeUrl] != nil
}
// Watched returns the watch state for the given type URL, or nil when not watched.
// nolint
func (conn *Connection) Watched(typeUrl string) *model.WatchedResource {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	// Indexing a nil map yields the nil zero value, matching "not watched".
	return conn.proxy.WatchedResources[typeUrl]
}
// orderWatchedResources returns the ordered list of
// watched resources for the proxy, ordered in accordance with known push order.
// Types with a known push order come first (in that order), followed by any other
// watched types in map-iteration order.
// nolint
func (conn *Connection) orderWatchedResources() []*model.WatchedResource {
	conn.proxy.RLock()
	defer conn.proxy.RUnlock()
	watched := conn.proxy.WatchedResources
	ordered := make([]*model.WatchedResource, 0, len(watched))
	// Known types first, in the canonical push order.
	for _, typeURL := range PushOrder {
		if res, found := watched[typeURL]; found {
			ordered = append(ordered, res)
		}
	}
	// Then everything without a declared order.
	for typeURL, res := range watched {
		if _, known := KnownOrderedTypeUrls[typeURL]; !known {
			ordered = append(ordered, res)
		}
	}
	return ordered
}
// reportAllEventsForProxyNoPush reports all tracking events for a proxy without need to push xds.
func reportAllEventsForProxyNoPush(con *Connection, statusReporter DistributionStatusCache, nonce string) {
	if statusReporter == nil {
		return
	}
	for eventType := range AllTrackingEventTypes {
		statusReporter.RegisterEvent(con.conID, eventType, nonce)
	}
}
// reportEventsForUnWatched is to report events for unwatched types after push.
// e.g. there is no rds if no route configured for gateway.
// The watched-resource map is read under the proxy read lock; events are then
// registered outside the lock.
// nolint
func reportEventsForUnWatched(con *Connection, statusReporter DistributionStatusCache, nonce string) {
	if statusReporter == nil {
		return
	}
	unWatched := sets.NewWithLength[EventType](len(AllTrackingEventTypes))
	con.proxy.RLock()
	for typeURL := range AllTrackingEventTypes {
		if _, watched := con.proxy.WatchedResources[typeURL]; !watched {
			unWatched.Insert(typeURL)
		}
	}
	con.proxy.RUnlock()
	for typeURL := range unWatched {
		statusReporter.RegisterEvent(con.conID, typeURL, nonce)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"context"
"sync"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
sds "github.com/envoyproxy/go-control-plane/envoy/service/secret/v3"
"google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"istio.io/istio/pilot/pkg/model"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/test"
)
// NewAdsTest creates a test helper speaking the aggregated (ADS) discovery protocol
// over the given gRPC connection.
func NewAdsTest(t test.Failer, conn *grpc.ClientConn) *AdsTest {
	open := func(conn *grpc.ClientConn) (DiscoveryClient, error) {
		return discovery.NewAggregatedDiscoveryServiceClient(conn).StreamAggregatedResources(context.Background())
	}
	return NewXdsTest(t, conn, open)
}
// NewSdsTest creates a test helper speaking the secret (SDS) discovery protocol
// over the given gRPC connection, defaulting the request type to SecretType.
func NewSdsTest(t test.Failer, conn *grpc.ClientConn) *AdsTest {
	open := func(conn *grpc.ClientConn) (DiscoveryClient, error) {
		return sds.NewSecretDiscoveryServiceClient(conn).StreamSecrets(context.Background())
	}
	return NewXdsTest(t, conn, open).WithType(v3.SecretType)
}
// NewXdsTest creates a generic xDS test helper: it opens a stream via getClient,
// starts the background receive loop, and registers cleanup on the test.
// Defaults: sidecar node ID, one-second timeout, CDS request type.
func NewXdsTest(t test.Failer, conn *grpc.ClientConn, getClient func(conn *grpc.ClientConn) (DiscoveryClient, error)) *AdsTest {
	ctx, cancel := context.WithCancel(context.Background())
	client, err := getClient(conn)
	if err != nil {
		t.Fatal(err)
	}
	tester := &AdsTest{
		client:        client,
		conn:          conn,
		context:       ctx,
		cancelContext: cancel,
		ID:            "sidecar~1.1.1.1~test.default~default.svc.cluster.local",
		timeout:       time.Second,
		Type:          v3.ClusterType,
		responses:     make(chan *discovery.DiscoveryResponse),
		error:         make(chan error),
	}
	t.Cleanup(tester.Cleanup)
	go tester.adsReceiveChannel()
	return tester
}
// AdsTest is a test helper wrapping a single xDS stream, pumping received
// responses and errors onto channels for assertion helpers.
type AdsTest struct {
	client    DiscoveryClient                    // the underlying xDS stream
	responses chan *discovery.DiscoveryResponse  // responses received from the stream
	error     chan error                         // stream errors (including EOF)
	conn      *grpc.ClientConn                   // owning connection, closed on Cleanup (may be nil)
	metadata  model.NodeMetadata                 // node metadata attached to requests

	ID   string // node ID used in requests
	Type string // default type URL for requests

	cancelOnce    sync.Once          // guards Cleanup against concurrent callers
	context       context.Context    // lifetime of the test stream
	cancelContext context.CancelFunc // cancels context on Cleanup
	timeout       time.Duration      // wait budget for Expect* helpers
}
// Cleanup cancels the stream context, half-closes the stream, and closes the
// connection. Safe to call multiple times and from multiple goroutines.
func (a *AdsTest) Cleanup() {
	// Place in once to avoid race when two callers attempt to cleanup
	a.cancelOnce.Do(func() {
		a.cancelContext()
		_ = a.client.CloseSend()
		if conn := a.conn; conn != nil {
			_ = conn.Close()
		}
	})
}
// adsReceiveChannel pumps responses from the stream into a.responses until the
// stream errors or the test context is cancelled; the terminating error is
// forwarded on a.error. Runs as a background goroutine started by NewXdsTest.
func (a *AdsTest) adsReceiveChannel() {
	// Ensure cleanup runs if the context is cancelled externally.
	context.AfterFunc(a.context, a.Cleanup)
	for {
		resp, err := a.client.Recv()
		if err != nil {
			if isUnexpectedError(err) {
				log.Warnf("ads received error: %v", err)
			}
			// Deliver the error unless the test is already shutting down.
			select {
			case a.error <- err:
			case <-a.context.Done():
			}
			return
		}
		// Hand the response to the test, unless shutdown raced with us.
		select {
		case a.responses <- resp:
		case <-a.context.Done():
			return
		}
	}
}
// DrainResponses reads all responses, but does nothing to them
func (a *AdsTest) DrainResponses() {
	for {
		select {
		case resp := <-a.responses:
			log.Infof("drained response %v", resp.TypeUrl)
		case <-a.context.Done():
			return
		}
	}
}
// ExpectResponse waits until a response is received and returns it.
// Fails the test on timeout, on a stream error, or on an empty response.
func (a *AdsTest) ExpectResponse(t test.Failer) *discovery.DiscoveryResponse {
	t.Helper()
	select {
	case resp := <-a.responses:
		if resp == nil || len(resp.Resources) == 0 {
			t.Fatalf("got empty response")
		}
		return resp
	case err := <-a.error:
		t.Fatalf("got error: %v", err)
	case <-time.After(a.timeout):
		t.Fatalf("did not get response in time")
	}
	return nil
}
// ExpectError waits until an error is received and returns it.
// Fails the test if no error arrives within the timeout.
func (a *AdsTest) ExpectError(t test.Failer) error {
	t.Helper()
	select {
	case err := <-a.error:
		return err
	case <-time.After(a.timeout):
		t.Fatalf("did not get error in time")
	}
	return nil
}
// ExpectNoResponse waits a short period of time and ensures no response is received
func (a *AdsTest) ExpectNoResponse(t test.Failer) {
	t.Helper()
	select {
	case resp := <-a.responses:
		t.Fatalf("got unexpected response: %v", resp)
	case <-time.After(time.Millisecond * 50):
	}
}
// fillInRequestDefaults fills missing fields of a discovery request with the
// helper's defaults (type URL, node ID and metadata). A nil request is allowed.
func (a *AdsTest) fillInRequestDefaults(req *discovery.DiscoveryRequest) *discovery.DiscoveryRequest {
	if req == nil {
		req = &discovery.DiscoveryRequest{}
	}
	if len(req.TypeUrl) == 0 {
		req.TypeUrl = a.Type
	}
	if req.Node == nil {
		req.Node = &core.Node{
			Id:       a.ID,
			Metadata: a.metadata.ToStruct(),
		}
	}
	return req
}
// Request sends a discovery request (with defaults filled in), failing the test
// on any send error.
func (a *AdsTest) Request(t test.Failer, req *discovery.DiscoveryRequest) {
	t.Helper()
	if err := a.client.Send(a.fillInRequestDefaults(req)); err != nil {
		t.Fatal(err)
	}
}
// RequestResponseAck does a full XDS exchange: Send a request, get a response, and ACK the response
func (a *AdsTest) RequestResponseAck(t test.Failer, req *discovery.DiscoveryRequest) *discovery.DiscoveryResponse {
	t.Helper()
	req = a.fillInRequestDefaults(req)
	a.Request(t, req)
	resp := a.ExpectResponse(t)
	// ACK by echoing back the response's nonce and version.
	req.ResponseNonce, req.VersionInfo = resp.Nonce, resp.VersionInfo
	a.Request(t, req)
	return resp
}
// RequestResponseNack does a full XDS exchange with an error: Send a request, get a response, and NACK the response
func (a *AdsTest) RequestResponseNack(t test.Failer, req *discovery.DiscoveryRequest) *discovery.DiscoveryResponse {
	t.Helper()
	if req == nil {
		req = &discovery.DiscoveryRequest{}
	}
	a.Request(t, req)
	resp := a.ExpectResponse(t)
	// NACK: echo the nonce but attach an error detail.
	req.ResponseNonce = resp.Nonce
	req.ErrorDetail = &status.Status{Message: "Test request NACK"}
	a.Request(t, req)
	return resp
}
// WithID overrides the node ID used in requests and returns the helper for chaining.
func (a *AdsTest) WithID(id string) *AdsTest {
	a.ID = id
	return a
}
// WithType overrides the default request type URL and returns the helper for chaining.
func (a *AdsTest) WithType(typeURL string) *AdsTest {
	a.Type = typeURL
	return a
}
// WithMetadata overrides the node metadata attached to requests and returns the helper for chaining.
func (a *AdsTest) WithMetadata(m model.NodeMetadata) *AdsTest {
	a.metadata = m
	return a
}
// WithTimeout overrides the wait budget for the Expect* helpers and returns the helper for chaining.
func (a *AdsTest) WithTimeout(t time.Duration) *AdsTest {
	a.timeout = t
	return a
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"context"
"fmt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
)
// authenticate authenticates the ADS request using the configured authenticators.
// Returns the validated principals or an error.
// If no authenticators are configured, or if the request is on a non-secure
// stream ( 15010 ) - returns an empty list of principals and no errors.
func (s *DiscoveryServer) authenticate(ctx context.Context) ([]string, error) {
	result, err := security.Authenticate(ctx, s.Authenticators)
	if result == nil {
		return nil, err
	}
	return result.Identities, nil
}
// authorize checks that the authenticated peer identities are allowed to act as
// this proxy, recording the verified identity on success. A nil connection/proxy,
// disabled identity checking, or nil identities all pass trivially.
func (s *DiscoveryServer) authorize(con *Connection, identities []string) error {
	if con == nil || con.proxy == nil {
		return nil
	}
	// TODO: allow locking down, rejecting unauthenticated requests.
	if !features.EnableXDSIdentityCheck || identities == nil {
		return nil
	}
	id, err := checkConnectionIdentity(con.proxy, identities)
	if err != nil {
		log.Warnf("Unauthorized XDS: %v with identity %v: %v", con.peerAddr, identities, err)
		return status.Newf(codes.PermissionDenied, "authorization failed: %v", err).Err()
	}
	con.proxy.VerifiedIdentity = id
	return nil
}
// checkConnectionIdentity returns the first presented identity that is a valid
// SPIFFE ID and is consistent with the proxy's configured namespace and service
// account (empty proxy fields match anything). If none match, an error listing
// the rejected identities is returned.
func checkConnectionIdentity(proxy *model.Proxy, identities []string) (*spiffe.Identity, error) {
	for _, candidate := range identities {
		id, err := spiffe.ParseIdentity(candidate)
		if err != nil {
			// Not a parseable SPIFFE identity; try the next one.
			continue
		}
		nsMismatch := proxy.ConfigNamespace != "" && id.Namespace != proxy.ConfigNamespace
		saMismatch := proxy.Metadata.ServiceAccount != "" && id.ServiceAccount != proxy.Metadata.ServiceAccount
		if nsMismatch || saMismatch {
			continue
		}
		return &id, nil
	}
	return nil, fmt.Errorf("no identities (%v) matched %v/%v", identities, proxy.ConfigNamespace, proxy.Metadata.ServiceAccount)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/util/sets"
)
// CdsGenerator produces CDS (cluster) responses, delegating the actual cluster
// construction to the shared ConfigGenerator.
type CdsGenerator struct {
	ConfigGenerator core.ConfigGenerator
}

// Compile-time assertion that CdsGenerator supports delta XDS generation.
var _ model.XdsDeltaResourceGenerator = &CdsGenerator{}
// Map of all configs that do not impact CDS. Updates touching only these kinds
// never trigger a CDS push (except for gateways under the gateway filter, see
// pushCdsGatewayConfig below).
var skippedCdsConfigs = sets.New(
	kind.Gateway,
	kind.WorkloadEntry,
	kind.WorkloadGroup,
	kind.AuthorizationPolicy,
	kind.RequestAuthentication,
	kind.Secret,
	kind.Telemetry,
	kind.WasmPlugin,
	kind.ProxyConfig,
)
// Map all configs that impact CDS for gateways when `PILOT_FILTER_GATEWAY_CLUSTER_CONFIG = true`.
// RequestAuthentication is included only when JWKS resolution is not done by
// istiod, since in that mode it affects the generated clusters.
var pushCdsGatewayConfig = func() sets.Set[kind.Kind] {
	s := sets.New(
		kind.VirtualService,
		kind.Gateway,
	)
	if features.JwksFetchMode != jwt.Istiod {
		s.Insert(kind.RequestAuthentication)
	}
	return s
}()
// cdsNeedsPush reports whether the given push request requires regenerating
// CDS for this proxy. CDS is only generated on full pushes; a full push with
// no ConfigsUpdated always pushes, otherwise the updated kinds are checked
// against the skip/gateway kind sets.
func cdsNeedsPush(req *model.PushRequest, proxy *model.Proxy) bool {
	if req == nil {
		return true
	}
	// CDS only handles full push
	if !req.Full {
		return false
	}
	// If none set, we will always push
	if len(req.ConfigsUpdated) == 0 {
		return true
	}
	// Gateway-specific kinds only matter for routers when the gateway cluster
	// filter feature is enabled; this is invariant across the loop.
	checkGatewayKinds := features.FilterGatewayClusterConfig && proxy.Type == model.Router
	for config := range req.ConfigsUpdated {
		if checkGatewayKinds && pushCdsGatewayConfig.Contains(config.Kind) {
			return true
		}
		if !skippedCdsConfigs.Contains(config.Kind) {
			return true
		}
	}
	return false
}
// Generate implements model.XdsResourceGenerator: it builds the full set of
// clusters for the proxy, or returns nothing if the request does not affect CDS.
func (c CdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !cdsNeedsPush(req, proxy) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	clusters, logs := c.ConfigGenerator.BuildClusters(proxy, req)
	return clusters, logs, nil
}
// GenerateDeltas for CDS currently only builds deltas when services change. todo implement changes for DestinationRule, etc
// The final bool return reports whether a delta was actually used (vs falling
// back to a full build).
func (c CdsGenerator) GenerateDeltas(proxy *model.Proxy, req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	if !cdsNeedsPush(req, proxy) {
		return nil, nil, model.DefaultXdsLogDetails, false, nil
	}
	updatedClusters, removedClusters, logs, usedDelta := c.ConfigGenerator.BuildDeltaClusters(proxy, req, w)
	return updatedClusters, removedClusters, logs, usedDelta, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"encoding/json"
"fmt"
"html/template"
"net"
"net/http"
"net/http/pprof"
"net/netip"
"runtime"
"sort"
"strings"
"sync"
"time"
admin "github.com/envoyproxy/go-control-plane/envoy/admin/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
wasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/wasm/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
discoveryv3 "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/protobuf/proto"
anypb "google.golang.org/protobuf/types/known/anypb"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/endpoints"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/config/xds"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
// indexTmpl renders the /debug index page: a table of all registered debug
// endpoints with their descriptions. It is executed with a slice of structs
// exposing Href, Name and Help fields.
var indexTmpl = template.Must(template.New("index").Parse(`<html>
<head>
<title>Pilot Debug Console</title>
</head>
<style>
#endpoints {
font-family: "Trebuchet MS", Arial, Helvetica, sans-serif;
border-collapse: collapse;
}
#endpoints td, #endpoints th {
border: 1px solid #ddd;
padding: 8px;
}
#endpoints tr:nth-child(even){background-color: #f2f2f2;}
#endpoints tr:hover {background-color: #ddd;}
#endpoints th {
padding-top: 12px;
padding-bottom: 12px;
text-align: left;
background-color: black;
color: white;
}
</style>
<body>
<br/>
<p style = "font-family:Arial,Helvetica,sans-serif;">
Note: Use <b>pretty</b> in query string (like <b>debug/configz?pretty</b>) to format the output.
</p>
<table id="endpoints">
<tr><th>Endpoint</th><th>Description</th></tr>
{{range .}}
<tr>
<td><a href='{{.Href}}'>{{.Name}}</a></td><td>{{.Help}}</td>
</tr>
{{end}}
</table>
<br/>
</body>
</html>
`))
// AdsClient defines the data that is displayed on "/adsz" endpoint.
type AdsClient struct {
	ConnectionID string              `json:"connectionId"`
	ConnectedAt  time.Time           `json:"connectedAt"`
	PeerAddress  string              `json:"address"`
	Labels       map[string]string   `json:"labels"`
	Metadata     *model.NodeMetadata `json:"metadata,omitempty"`
	Locality     *core.Locality      `json:"locality,omitempty"`
	// Watches maps a resource type URL to the resource names watched for it.
	Watches map[string][]string `json:"watches,omitempty"`
}
// AdsClients is collection of AdsClient connected to this Istiod.
type AdsClients struct {
	Total     int         `json:"totalClients"`
	Connected []AdsClient `json:"clients,omitempty"`
}
// SyncStatus is the synchronization status between Pilot and a given Envoy.
// The *Sent/*Acked pairs hold the last nonce sent and acknowledged per
// resource type; equal values mean that type is in sync.
type SyncStatus struct {
	ClusterID            string         `json:"cluster_id,omitempty"`
	ProxyID              string         `json:"proxy,omitempty"`
	ProxyType            model.NodeType `json:"proxy_type,omitempty"`
	ProxyVersion         string         `json:"proxy_version,omitempty"`
	IstioVersion         string         `json:"istio_version,omitempty"`
	ClusterSent          string         `json:"cluster_sent,omitempty"`
	ClusterAcked         string         `json:"cluster_acked,omitempty"`
	ListenerSent         string         `json:"listener_sent,omitempty"`
	ListenerAcked        string         `json:"listener_acked,omitempty"`
	RouteSent            string         `json:"route_sent,omitempty"`
	RouteAcked           string         `json:"route_acked,omitempty"`
	EndpointSent         string         `json:"endpoint_sent,omitempty"`
	EndpointAcked        string         `json:"endpoint_acked,omitempty"`
	ExtensionConfigSent  string         `json:"extensionconfig_sent,omitempty"`
	ExtensionConfigAcked string         `json:"extensionconfig_acked,omitempty"`
}
// SyncedVersions shows what resourceVersion of a given resource has been acked by Envoy.
type SyncedVersions struct {
	ProxyID         string `json:"proxy,omitempty"`
	ClusterVersion  string `json:"cluster_acked,omitempty"`
	ListenerVersion string `json:"listener_acked,omitempty"`
	RouteVersion    string `json:"route_acked,omitempty"`
	EndpointVersion string `json:"endpoint_acked,omitempty"`
}
// InitDebug initializes the debug handlers and adds a debug in-memory registry.
// It registers authenticated handlers on mux and returns a second mux with the
// same handlers registered without authentication, for internal-only exposure.
func (s *DiscoveryServer) InitDebug(
	mux *http.ServeMux,
	enableProfiling bool,
	fetchWebhook func() map[string]string,
) *http.ServeMux {
	internalMux := http.NewServeMux()
	s.AddDebugHandlers(mux, internalMux, enableProfiling, fetchWebhook)
	return internalMux
}
// AddDebugHandlers registers every debug endpoint on the given muxes. It is a
// no-op when debug-over-HTTP is disabled. Each endpoint is added both to mux
// (with auth) and internalMux (without) via addDebugHandler; the /debug index
// page itself is registered on mux only.
func (s *DiscoveryServer) AddDebugHandlers(mux, internalMux *http.ServeMux, enableProfiling bool, webhook func() map[string]string) {
	// Debug handlers on HTTP ports are added for backward compatibility.
	// They will be exposed on XDS-over-TLS in future releases.
	if !features.EnableDebugOnHTTP {
		return
	}
	if enableProfiling {
		// NOTE: the block profile rate intentionally reuses MutexProfileFraction.
		runtime.SetMutexProfileFraction(features.MutexProfileFraction)
		runtime.SetBlockProfileRate(features.MutexProfileFraction)
		s.addDebugHandler(mux, internalMux, "/debug/pprof/", "Displays pprof index", pprof.Index)
		s.addDebugHandler(mux, internalMux, "/debug/pprof/cmdline", "The command line invocation of the current program", pprof.Cmdline)
		s.addDebugHandler(mux, internalMux, "/debug/pprof/profile", "CPU profile", pprof.Profile)
		s.addDebugHandler(mux, internalMux, "/debug/pprof/symbol", "Symbol looks up the program counters listed in the request", pprof.Symbol)
		s.addDebugHandler(mux, internalMux, "/debug/pprof/trace", "A trace of execution of the current program.", pprof.Trace)
	}
	mux.HandleFunc("/debug", s.Debug)
	// Endpoints that can mutate server state are gated behind a feature flag.
	if features.EnableUnsafeAdminEndpoints {
		s.addDebugHandler(mux, internalMux, "/debug/force_disconnect", "Disconnects a proxy from this Pilot", s.forceDisconnect)
	}
	s.addDebugHandler(mux, internalMux, "/debug/ecdsz", "Status and debug interface for ECDS", s.ecdsz)
	s.addDebugHandler(mux, internalMux, "/debug/edsz", "Status and debug interface for EDS", s.Edsz)
	s.addDebugHandler(mux, internalMux, "/debug/ndsz", "Status and debug interface for NDS", s.ndsz)
	s.addDebugHandler(mux, internalMux, "/debug/adsz", "Status and debug interface for ADS", s.adsz)
	s.addDebugHandler(mux, internalMux, "/debug/adsz?push=true", "Initiates push of the current state to all connected endpoints", s.adsz)
	s.addDebugHandler(mux, internalMux, "/debug/syncz", "Synchronization status of all Envoys connected to this Pilot instance", s.Syncz)
	s.addDebugHandler(mux, internalMux, "/debug/config_distribution", "Version status of all Envoys connected to this Pilot instance", s.distributedVersions)
	s.addDebugHandler(mux, internalMux, "/debug/registryz", "Debug support for registry", s.registryz)
	s.addDebugHandler(mux, internalMux, "/debug/endpointz", "Obsolete, use endpointShardz", s.endpointShardz)
	s.addDebugHandler(mux, internalMux, "/debug/endpointShardz", "Info about the endpoint shards", s.endpointShardz)
	s.addDebugHandler(mux, internalMux, "/debug/cachez", "Info about the internal XDS caches", s.cachez)
	s.addDebugHandler(mux, internalMux, "/debug/cachez?sizes=true", "Info about the size of the internal XDS caches", s.cachez)
	s.addDebugHandler(mux, internalMux, "/debug/cachez?clear=true", "Clear the XDS caches", s.cachez)
	s.addDebugHandler(mux, internalMux, "/debug/configz", "Debug support for config", s.configz)
	s.addDebugHandler(mux, internalMux, "/debug/sidecarz", "Debug sidecar scope for a proxy", s.sidecarz)
	s.addDebugHandler(mux, internalMux, "/debug/resourcesz", "Debug support for watched resources", s.resourcez)
	s.addDebugHandler(mux, internalMux, "/debug/instancesz", "Debug support for service instances", s.instancesz)
	s.addDebugHandler(mux, internalMux, "/debug/authorizationz", "Internal authorization policies", s.authorizationz)
	s.addDebugHandler(mux, internalMux, "/debug/telemetryz", "Debug Telemetry configuration", s.telemetryz)
	s.addDebugHandler(mux, internalMux, "/debug/config_dump", "ConfigDump in the form of the Envoy admin config dump API for passed in proxyID", s.ConfigDump)
	s.addDebugHandler(mux, internalMux, "/debug/push_status", "Last PushContext Details", s.pushStatusHandler)
	s.addDebugHandler(mux, internalMux, "/debug/pushcontext", "Debug support for current push context", s.pushContextHandler)
	s.addDebugHandler(mux, internalMux, "/debug/connections", "Info about the connected XDS clients", s.connectionsHandler)
	s.addDebugHandler(mux, internalMux, "/debug/inject", "Active inject template", s.injectTemplateHandler(webhook))
	s.addDebugHandler(mux, internalMux, "/debug/mesh", "Active mesh config", s.meshHandler)
	s.addDebugHandler(mux, internalMux, "/debug/clusterz", "List remote clusters where istiod reads endpoints", s.clusterz)
	s.addDebugHandler(mux, internalMux, "/debug/networkz", "List cross-network gateways", s.networkz)
	s.addDebugHandler(mux, internalMux, "/debug/mcsz", "List information about Kubernetes MCS services", s.mcsz)
	s.addDebugHandler(mux, internalMux, "/debug/list", "List all supported debug commands in json", s.list)
}
// addDebugHandler registers a single debug endpoint: it records the help text
// (used to render the /debug index), and registers the handler on both muxes.
func (s *DiscoveryServer) addDebugHandler(mux *http.ServeMux, internalMux *http.ServeMux,
	path string, help string, handler func(http.ResponseWriter, *http.Request),
) {
	s.debugHandlers[path] = help
	// Add handler without auth. This mux is never exposed on an HTTP server and only used internally
	if internalMux != nil {
		internalMux.HandleFunc(path, handler)
	}
	// Add handler with auth; this is exposed on an HTTP server
	mux.HandleFunc(path, s.allowAuthenticatedOrLocalhost(http.HandlerFunc(handler)))
}
// allowAuthenticatedOrLocalhost wraps a handler so that requests are served
// only when they originate from localhost or pass the same authenticator chain
// used for XDS connections; anything else gets 401.
func (s *DiscoveryServer) allowAuthenticatedOrLocalhost(next http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		// Request is from localhost, no need to authenticate
		if isRequestFromLocalhost(req) {
			next.ServeHTTP(w, req)
			return
		}
		// Authenticate request with the same method as XDS
		var identities []string
		failures := make([]string, 0)
		authReq := security.AuthContext{Request: req}
		for _, authenticator := range s.Authenticators {
			result, err := authenticator.Authenticate(authReq)
			// First authenticator that yields identities wins.
			if result != nil && result.Identities != nil && err == nil {
				identities = result.Identities
				break
			}
			failures = append(failures, fmt.Sprintf("Authenticator %s: %v", authenticator.AuthenticatorType(), err))
		}
		if identities == nil {
			istiolog.Errorf("Failed to authenticate %s %v", req.URL, failures)
			// Not including detailed info in the response, XDS doesn't either (returns a generic "authentication failure).
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		// TODO: Check that the identity contains istio-system namespace, else block or restrict to only info that
		// is visible to the authenticated SA. Will require changes in docs and istioctl too.
		next.ServeHTTP(w, req)
	}
}
func isRequestFromLocalhost(r *http.Request) bool {
ip, _, err := net.SplitHostPort(r.RemoteAddr)
if err != nil {
return false
}
userIP, _ := netip.ParseAddr(ip)
return userIP.IsLoopback()
}
// Syncz dumps the synchronization status of all Envoys connected to this Pilot instance.
// An optional ?namespace= query parameter restricts the output to proxies in
// that namespace.
func (s *DiscoveryServer) Syncz(w http.ResponseWriter, req *http.Request) {
	namespace := req.URL.Query().Get("namespace")
	syncz := make([]SyncStatus, 0)
	for _, con := range s.SortedClients() {
		node := con.proxy
		// Skip connections without a proxy or outside the requested namespace.
		if node != nil && (namespace == "" || node.GetNamespace() == namespace) {
			syncz = append(syncz, SyncStatus{
				ProxyID:              node.ID,
				ProxyType:            node.Type,
				ClusterID:            node.GetClusterID().String(),
				IstioVersion:         node.GetIstioVersion(),
				ClusterSent:          con.NonceSent(v3.ClusterType),
				ClusterAcked:         con.NonceAcked(v3.ClusterType),
				ListenerSent:         con.NonceSent(v3.ListenerType),
				ListenerAcked:        con.NonceAcked(v3.ListenerType),
				RouteSent:            con.NonceSent(v3.RouteType),
				RouteAcked:           con.NonceAcked(v3.RouteType),
				EndpointSent:         con.NonceSent(v3.EndpointType),
				EndpointAcked:        con.NonceAcked(v3.EndpointType),
				ExtensionConfigSent:  con.NonceSent(v3.ExtensionConfigurationType),
				ExtensionConfigAcked: con.NonceAcked(v3.ExtensionConfigurationType),
			})
		}
	}
	writeJSON(w, syncz, req)
}
// registryz provides debug support for registry - adding and listing model items.
// Can be combined with the push debug interface to reproduce changes.
func (s *DiscoveryServer) registryz(w http.ResponseWriter, req *http.Request) {
	all := s.Env.ServiceDiscovery.Services()
	writeJSON(w, all, req)
}
// endpointShardz dumps info about the endpoint shards, tracked using the new direct interface.
// Legacy registry provides are synced to the new data structure as well, during
// the full push.
func (s *DiscoveryServer) endpointShardz(w http.ResponseWriter, req *http.Request) {
	writeJSON(w, s.Env.EndpointIndex.Shardz(), req)
}
// cachez implements the /debug/cachez endpoint. Default output lists cached
// resource names grouped by type URL; ?clear=true empties the cache and
// ?sizes=true reports per-type and total cached byte sizes instead.
func (s *DiscoveryServer) cachez(w http.ResponseWriter, req *http.Request) {
	if err := req.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte("Failed to parse request\n"))
		return
	}
	// "clear" takes precedence over "sizes" when both are supplied.
	if req.Form.Get("clear") != "" {
		s.Cache.ClearAll()
		_, _ = w.Write([]byte("Cache cleared\n"))
		return
	}
	if req.Form.Get("sizes") != "" {
		snapshot := s.Cache.Snapshot()
		raw := make(map[string]int, len(snapshot))
		totalSize := 0
		for _, resource := range snapshot {
			if resource == nil {
				continue
			}
			resourceType := resource.Resource.TypeUrl
			sz := len(resource.Resource.GetValue())
			raw[resourceType] += sz
			totalSize += sz
		}
		// Render sizes human-readable rather than as raw byte counts.
		res := make(map[string]string, len(raw))
		for k, v := range raw {
			res[k] = util.ByteCount(v)
		}
		res["total"] = util.ByteCount(totalSize)
		writeJSON(w, res, req)
		return
	}
	snapshot := s.Cache.Snapshot()
	resources := make(map[string][]string, len(snapshot)) // Key is typeUrl and value is resource names.
	for _, resource := range snapshot {
		if resource == nil {
			continue
		}
		resourceType := resource.Resource.TypeUrl
		resources[resourceType] = append(resources[resourceType], resource.Name)
	}
	writeJSON(w, resources, req)
}
// DistributionTrackingDisabledMessage is returned by /debug/config_distribution
// when version tracking is turned off.
const DistributionTrackingDisabledMessage = "Pilot Version tracking is disabled. It may be enabled by setting the " +
	"PILOT_ENABLE_CONFIG_DISTRIBUTION_TRACKING environment variable to true."
// distributedVersions implements /debug/config_distribution: for a required
// ?resource= query parameter it reports, per connected proxy (optionally
// filtered by ?proxy_namespace=), which version of that resource each xDS type
// has acked, derived from the nonces recorded by the status reporter.
func (s *DiscoveryServer) distributedVersions(w http.ResponseWriter, req *http.Request) {
	if !features.EnableDistributionTracking {
		w.WriteHeader(http.StatusConflict)
		_, _ = fmt.Fprint(w, DistributionTrackingDisabledMessage)
		return
	}
	if resourceID := req.URL.Query().Get("resource"); resourceID != "" {
		proxyNamespace := req.URL.Query().Get("proxy_namespace")
		// knownVersions caches ledger lookups across proxies within this request.
		knownVersions := make(map[string]string)
		var results []SyncedVersions
		for _, con := range s.SortedClients() {
			// BUG FIX: the nil check must come before RLock — taking the lock on a
			// nil proxy panics, which previously made the nil guard unreachable.
			if con.proxy == nil {
				continue
			}
			// wrap this in independent scope so that panic's don't bypass Unlock...
			con.proxy.RLock()
			if proxyNamespace == "" || proxyNamespace == con.proxy.ConfigNamespace {
				// read nonces from our statusreporter to allow for skipped nonces, etc.
				results = append(results, SyncedVersions{
					ProxyID: con.proxy.ID,
					ClusterVersion: s.getResourceVersion(s.StatusReporter.QueryLastNonce(con.conID, v3.ClusterType),
						resourceID, knownVersions),
					ListenerVersion: s.getResourceVersion(s.StatusReporter.QueryLastNonce(con.conID, v3.ListenerType),
						resourceID, knownVersions),
					RouteVersion: s.getResourceVersion(s.StatusReporter.QueryLastNonce(con.conID, v3.RouteType),
						resourceID, knownVersions),
					EndpointVersion: s.getResourceVersion(s.StatusReporter.QueryLastNonce(con.conID, v3.EndpointType),
						resourceID, knownVersions),
				})
			}
			con.proxy.RUnlock()
		}
		writeJSON(w, results, req)
	} else {
		w.WriteHeader(http.StatusUnprocessableEntity)
		_, _ = fmt.Fprintf(w, "querystring parameter 'resource' is required\n")
	}
}
// VersionLen is the Config Version and is only used as the nonce prefix, but we can reconstruct
// it because it is a b64 encoding of a 64 bit array, which will always be 12 chars in length.
// len = ceil(bitlength/(2^6))+1
const VersionLen = 12
// getResourceVersion resolves the resource version for `key` at the config
// version encoded as the first VersionLen characters of the nonce, consulting
// (and populating) the per-request cache before hitting the ledger.
func (s *DiscoveryServer) getResourceVersion(nonce, key string, cache map[string]string) string {
	if len(nonce) < VersionLen {
		return ""
	}
	configVersion := nonce[:VersionLen]
	if cached, found := cache[configVersion]; found {
		return cached
	}
	value, err := s.Env.GetLedger().GetPreviousValue(configVersion, key)
	if err != nil {
		istiolog.Errorf("Unable to retrieve resource %s at version %s: %v", key, configVersion, err)
		value = ""
	}
	// update the cache even on an error, because errors will not resolve themselves, and we don't want to
	// repeat the same error for many s.adsClients.
	cache[configVersion] = value
	return value
}
// kubernetesConfig wraps a config.Config with a custom marshaling method that matches a Kubernetes
// object structure.
type kubernetesConfig struct {
	config.Config
}

// MarshalJSON converts the wrapped config to its CRD form before encoding, so
// the JSON output looks like the Kubernetes resource rather than the internal model.
func (k kubernetesConfig) MarshalJSON() ([]byte, error) {
	cfg, err := crd.ConvertConfig(k.Config)
	if err != nil {
		return nil, err
	}
	return json.Marshal(cfg)
}
// configz dumps every config resource known to the config store, across all
// registered schemas and all namespaces, in Kubernetes object form.
func (s *DiscoveryServer) configz(w http.ResponseWriter, req *http.Request) {
	configs := make([]kubernetesConfig, 0)
	if s.Env == nil || s.Env.ConfigStore == nil {
		// Nothing to dump; responds with an empty body rather than an error.
		return
	}
	s.Env.ConfigStore.Schemas().ForEach(func(schema resource.Schema) bool {
		// Empty namespace lists resources across all namespaces.
		cfg := s.Env.ConfigStore.List(schema.GroupVersionKind(), "")
		for _, c := range cfg {
			configs = append(configs, kubernetesConfig{c})
		}
		// Returning false continues iteration over the remaining schemas.
		return false
	})
	writeJSON(w, configs, req)
}
// sidecarz dumps the computed SidecarScope for the proxy identified in the
// request, or an error if that proxy is not connected to this instance.
func (s *DiscoveryServer) sidecarz(w http.ResponseWriter, req *http.Request) {
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	writeJSON(w, con.proxy.SidecarScope, req)
}
// resourcez lists the GroupVersionKinds of all config schemas this server watches.
func (s *DiscoveryServer) resourcez(w http.ResponseWriter, req *http.Request) {
	schemas := make([]config.GroupVersionKind, 0)
	if s.Env != nil && s.Env.ConfigStore != nil {
		// NOTE(review): the guard checks Env.ConfigStore but iterates
		// s.Env.Schemas() — presumably Env delegates Schemas to ConfigStore;
		// confirm against the Environment definition.
		s.Env.Schemas().ForEach(func(schema resource.Schema) bool {
			schemas = append(schemas, schema.GroupVersionKind())
			// Returning false continues iteration.
			return false
		})
	}
	writeJSON(w, schemas, req)
}
// AuthorizationDebug holds debug information for authorization policy.
type AuthorizationDebug struct {
	AuthorizationPolicies *model.AuthorizationPolicies `json:"authorization_policies"`
}
// authorizationz dumps the internal authorization policies.
func (s *DiscoveryServer) authorizationz(w http.ResponseWriter, req *http.Request) {
	info := AuthorizationDebug{
		AuthorizationPolicies: s.globalPushContext().AuthzPolicies,
	}
	writeJSON(w, info, req)
}
// TelemetryDebug holds debug information for Telemetry configuration.
// (The previous comment here was copy-pasted from AuthorizationDebug.)
type TelemetryDebug struct {
	Telemetries *model.Telemetries `json:"telemetries"`
}
// telemetryz dumps Telemetry configuration: the global telemetry state when no
// proxy is specified, or the per-proxy computed telemetry when one is.
func (s *DiscoveryServer) telemetryz(w http.ResponseWriter, req *http.Request) {
	proxyID, con := s.getDebugConnection(req)
	if proxyID != "" && con == nil {
		// We can't guarantee the Pilot we are connected to has a connection to the proxy we requested
		// There isn't a great way around this, but for debugging purposes its suitable to have the caller retry.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte("Proxy not connected to this Pilot instance. It may be connected to another instance.\n"))
		return
	}
	if con == nil {
		// No proxy requested: dump the global telemetry state.
		info := TelemetryDebug{
			Telemetries: s.globalPushContext().Telemetry,
		}
		writeJSON(w, info, req)
		return
	}
	writeJSON(w, s.globalPushContext().Telemetry.Debug(con.proxy), req)
}
// connectionsHandler implements interface for displaying current connections.
// It is mapped to /debug/connections. Unlike adsz, it reports only connection
// metadata (id, time, peer address) without touching proxy state.
func (s *DiscoveryServer) connectionsHandler(w http.ResponseWriter, req *http.Request) {
	clients := s.SortedClients()
	out := &AdsClients{Total: len(clients)}
	for _, conn := range clients {
		out.Connected = append(out.Connected, AdsClient{
			ConnectionID: conn.conID,
			ConnectedAt:  conn.connectedAt,
			PeerAddress:  conn.peerAddr,
		})
	}
	writeJSON(w, out, req)
}
// adsz implements a status and debug interface for ADS.
// It is mapped to /debug/adsz. With ?push=true it triggers a push instead of
// dumping state; with a proxy ID it restricts output to that connection.
func (s *DiscoveryServer) adsz(w http.ResponseWriter, req *http.Request) {
	if s.handlePushRequest(w, req) {
		return
	}
	proxyID, con := s.getDebugConnection(req)
	if proxyID != "" && con == nil {
		// We can't guarantee the Pilot we are connected to has a connection to the proxy we requested
		// There isn't a great way around this, but for debugging purposes its suitable to have the caller retry.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte("Proxy not connected to this Pilot instance. It may be connected to another instance.\n"))
		return
	}
	var connections []*Connection
	if con != nil {
		connections = []*Connection{con}
	} else {
		connections = s.SortedClients()
	}
	adsClients := &AdsClients{}
	adsClients.Total = len(connections)
	for _, c := range connections {
		adsClient := AdsClient{
			ConnectionID: c.conID,
			ConnectedAt:  c.connectedAt,
			PeerAddress:  c.peerAddr,
			Labels:       c.proxy.Labels,
			Metadata:     c.proxy.Metadata,
			Locality:     c.proxy.Locality,
			Watches:      map[string][]string{},
		}
		// WatchedResources is mutated by the XDS stream; read it under the proxy's lock.
		c.proxy.RLock()
		for k, wr := range c.proxy.WatchedResources {
			r := wr.ResourceNames
			if r == nil {
				// Normalize nil to an empty slice so JSON shows [] rather than null.
				r = []string{}
			}
			adsClient.Watches[k] = r
		}
		c.proxy.RUnlock()
		adsClients.Connected = append(adsClients.Connected, adsClient)
	}
	writeJSON(w, adsClients, req)
}
// ecdsz implements a status and debug interface for ECDS.
// It is mapped to /debug/ecdsz. Requires a connected proxy; returns 404 when
// no extension configurations are watched/generated for it.
func (s *DiscoveryServer) ecdsz(w http.ResponseWriter, req *http.Request) {
	if s.handlePushRequest(w, req) {
		return
	}
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	dump := s.getConfigDumpByResourceType(con, nil, []string{v3.ExtensionConfigurationType})
	if len(dump[v3.ExtensionConfigurationType]) == 0 {
		w.WriteHeader(http.StatusNotFound)
		return
	}
	writeJSON(w, dump[v3.ExtensionConfigurationType], req)
}
// ConfigDump returns information in the form of the Envoy admin API config dump for the specified proxy
// The dump will only contain dynamic listeners/clusters/routes and can be used to compare what an Envoy instance
// should look like according to Pilot vs what it currently does look like.
func (s *DiscoveryServer) ConfigDump(w http.ResponseWriter, req *http.Request) {
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	// An explicit ?types= filter takes precedence: dump only those types.
	if ts := s.getResourceTypes(req); len(ts) != 0 {
		writeJSON(w, flattenToConfigDump(s.getConfigDumpByResourceType(con, nil, ts)), req)
		return
	}
	// ztunnel proxies are served the ambient Address type only.
	if con.proxy.IsZTunnel() {
		writeJSON(w, flattenToConfigDump(s.getConfigDumpByResourceType(con, nil, []string{v3.AddressType})), req)
		return
	}
	// EDS is excluded unless explicitly requested, for parity with Envoy's config_dump.
	includeEds := req.URL.Query().Get("include_eds") == "true"
	dump, err := s.connectionConfigDump(con, includeEds)
	if err != nil {
		handleHTTPError(w, err)
		return
	}
	writeJSON(w, dump, req)
}

// flattenToConfigDump collapses generator output (resources grouped by type)
// into a bare Envoy admin ConfigDump. Extracted because the type-filtered and
// ztunnel paths previously duplicated this loop verbatim.
func flattenToConfigDump(resources map[string][]*discoveryv3.Resource) *admin.ConfigDump {
	configDump := &admin.ConfigDump{}
	for _, rs := range resources {
		for _, rr := range rs {
			configDump.Configs = append(configDump.Configs, rr.Resource)
		}
	}
	return configDump
}
// getResourceTypes parses the ?types= query parameter (a comma-separated list
// of short type names) into a deduplicated list of full xDS type URLs. Returns
// nil when the parameter is absent or empty.
func (s *DiscoveryServer) getResourceTypes(req *http.Request) []string {
	shortTypes := req.URL.Query().Get("types")
	if shortTypes == "" {
		return nil
	}
	typeSet := sets.New[string]()
	for _, short := range strings.Split(shortTypes, ",") {
		typeSet.Insert(v3.GetResourceType(short))
	}
	return typeSet.UnsortedList()
}
// getConfigDumpByResourceType runs the generator for each requested resource
// type against the connection and returns the results grouped by type URL.
// Secrets have private keys redacted, and Wasm extension configs have their
// secret env variable redacted, before inclusion. Types the proxy does not
// watch, or for which no generator exists, are silently skipped.
func (s *DiscoveryServer) getConfigDumpByResourceType(conn *Connection, req *model.PushRequest, ts []string) map[string][]*discoveryv3.Resource {
	dumps := make(map[string][]*discoveryv3.Resource)
	if req == nil {
		// No request supplied: synthesize a full push against the proxy's last push context.
		req = &model.PushRequest{Push: conn.proxy.LastPushContext, Start: time.Now(), Full: true}
	}
	for _, resourceType := range ts {
		w := conn.Watched(resourceType)
		if w == nil {
			// Not watched, skip
			continue
		}
		gen := s.findGenerator(resourceType, conn)
		if gen == nil {
			// No generator found, skip
			continue
		}
		if resource, _, err := gen.Generate(conn.proxy, w, req); err == nil {
			for _, rr := range resource {
				switch resourceType {
				case v3.SecretType:
					// Secrets must be redacted
					secret := &tls.Secret{}
					if err := rr.Resource.UnmarshalTo(secret); err != nil {
						istiolog.Warnf("failed to unmarshal secret: %v", err)
						continue
					}
					if secret.GetTlsCertificate() != nil {
						// Replace the private key bytes; the certificate itself is kept.
						secret.GetTlsCertificate().PrivateKey = &core.DataSource{
							Specifier: &core.DataSource_InlineBytes{
								InlineBytes: []byte("[redacted]"),
							},
						}
					}
					rr.Resource = protoconv.MessageToAny(secret)
					dumps[resourceType] = append(dumps[resourceType], rr)
				case v3.ExtensionConfigurationType:
					tce := &core.TypedExtensionConfig{}
					if err := rr.GetResource().UnmarshalTo(tce); err != nil {
						istiolog.Warnf("failed to unmarshal extension: %v", err)
						continue
					}
					switch tce.TypedConfig.TypeUrl {
					case xds.WasmHTTPFilterType:
						w := &wasm.Wasm{}
						if err := tce.TypedConfig.UnmarshalTo(w); err != nil {
							istiolog.Warnf("failed to unmarshal wasm filter: %v", err)
							continue
						}
						// Redact Wasm secret env variable.
						vmenvs := w.GetConfig().GetVmConfig().EnvironmentVariables
						if vmenvs != nil {
							if _, found := vmenvs.KeyValues[model.WasmSecretEnv]; found {
								vmenvs.KeyValues[model.WasmSecretEnv] = "<Redacted>"
							}
						}
						dumps[resourceType] = append(dumps[resourceType], &discoveryv3.Resource{
							Name:     w.Config.Name,
							Resource: protoconv.MessageToAny(w),
						})
					default:
						// Non-Wasm extensions are passed through unmodified.
						dumps[resourceType] = append(dumps[resourceType], rr)
					}
				default:
					dumps[resourceType] = append(dumps[resourceType], rr)
				}
			}
		} else {
			istiolog.Warnf("generate failed for request resource type (%v): %v", resourceType, err)
			continue
		}
	}
	return dumps
}
// connectionConfigDump converts the connection internal state into an Envoy Admin API config dump proto
// It is used in debugging to create a consistent object for comparison between Envoy and Pilot outputs
//
// It generates all dynamic resource types for the proxy, wraps each type in the
// corresponding admin *ConfigDump message, and assembles them in the order the
// admin API expects: bootstrap, clusters, (optionally endpoints), listeners,
// scoped routes, routes, secrets, extensions. Bootstrap and scoped routes are
// always empty placeholders, since Pilot does not produce them.
func (s *DiscoveryServer) connectionConfigDump(conn *Connection, includeEds bool) (*admin.ConfigDump, error) {
	req := &model.PushRequest{Push: conn.proxy.LastPushContext, Start: time.Now(), Full: true}
	version := req.Push.PushVersion
	dump := s.getConfigDumpByResourceType(conn, req, []string{
		v3.ClusterType,
		v3.ListenerType,
		v3.RouteType,
		v3.SecretType,
		v3.EndpointType,
		v3.ExtensionConfigurationType,
	})
	// Clusters.
	dynamicActiveClusters := make([]*admin.ClustersConfigDump_DynamicCluster, 0)
	for _, cluster := range dump[v3.ClusterType] {
		dynamicActiveClusters = append(dynamicActiveClusters, &admin.ClustersConfigDump_DynamicCluster{
			Cluster: cluster.Resource,
		})
	}
	clustersAny, err := protoconv.MessageToAnyWithError(&admin.ClustersConfigDump{
		VersionInfo:           version,
		DynamicActiveClusters: dynamicActiveClusters,
	})
	if err != nil {
		return nil, err
	}
	// Listeners.
	dynamicActiveListeners := make([]*admin.ListenersConfigDump_DynamicListener, 0)
	for _, listener := range dump[v3.ListenerType] {
		dynamicActiveListeners = append(dynamicActiveListeners, &admin.ListenersConfigDump_DynamicListener{
			Name: listener.Name,
			ActiveState: &admin.ListenersConfigDump_DynamicListenerState{
				Listener:    listener.Resource,
				VersionInfo: version,
			},
		})
	}
	listenersAny, err := protoconv.MessageToAnyWithError(&admin.ListenersConfigDump{
		VersionInfo:      version,
		DynamicListeners: dynamicActiveListeners,
	})
	if err != nil {
		return nil, err
	}
	// Routes.
	dynamicRouteConfig := make([]*admin.RoutesConfigDump_DynamicRouteConfig, 0)
	for _, route := range dump[v3.RouteType] {
		dynamicRouteConfig = append(dynamicRouteConfig, &admin.RoutesConfigDump_DynamicRouteConfig{
			VersionInfo: version,
			RouteConfig: route.Resource,
		})
	}
	routesAny, err := protoconv.MessageToAnyWithError(&admin.RoutesConfigDump{
		DynamicRouteConfigs: dynamicRouteConfig,
	})
	if err != nil {
		return nil, err
	}
	// Secrets (already redacted by getConfigDumpByResourceType).
	dynamicSecretsConfig := make([]*admin.SecretsConfigDump_DynamicSecret, 0)
	for _, secret := range dump[v3.SecretType] {
		dynamicSecretsConfig = append(dynamicSecretsConfig, &admin.SecretsConfigDump_DynamicSecret{
			VersionInfo: version,
			Secret:      secret.Resource,
		})
	}
	secretsAny, err := protoconv.MessageToAnyWithError(&admin.SecretsConfigDump{
		DynamicActiveSecrets: dynamicSecretsConfig,
	})
	if err != nil {
		return nil, err
	}
	// Extensions (ECDS).
	extensionsConfig := make([]*admin.EcdsConfigDump_EcdsFilterConfig, 0)
	for _, ext := range dump[v3.ExtensionConfigurationType] {
		extensionsConfig = append(extensionsConfig, &admin.EcdsConfigDump_EcdsFilterConfig{
			VersionInfo: version,
			EcdsFilter:  ext.Resource,
		})
	}
	extensionsAny, err := protoconv.MessageToAnyWithError(&admin.EcdsConfigDump{
		EcdsFilters: extensionsConfig,
	})
	if err != nil {
		return nil, err
	}
	var endpointsAny *anypb.Any
	// EDS is disabled by default for compatibility with Envoy config_dump interface
	if includeEds {
		endpointConfig := make([]*admin.EndpointsConfigDump_DynamicEndpointConfig, 0)
		for _, endpoint := range dump[v3.EndpointType] {
			endpointConfig = append(endpointConfig, &admin.EndpointsConfigDump_DynamicEndpointConfig{
				VersionInfo:    version,
				EndpointConfig: endpoint.Resource,
			})
		}
		endpointsAny, err = protoconv.MessageToAnyWithError(&admin.EndpointsConfigDump{
			DynamicEndpointConfigs: endpointConfig,
		})
		if err != nil {
			return nil, err
		}
	}
	// Empty placeholders required by the admin config_dump shape.
	bootstrapAny := protoconv.MessageToAny(&admin.BootstrapConfigDump{})
	scopedRoutesAny := protoconv.MessageToAny(&admin.ScopedRoutesConfigDump{})
	// The config dump must have all configs with connections specified in
	// https://www.envoyproxy.io/docs/envoy/latest/api-v2/admin/v2alpha/config_dump.proto
	configs := []*anypb.Any{
		bootstrapAny,
		clustersAny,
	}
	if includeEds {
		configs = append(configs, endpointsAny)
	}
	configs = append(configs,
		listenersAny,
		scopedRoutesAny,
		routesAny,
		secretsAny,
		extensionsAny,
	)
	configDump := &admin.ConfigDump{
		Configs: configs,
	}
	return configDump, nil
}
// injectTemplateHandler returns an HTTP handler that dumps the injection
// template(s) produced by the given webhook callback.
// Replaces dumping the template at startup.
func (s *DiscoveryServer) injectTemplateHandler(webhook func() map[string]string) func(http.ResponseWriter, *http.Request) {
	// TODO: we should split the inject template into smaller modules (separate one for dump core, etc),
	// and allow pods to select which patches will be selected. When this happen, this should return
	// all inject templates or take a param to select one.
	return func(w http.ResponseWriter, req *http.Request) {
		if webhook != nil {
			writeJSON(w, webhook(), req)
			return
		}
		// No webhook configured: nothing to dump.
		w.WriteHeader(http.StatusNotFound)
	}
}
// meshHandler dumps the mesh config
func (s *DiscoveryServer) meshHandler(w http.ResponseWriter, req *http.Request) {
	// Serialize the environment's current MeshConfig as JSON.
	writeJSON(w, s.Env.Mesh(), req)
}
// pushStatusHandler dumps the last recorded push status as JSON, or returns an
// empty response when no push has been recorded yet.
func (s *DiscoveryServer) pushStatusHandler(w http.ResponseWriter, req *http.Request) {
	model.LastPushMutex.Lock()
	defer model.LastPushMutex.Unlock()
	lastStatus := model.LastPushStatus
	if lastStatus == nil {
		return
	}
	body, err := lastStatus.StatusJSON()
	if err != nil {
		handleHTTPError(w, err)
		return
	}
	w.Header().Add("Content-Type", "application/json")
	_, _ = w.Write(body)
}
// PushContextDebug holds debug information for push context.
type PushContextDebug struct {
	// AuthorizationPolicies are the authz policies taken from the push context.
	AuthorizationPolicies *model.AuthorizationPolicies
	// NetworkGateways are all gateways reported by the push context's network manager.
	NetworkGateways []model.NetworkGateway
	// UnresolvedGateways are the gateways from the network manager's Unresolved set.
	UnresolvedGateways []model.NetworkGateway
}
// pushContextHandler dumps selected fields of the current PushContext.
func (s *DiscoveryServer) pushContextHandler(w http.ResponseWriter, req *http.Request) {
	pc := s.globalPushContext()
	if pc == nil {
		return
	}
	dbg := PushContextDebug{
		AuthorizationPolicies: pc.AuthzPolicies,
	}
	if nm := pc.NetworkManager(); nm != nil {
		dbg.NetworkGateways = nm.AllGateways()
		dbg.UnresolvedGateways = nm.Unresolved.AllGateways()
	}
	writeJSON(w, dbg, req)
}
// Debug lists all the supported debug endpoints.
func (s *DiscoveryServer) Debug(w http.ResponseWriter, req *http.Request) {
	// Field names are referenced by indexTmpl; do not rename.
	type debugEndpoint struct {
		Name string
		Href string
		Help string
	}
	endpointList := make([]debugEndpoint, 0, len(s.debugHandlers))
	for path, help := range s.debugHandlers {
		endpointList = append(endpointList, debugEndpoint{Name: path, Href: path, Help: help})
	}
	// Map iteration order is random; sort for a stable page.
	sort.Slice(endpointList, func(a, b int) bool { return endpointList[a].Name < endpointList[b].Name })
	if err := indexTmpl.Execute(w, endpointList); err != nil {
		istiolog.Errorf("Error in rendering index template %v", err)
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// list writes, as JSON, the names of all supported debug commands.
// The "list" command itself and pprof endpoints are excluded.
func (s *DiscoveryServer) list(w http.ResponseWriter, req *http.Request) {
	var cmdNames []string
	for k := range s.debugHandlers {
		// strings.ReplaceAll is the idiomatic form of Replace(..., -1).
		key := strings.ReplaceAll(k, "/debug/", "")
		// exclude current list command
		if key == "list" {
			continue
		}
		// can not support pprof commands
		if strings.Contains(key, "pprof") {
			continue
		}
		cmdNames = append(cmdNames, key)
	}
	// Sort for deterministic output across map iterations.
	sort.Strings(cmdNames)
	writeJSON(w, cmdNames, req)
}
// ndsz implements a status and debug interface for NDS.
// It is mapped to /debug/ndsz on the monitor port (15014).
func (s *DiscoveryServer) ndsz(w http.ResponseWriter, req *http.Request) {
	if s.handlePushRequest(w, req) {
		return
	}
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	if !con.proxy.Metadata.DNSCapture {
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte("DNS capture is not enabled in the proxy\n"))
		return
	}
	gen := s.Generators[v3.NameTableType]
	if gen == nil {
		return
	}
	nds, _, _ := gen.Generate(con.proxy, nil, &model.PushRequest{
		Push: con.proxy.LastPushContext,
		Full: true,
	})
	if len(nds) != 0 {
		writeJSON(w, nds[0], req)
	}
}
// Edsz implements a status and debug interface for EDS.
// It is mapped to /debug/edsz on the monitor port (15014).
func (s *DiscoveryServer) Edsz(w http.ResponseWriter, req *http.Request) {
	if s.handlePushRequest(w, req) {
		return
	}
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	clusterNames := con.Clusters()
	assignments := make([]jsonMarshalProto, 0, len(clusterNames))
	for _, name := range clusterNames {
		// Build the load assignment exactly as it would be pushed to the proxy.
		b := endpoints.NewEndpointBuilder(name, con.proxy, con.proxy.LastPushContext)
		assignments = append(assignments, jsonMarshalProto{b.BuildClusterLoadAssignment(s.Env.EndpointIndex)})
	}
	writeJSON(w, assignments, req)
}
// forceDisconnect terminates the XDS connection identified by the ?proxyID
// query parameter; the client is expected to reconnect on its own.
func (s *DiscoveryServer) forceDisconnect(w http.ResponseWriter, req *http.Request) {
	proxyID, con := s.getDebugConnection(req)
	if con == nil {
		s.errorHandler(w, proxyID, con)
		return
	}
	con.Stop()
	_, _ = w.Write([]byte("OK"))
}
// cloneProxy returns a shallow copy of proxy with a fresh mutex and a deep copy
// of the WatchedResources map, taken while holding the proxy's lock.
func cloneProxy(proxy *model.Proxy) *model.Proxy {
	if proxy == nil {
		return nil
	}
	proxy.Lock()
	defer proxy.Unlock()
	// nolint: govet
	dup := *proxy
	// The copy must not share the original's lock.
	dup.RWMutex = sync.RWMutex{}
	// clone WatchedResources which can be mutated when processing request
	dup.WatchedResources = make(map[string]*model.WatchedResource, len(proxy.WatchedResources))
	for name, wr := range proxy.WatchedResources {
		// nolint: govet
		wrCopy := *wr
		dup.WatchedResources[name] = &wrCopy
	}
	return &dup
}
// getProxyConnection returns a copy of the first connection whose ID contains
// proxyID (with its proxy cloned), or nil when no such connection exists.
func (s *DiscoveryServer) getProxyConnection(proxyID string) *Connection {
	for _, c := range s.Clients() {
		if !strings.Contains(c.conID, proxyID) {
			continue
		}
		cpy := *c
		cpy.proxy = cloneProxy(c.proxy)
		return &cpy
	}
	return nil
}
// instancesz dumps the service targets of every connected proxy, keyed by proxy ID.
func (s *DiscoveryServer) instancesz(w http.ResponseWriter, req *http.Request) {
	instances := map[string][]model.ServiceTarget{}
	for _, con := range s.Clients() {
		// The nil check must precede RLock: calling RLock on a nil *Proxy
		// panics, which made the original post-lock nil check unreachable.
		if con.proxy == nil {
			continue
		}
		con.proxy.RLock()
		instances[con.proxy.ID] = con.proxy.ServiceTargets
		con.proxy.RUnlock()
	}
	writeJSON(w, instances, req)
}
// networkz dumps all gateways known to the environment's network manager.
func (s *DiscoveryServer) networkz(w http.ResponseWriter, req *http.Request) {
	// Nothing to report when the environment or its network manager is absent.
	if s.Env == nil || s.Env.NetworkManager == nil {
		return
	}
	writeJSON(w, s.Env.NetworkManager.AllGateways(), req)
}
// mcsz dumps the multi-cluster (MCS) services, sorted for deterministic output.
func (s *DiscoveryServer) mcsz(w http.ResponseWriter, req *http.Request) {
	svcs := sortMCSServices(s.Env.MCSServices())
	writeJSON(w, svcs, req)
}
// sortMCSServices sorts svcs in place by (cluster, namespace, name) and returns it.
func sortMCSServices(svcs []model.MCSServiceInfo) []model.MCSServiceInfo {
	sort.Slice(svcs, func(i, j int) bool {
		// Compare a lower-priority key only when all higher-priority keys are
		// equal. The previous comparator returned true as soon as ANY key
		// compared less (e.g. cluster greater but namespace less), which is not
		// a valid strict weak ordering and produces an incorrectly sorted list.
		if c := strings.Compare(svcs[i].Cluster.String(), svcs[j].Cluster.String()); c != 0 {
			return c < 0
		}
		if c := strings.Compare(svcs[i].Namespace, svcs[j].Namespace); c != 0 {
			return c < 0
		}
		return strings.Compare(svcs[i].Name, svcs[j].Name) < 0
	})
	return svcs
}
// clusterz dumps the remote clusters, when a lister has been configured.
func (s *DiscoveryServer) clusterz(w http.ResponseWriter, req *http.Request) {
	lister := s.ListRemoteClusters
	if lister == nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	writeJSON(w, lister(), req)
}
// handlePushRequest handles a ?push=true query param and triggers a push.
// A boolean response is returned to indicate if the caller should continue
func (s *DiscoveryServer) handlePushRequest(w http.ResponseWriter, req *http.Request) bool {
	if err := req.ParseForm(); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte("Failed to parse request\n"))
		return true
	}
	if req.Form.Get("push") == "" {
		// No push requested; let the caller continue with its own handling.
		return false
	}
	AdsPushAll(s)
	_, _ = fmt.Fprintf(w, "Pushed to %d servers\n", s.adsClientCount())
	return true
}
// getDebugConnection fetches the Connection requested by proxyID
func (s *DiscoveryServer) getDebugConnection(req *http.Request) (string, *Connection) {
	proxyID := req.URL.Query().Get("proxyID")
	if proxyID == "" {
		return "", nil
	}
	return proxyID, s.getProxyConnection(proxyID)
}
// errorHandler reports why a debug connection lookup failed: either the caller
// omitted proxyID, or no matching proxy is connected to this instance.
func (s *DiscoveryServer) errorHandler(w http.ResponseWriter, proxyID string, con *Connection) {
	switch {
	case proxyID == "":
		w.WriteHeader(http.StatusBadRequest)
		_, _ = w.Write([]byte("You must provide a proxyID in the query string\n"))
	case con == nil:
		// We can't guarantee the Pilot we are connected to has a connection to the proxy we requested
		// There isn't a great way around this, but for debugging purposes its suitable to have the caller retry.
		w.WriteHeader(http.StatusNotFound)
		_, _ = w.Write([]byte("Proxy not connected to this Pilot instance. It may be connected to another instance.\n"))
	}
}
// jsonMarshalProto wraps a proto.Message so it can be marshaled with the standard encoding/json library
type jsonMarshalProto struct {
	proto.Message
}

// MarshalJSON implements json.Marshaler by delegating to the proto-aware
// protomarshal helper, which handles proto field naming and Any types.
func (p jsonMarshalProto) MarshalJSON() ([]byte, error) {
	return protomarshal.Marshal(p.Message)
}
// writeJSON writes a json payload, handling content type, marshaling, and errors
func writeJSON(w http.ResponseWriter, obj any, req *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	var (
		payload []byte
		err     error
	)
	// ?pretty selects indented output.
	if req.URL.Query().Has("pretty") {
		payload, err = config.ToPrettyJSON(obj)
	} else {
		payload, err = config.ToJSON(obj)
	}
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		_, _ = w.Write([]byte(err.Error()))
		return
	}
	if _, err = w.Write(payload); err != nil {
		w.WriteHeader(http.StatusInternalServerError)
	}
}
// handleHTTPError writes an error message to the response
func handleHTTPError(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(err.Error()))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"bytes"
"encoding/json"
"net/http"
"net/url"
"strconv"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
anypb "google.golang.org/protobuf/types/known/anypb"
"istio.io/istio/pilot/pkg/model"
v3 "istio.io/istio/pilot/pkg/xds/v3"
)
// activeNamespaceDebuggers lists the debug endpoints that identities outside
// the system namespace are allowed to query (see parseAndValidateDebugRequest).
var activeNamespaceDebuggers = map[string]struct{}{
	"config_dump": {},
	"ndsz":        {},
	"edsz":        {},
}
// DebugGen is a Generator for istio debug info
type DebugGen struct {
	// Server is the discovery server backing the debug endpoints.
	Server *DiscoveryServer
	// SystemNamespace is the namespace whose identities may access every debug endpoint;
	// other identities are limited to activeNamespaceDebuggers.
	SystemNamespace string
	// DebugMux routes /debug/* paths to their registered handlers.
	DebugMux *http.ServeMux
}
// ResponseCapture is an http.ResponseWriter that records a response in memory
// instead of sending it to a client: the body is buffered, and headers plus the
// status code (under the synthetic "statusCode" key) are kept in a map.
type ResponseCapture struct {
	body        *bytes.Buffer
	header      map[string]string
	wroteHeader bool
}

// Header returns a copy of the captured headers as an http.Header.
func (r *ResponseCapture) Header() http.Header {
	header := make(http.Header)
	for k, v := range r.header {
		header.Set(k, v)
	}
	return header
}

// Write buffers the response body.
func (r *ResponseCapture) Write(i []byte) (int, error) {
	return r.body.Write(i)
}

// WriteHeader records the status code under the "statusCode" header key and
// marks the capture as having written a header. The previous value receiver
// could never persist wroteHeader on the capture, so consumers checking it
// always saw false and silently discarded captured headers; pointer receivers
// fix that (callers already use *ResponseCapture via NewResponseCapture).
func (r *ResponseCapture) WriteHeader(statusCode int) {
	r.header["statusCode"] = strconv.Itoa(statusCode)
	r.wroteHeader = true
}

// NewResponseCapture returns an empty ResponseCapture ready for use.
func NewResponseCapture() *ResponseCapture {
	return &ResponseCapture{
		header:      make(map[string]string),
		body:        new(bytes.Buffer),
		wroteHeader: false,
	}
}
// NewDebugGen builds a DebugGen that serves debug info from the given server and mux.
func NewDebugGen(s *DiscoveryServer, systemNamespace string, debugMux *http.ServeMux) *DebugGen {
	return &DebugGen{
		Server:          s,
		SystemNamespace: systemNamespace,
		DebugMux:        debugMux,
	}
}
// Generate XDS debug responses according to the incoming debug request
func (dg *DebugGen) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	// Only authenticated proxies may fetch debug info, and exactly one
	// resource name must be requested.
	if err := validateProxyAuthentication(proxy, w); err != nil {
		return nil, model.DefaultXdsLogDetails, err
	}
	// The resource name doubles as a debug URL path; check the requesting
	// identity is allowed to access it.
	resourceName, err := parseAndValidateDebugRequest(proxy, w, dg)
	if err != nil {
		return nil, model.DefaultXdsLogDetails, err
	}
	// Run the matching /debug handler and capture its output into a buffer.
	buffer := processDebugRequest(dg, resourceName)
	// NOTE(review): customBytes.CheckLen (github.com/AdamKorcz/bugdetectors) is
	// fuzzing instrumentation that inspects and passes through the byte slice;
	// it should not ship in production — remove it together with its import.
	res := model.Resources{&discovery.Resource{
		Name: resourceName,
		Resource: &anypb.Any{
			TypeUrl: v3.DebugType,
			Value:   customBytes.CheckLen(buffer.Bytes(), "/src/istio/pilot/pkg/xds/debuggen.go:101:13 (May be slightly inaccurate) NEW_LINEbuffer.Bytes()"),
		},
	}}
	return res, model.DefaultXdsLogDetails, nil
}
// GenerateDeltas XDS debug responses according to the incoming debug request.
// Debug responses are always full (usedDelta=true with the complete resource).
func (dg *DebugGen) GenerateDeltas(
	proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	// Same validation pipeline as Generate: authenticated proxy, single
	// resource, authorized debug path.
	if err := validateProxyAuthentication(proxy, w); err != nil {
		return nil, nil, model.DefaultXdsLogDetails, true, err
	}
	resourceName, err := parseAndValidateDebugRequest(proxy, w, dg)
	if err != nil {
		return nil, nil, model.DefaultXdsLogDetails, true, err
	}
	// Run the matching /debug handler and capture its output into a buffer.
	buffer := processDebugRequest(dg, resourceName)
	// NOTE(review): customBytes.CheckLen (github.com/AdamKorcz/bugdetectors) is
	// fuzzing instrumentation that inspects and passes through the byte slice;
	// it should not ship in production — remove it together with its import.
	res := model.Resources{&discovery.Resource{
		Name: resourceName,
		Resource: &anypb.Any{
			TypeUrl: v3.DebugType,
			Value:   customBytes.CheckLen(buffer.Bytes(), "/src/istio/pilot/pkg/xds/debuggen.go:128:13 (May be slightly inaccurate) NEW_LINEbuffer.Bytes()"),
		},
	}}
	return res, nil, model.DefaultXdsLogDetails, true, nil
}
// validateProxyAuthentication checks that the proxy has a verified identity and
// that exactly one debug resource is being requested.
func validateProxyAuthentication(proxy *model.Proxy, w *model.WatchedResource) error {
	if proxy.VerifiedIdentity == nil {
		log.Warnf("proxy %s is not authorized to receive debug. Ensure you are connecting over TLS port and are authenticated.", proxy.ID)
		return status.Error(codes.Unauthenticated, "authentication required")
	}
	// len(nil) == 0, so a nil slice is rejected here as well.
	if len(w.ResourceNames) != 1 {
		return status.Error(codes.InvalidArgument, "exactly one debug request is required")
	}
	return nil
}
// parseAndValidateDebugRequest extracts the debug resource name from the watched
// resource and authorizes the requesting identity against it. Identities outside
// the system namespace may only access the endpoints in activeNamespaceDebuggers.
func parseAndValidateDebugRequest(proxy *model.Proxy, w *model.WatchedResource, dg *DebugGen) (string, error) {
	resourceName := w.ResourceNames[0]
	// The parse error was previously discarded; a malformed resource name left
	// u nil and the u.Path access below panicked. Reject it explicitly.
	u, err := url.Parse(resourceName)
	if err != nil {
		return "", status.Errorf(codes.InvalidArgument, "invalid debug resource name: %q", resourceName)
	}
	debugType := u.Path
	identity := proxy.VerifiedIdentity
	if identity.Namespace != dg.SystemNamespace {
		if _, ok := activeNamespaceDebuggers[debugType]; !ok {
			return "", status.Errorf(codes.PermissionDenied, "the debug info is not available for current identity: %q", identity)
		}
	}
	return resourceName, nil
}
// processDebugRequest dispatches resourceName as a GET to the internal /debug
// mux and returns the captured output. When the handler wrote a header (and at
// least one header value was captured), the headers are JSON-encoded and
// prepended to the body.
func processDebugRequest(dg *DebugGen, resourceName string) bytes.Buffer {
	var buffer bytes.Buffer
	debugURL := "/debug/" + resourceName
	// NOTE(review): errors are discarded here; a resourceName that fails URL
	// parsing would yield a nil request — presumably prevented by the earlier
	// parseAndValidateDebugRequest call, but worth confirming.
	hreq, _ := http.NewRequest(http.MethodGet, debugURL, nil)
	handler, _ := dg.DebugMux.Handler(hreq)
	response := NewResponseCapture()
	handler.ServeHTTP(response, hreq)
	if response.wroteHeader && len(response.header) >= 1 {
		header, _ := json.Marshal(response.header)
		buffer.Write(header)
	}
	buffer.Write(response.body.Bytes())
	return buffer
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
"istio.io/istio/pilot/pkg/features"
istiogrpc "istio.io/istio/pilot/pkg/grpc"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
v3 "istio.io/istio/pilot/pkg/xds/v3"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// deltaLog is the logging scope for the delta (incremental) xDS implementation.
var deltaLog = istiolog.RegisterScope("delta", "delta xds debugging")
// StreamDeltas serves the delta (incremental) xDS protocol for one client
// stream: it gates on server readiness and rate limits, authenticates the
// peer, spawns a receive goroutine, then loops handling client requests
// (priority) and server-initiated pushes until the stream terminates.
func (s *DiscoveryServer) StreamDeltas(stream DeltaDiscoveryStream) error {
	if knativeEnv != "" && firstRequest.Load() {
		// How scaling works in knative is the first request is the "loading" request. During
		// loading request, concurrency=1. Once that request is done, concurrency is enabled.
		// However, the XDS stream is long lived, so the first request would block all others. As a
		// result, we should exit the first request immediately; clients will retry.
		firstRequest.Store(false)
		return status.Error(codes.Unavailable, "server warmup not complete; try again")
	}
	// Check if server is ready to accept clients and process new requests.
	// Currently ready means caches have been synced and hence can build
	// clusters correctly. Without this check, InitContext() call below would
	// initialize with empty config, leading to reconnected Envoys loosing
	// configuration. This is an additional safety check inaddition to adding
	// cachesSynced logic to readiness probe to handle cases where kube-proxy
	// ip tables update latencies.
	// See https://github.com/istio/istio/issues/25495.
	if !s.IsServerReady() {
		return errors.New("server is not ready to serve discovery information")
	}
	ctx := stream.Context()
	peerAddr := "0.0.0.0"
	if peerInfo, ok := peer.FromContext(ctx); ok {
		peerAddr = peerInfo.Addr.String()
	}
	if err := s.WaitForRequestLimit(stream.Context()); err != nil {
		deltaLog.Warnf("ADS: %q exceeded rate limit: %v", peerAddr, err)
		return status.Errorf(codes.ResourceExhausted, "request rate limit exceeded: %v", err)
	}
	ids, err := s.authenticate(ctx)
	if err != nil {
		return status.Error(codes.Unauthenticated, err.Error())
	}
	if ids != nil {
		deltaLog.Debugf("Authenticated XDS: %v with identity %v", peerAddr, ids)
	} else {
		deltaLog.Debugf("Unauthenticated XDS: %v", peerAddr)
	}
	// InitContext returns immediately if the context was already initialized.
	if err = s.globalPushContext().InitContext(s.Env, nil, nil); err != nil {
		// Error accessing the data - log and close, maybe a different pilot replica
		// has more luck
		deltaLog.Warnf("Error reading config %v", err)
		return status.Error(codes.Unavailable, "error reading config")
	}
	con := newDeltaConnection(peerAddr, stream)
	// Do not call: defer close(con.pushChannel). The push channel will be garbage collected
	// when the connection is no longer used. Closing the channel can cause subtle race conditions
	// with push. According to the spec: "It's only necessary to close a channel when it is important
	// to tell the receiving goroutines that all data have been sent."
	// Block until either a request is received or a push is triggered.
	// We need 2 go routines because 'read' blocks in Recv().
	go s.receiveDelta(con, ids)
	// Wait for the proxy to be fully initialized before we start serving traffic. Because
	// initialization doesn't have dependencies that will block, there is no need to add any timeout
	// here. Prior to this explicit wait, we were implicitly waiting by receive() not sending to
	// reqChannel and the connection not being enqueued for pushes to pushChannel until the
	// initialization is complete.
	<-con.initialized
	for {
		// Go select{} statements are not ordered; the same channel can be chosen many times.
		// For requests, these are higher priority (client may be blocked on startup until these are done)
		// and often very cheap to handle (simple ACK), so we check it first.
		select {
		case req, ok := <-con.deltaReqChan:
			if ok {
				if err := s.processDeltaRequest(req, con); err != nil {
					return err
				}
			} else {
				// Remote side closed connection or error processing the request.
				return <-con.errorChan
			}
		case <-con.stop:
			return nil
		default:
		}
		// If there wasn't already a request, poll for requests and pushes. Note: if we have a huge
		// amount of incoming requests, we may still send some pushes, as we do not `continue` above;
		// however, requests will be handled ~2x as much as pushes. This ensures a wave of requests
		// cannot completely starve pushes. However, this scenario is unlikely.
		select {
		case req, ok := <-con.deltaReqChan:
			if ok {
				if err := s.processDeltaRequest(req, con); err != nil {
					return err
				}
			} else {
				// Remote side closed connection or error processing the request.
				return <-con.errorChan
			}
		case pushEv := <-con.pushChannel:
			err := s.pushConnectionDelta(con, pushEv)
			pushEv.done()
			if err != nil {
				return err
			}
		case <-con.stop:
			return nil
		}
	}
}
// pushConnectionDelta computes and sends the new configuration for a connection
// in response to a push event.
func (s *DiscoveryServer) pushConnectionDelta(con *Connection, pushEv *Event) error {
	req := pushEv.pushRequest
	if req.Full {
		// Update Proxy with current information.
		s.computeProxyState(con.proxy, req)
	}
	if !s.ProxyNeedsPush(con.proxy, req) {
		deltaLog.Debugf("Skipping push to %v, no updates required", con.conID)
		if req.Full {
			// Only report for full versions, incremental pushes do not have a new version
			reportAllEventsForProxyNoPush(con, s.StatusReporter, req.Push.LedgerVersion)
		}
		return nil
	}
	// Push every watched resource type; each generator decides for itself
	// whether the event requires sending anything.
	for _, watched := range con.orderWatchedResources() {
		if err := s.pushDeltaXds(con, watched, req); err != nil {
			return err
		}
	}
	if req.Full {
		// Report all events for unwatched resources. Watched resources will be reported in pushXds or on ack.
		reportEventsForUnWatched(con, s.StatusReporter, req.Push.LedgerVersion)
	}
	proxiesConvergeDelay.Record(time.Since(req.Start).Seconds())
	return nil
}
// receiveDelta is the read loop for a delta stream. It blocks in Recv(),
// performs first-request initialization (node registration), and forwards
// subsequent requests to con.deltaReqChan for the StreamDeltas loop to process.
// On exit it closes the request/error channels and unblocks StreamDeltas.
func (s *DiscoveryServer) receiveDelta(con *Connection, identities []string) {
	defer func() {
		close(con.deltaReqChan)
		close(con.errorChan)
		// Close the initialized channel, if its not already closed, to prevent blocking the stream
		select {
		case <-con.initialized:
		default:
			close(con.initialized)
		}
	}()
	firstRequest := true
	for {
		req, err := con.deltaStream.Recv()
		if err != nil {
			// Graceful termination (EOF, canceled) is logged but not reported as an error.
			if istiogrpc.IsExpectedGRPCError(err) {
				deltaLog.Infof("ADS: %q %s terminated", con.peerAddr, con.conID)
				return
			}
			con.errorChan <- err
			deltaLog.Errorf("ADS: %q %s terminated with error: %v", con.peerAddr, con.conID, err)
			totalXDSInternalErrors.Increment()
			return
		}
		// This should be only set for the first request. The node id may not be set - for example malicious clients.
		if firstRequest {
			// probe happens before envoy sends first xDS request
			if req.TypeUrl == v3.HealthInfoType {
				log.Warnf("ADS: %q %s send health check probe before normal xDS request", con.peerAddr, con.conID)
				continue
			}
			firstRequest = false
			if req.Node == nil || req.Node.Id == "" {
				con.errorChan <- status.New(codes.InvalidArgument, "missing node information").Err()
				return
			}
			if err := s.initConnection(req.Node, con, identities); err != nil {
				con.errorChan <- err
				return
			}
			// Registered successfully; ensure we deregister when this loop exits.
			defer s.closeConnection(con)
			deltaLog.Infof("ADS: new delta connection for node:%s", con.conID)
		}
		select {
		case con.deltaReqChan <- req:
		case <-con.deltaStream.Context().Done():
			deltaLog.Infof("ADS: %q %s terminated with stream closed", con.peerAddr, con.conID)
			return
		}
	}
}
// sendDelta writes a delta response to the stream, recording send time, and on
// success records the sent nonce on the corresponding watched resource.
func (conn *Connection) sendDelta(res *discovery.DeltaDiscoveryResponse) error {
	doSend := func() error {
		start := time.Now()
		defer func() { recordSendTime(time.Since(start)) }()
		return conn.deltaStream.Send(res)
	}
	if err := istiogrpc.Send(conn.deltaStream.Context(), doSend); err != nil {
		deltaLog.Infof("Timeout writing %s", conn.conID)
		xdsResponseWriteTimeouts.Increment()
		return err
	}
	// Debug responses are not tracked as watched resources.
	if strings.HasPrefix(res.TypeUrl, v3.DebugType) {
		return nil
	}
	conn.proxy.Lock()
	defer conn.proxy.Unlock()
	wr := conn.proxy.WatchedResources[res.TypeUrl]
	if wr == nil {
		wr = &model.WatchedResource{TypeUrl: res.TypeUrl}
		conn.proxy.WatchedResources[res.TypeUrl] = wr
	}
	wr.NonceSent = res.Nonce
	if features.EnableUnsafeDeltaTest {
		wr.LastResources = applyDelta(wr.LastResources, res)
	}
	return nil
}
// processDeltaRequest is handling one request. This is currently called from the 'main' thread, which also
// handles 'push' requests and close - the code will eventually call the 'push' code, and it needs more mutex
// protection. Original code avoided the mutexes by doing both 'push' and 'process requests' in same thread.
func (s *DiscoveryServer) processDeltaRequest(req *discovery.DeltaDiscoveryRequest, con *Connection) error {
	stype := v3.GetShortType(req.TypeUrl)
	deltaLog.Debugf("ADS:%s: REQ %s resources sub:%d unsub:%d nonce:%s", stype,
		con.conID, len(req.ResourceNamesSubscribe), len(req.ResourceNamesUnsubscribe), req.ResponseNonce)
	// Health check probes never generate config; handle and return.
	if req.TypeUrl == v3.HealthInfoType {
		s.handleWorkloadHealthcheck(con.proxy, deltaToSotwRequest(req))
		return nil
	}
	// Debug requests are answered immediately, bypassing ACK bookkeeping.
	if strings.HasPrefix(req.TypeUrl, v3.DebugType) {
		return s.pushDeltaXds(con,
			&model.WatchedResource{TypeUrl: req.TypeUrl, ResourceNames: req.ResourceNamesSubscribe},
			&model.PushRequest{Full: true, Push: con.proxy.LastPushContext})
	}
	if s.StatusReporter != nil {
		s.StatusReporter.RegisterEvent(con.conID, req.TypeUrl, req.ResponseNonce)
	}
	// Apply xDS ACK/NACK rules; pure ACKs need no response.
	shouldRespond := s.shouldRespondDelta(con, req)
	if !shouldRespond {
		return nil
	}
	subs, _ := deltaWatchedResources(nil, req)
	request := &model.PushRequest{
		Full:   true,
		Push:   con.proxy.LastPushContext,
		Reason: model.NewReasonStats(model.ProxyRequest),
		// The usage of LastPushTime (rather than time.Now()), is critical here for correctness; This time
		// is used by the XDS cache to determine if a entry is stale. If we use Now() with an old push context,
		// we may end up overriding active cache entries with stale ones.
		Start: con.proxy.LastPushTime,
		Delta: model.ResourceDelta{
			// Record sub/unsub, but drop synthetic wildcard info
			Subscribed:   sets.New(subs...),
			Unsubscribed: sets.New(req.ResourceNamesUnsubscribe...).Delete("*"),
		},
	}
	// SidecarScope for the proxy may has not been updated based on this pushContext.
	// It can happen when `processRequest` comes after push context has been updated(s.initPushContext),
	// but before proxy's SidecarScope has been updated(s.updateProxy).
	if con.proxy.SidecarScope != nil && con.proxy.SidecarScope.Version != request.Push.PushVersion {
		s.computeProxyState(con.proxy, request)
	}
	return s.pushDeltaXds(con, con.Watched(req.TypeUrl), request)
}
// shouldRespondDelta determines whether this request needs to be responded back. It applies the ack/nack rules as per xds protocol
// using WatchedResource for previous state and discovery request for the current state.
func (s *DiscoveryServer) shouldRespondDelta(con *Connection, request *discovery.DeltaDiscoveryRequest) bool {
	stype := v3.GetShortType(request.TypeUrl)
	// If there is an error in request that means previous response is erroneous.
	// We do not have to respond in that case. In this case request's version info
	// will be different from the version sent. But it is fragile to rely on that.
	if request.ErrorDetail != nil {
		errCode := codes.Code(request.ErrorDetail.Code)
		deltaLog.Warnf("ADS:%s: ACK ERROR %s %s:%s", stype, con.conID, errCode.String(), request.ErrorDetail.GetMessage())
		incrementXDSRejects(request.TypeUrl, con.proxy.ID, errCode.String())
		return false
	}
	con.proxy.RLock()
	previousInfo := con.proxy.WatchedResources[request.TypeUrl]
	con.proxy.RUnlock()
	// This can happen in two cases:
	// 1. Envoy initially send request to Istiod
	// 2. Envoy reconnect to Istiod i.e. Istiod does not have
	// information about this typeUrl, but Envoy sends response nonce - either
	// because Istiod is restarted or Envoy disconnects and reconnects.
	// We should always respond with the current resource names.
	if previousInfo == nil {
		if len(request.InitialResourceVersions) > 0 {
			deltaLog.Debugf("ADS:%s: RECONNECT %s %s resources:%v", stype, con.conID, request.ResponseNonce, len(request.InitialResourceVersions))
		} else {
			deltaLog.Debugf("ADS:%s: INIT %s %s", stype, con.conID, request.ResponseNonce)
		}
		con.proxy.Lock()
		defer con.proxy.Unlock()
		res, wildcard := deltaWatchedResources(nil, request)
		con.proxy.WatchedResources[request.TypeUrl] = &model.WatchedResource{
			TypeUrl:       request.TypeUrl,
			ResourceNames: res,
			Wildcard:      wildcard,
		}
		// For all EDS requests that we have already responded with in the same stream let us
		// force the response. It is important to respond to those requests for Envoy to finish
		// warming of those resources(Clusters).
		// This can happen with the following sequence
		// 1. Envoy disconnects and reconnects to Istiod.
		// 2. Envoy sends EDS request and we respond with it.
		// 3. Envoy sends CDS request and we respond with clusters.
		// 4. Envoy detects a change in cluster state and tries to warm those clusters and send EDS request for them.
		// 5. We should respond to the EDS request with Endpoints to let Envoy finish cluster warming.
		// Refer to https://github.com/envoyproxy/envoy/issues/13009 for more details.
		for _, dependent := range warmingDependencies(request.TypeUrl) {
			if dwr, exists := con.proxy.WatchedResources[dependent]; exists {
				dwr.AlwaysRespond = true
			}
		}
		return true
	}
	// If there is mismatch in the nonce, that is a case of expired/stale nonce.
	// A nonce becomes stale following a newer nonce being sent to Envoy.
	// TODO: due to concurrent unsubscribe, this probably doesn't make sense. Do we need any logic here?
	if request.ResponseNonce != "" && request.ResponseNonce != previousInfo.NonceSent {
		deltaLog.Debugf("ADS:%s: REQ %s Expired nonce received %s, sent %s", stype,
			con.conID, request.ResponseNonce, previousInfo.NonceSent)
		xdsExpiredNonce.With(typeTag.Value(v3.GetMetricType(request.TypeUrl))).Increment()
		return false
	}
	// If it comes here, that means nonce match. This an ACK. We should record
	// the ack details and respond if there is a change in resource names.
	con.proxy.Lock()
	previousResources := con.proxy.WatchedResources[request.TypeUrl].ResourceNames
	currentResources, _ := deltaWatchedResources(previousResources, request)
	con.proxy.WatchedResources[request.TypeUrl].NonceAcked = request.ResponseNonce
	con.proxy.WatchedResources[request.TypeUrl].ResourceNames = currentResources
	// AlwaysRespond is a one-shot flag: consume it under the lock.
	alwaysRespond := previousInfo.AlwaysRespond
	previousInfo.AlwaysRespond = false
	con.proxy.Unlock()
	subChanged := !slices.EqualUnordered(previousResources, currentResources)
	// Spontaneous DeltaDiscoveryRequests from the client.
	// This can be done to dynamically add or remove elements from the tracked resource_names set.
	// In this case response_nonce is empty.
	spontaneousReq := request.ResponseNonce == ""
	// It is invalid in the below two cases:
	// 1. no subscribed resources change from spontaneous delta request.
	// 2. subscribed resources changes from ACK.
	if spontaneousReq && !subChanged || !spontaneousReq && subChanged {
		deltaLog.Errorf("ADS:%s: Subscribed resources check mismatch: %v vs %v", stype, spontaneousReq, subChanged)
		if features.EnableUnsafeAssertions {
			panic(fmt.Sprintf("ADS:%s: Subscribed resources check mismatch: %v vs %v", stype, spontaneousReq, subChanged))
		}
	}
	// Envoy can send two DiscoveryRequests with same version and nonce
	// when it detects a new resource. We should respond if they change.
	if !subChanged {
		// We should always respond "alwaysRespond" marked requests to let Envoy finish warming
		// even though Nonce match and it looks like an ACK.
		if alwaysRespond {
			deltaLog.Infof("ADS:%s: FORCE RESPONSE %s for warming.", stype, con.conID)
			return true
		}
		deltaLog.Debugf("ADS:%s: ACK %s %s", stype, con.conID, request.ResponseNonce)
		return false
	}
	deltaLog.Debugf("ADS:%s: RESOURCE CHANGE previous resources: %v, new resources: %v %s %s", stype,
		previousResources, currentResources, con.conID, request.ResponseNonce)
	return true
}
// pushDeltaXds pushes a Delta XDS resource for the given connection.
// It generates the resources for the watched type (optionally narrowed to the
// delta subset the client requested), computes removals, sends the response on
// the delta stream, and records metrics and logs. Returning nil without sending
// means there was nothing to push; that case is reported to the StatusReporter
// as an ACK of the current ledger version.
func (s *DiscoveryServer) pushDeltaXds(con *Connection,
	w *model.WatchedResource, req *model.PushRequest,
) error {
	if w == nil {
		return nil
	}
	gen := s.findGenerator(w.TypeUrl, con)
	if gen == nil {
		return nil
	}
	t0 := time.Now()

	originalW := w
	// If delta is set, client is requesting new resources or removing old ones. We should just generate the
	// new resources it needs, rather than the entire set of known resources.
	// Note: we do not need to account for unsubscribed resources as these are handled by parent removal;
	// See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#deleting-resources.
	// This means if there are only removals, we will not respond.
	var logFiltered string
	if !req.Delta.IsEmpty() && !requiresResourceNamesModification(w.TypeUrl) {
		// Some types opt out of this and natively handle req.Delta
		logFiltered = " filtered:" + strconv.Itoa(len(w.ResourceNames)-len(req.Delta.Subscribed))
		w = &model.WatchedResource{
			TypeUrl:       w.TypeUrl,
			ResourceNames: req.Delta.Subscribed.UnsortedList(),
		}
	}

	var res model.Resources
	var deletedRes model.DeletedResources
	var logdata model.XdsLogDetails
	var usedDelta bool
	var err error
	switch g := gen.(type) {
	case model.XdsDeltaResourceGenerator:
		res, deletedRes, logdata, usedDelta, err = g.GenerateDeltas(con.proxy, req, w)
		if features.EnableUnsafeDeltaTest {
			// Cross-check the delta result against a full (SotW) generation to
			// catch correctness bugs and missed optimizations in delta generators.
			fullRes, l, _ := g.Generate(con.proxy, originalW, req)
			s.compareDiff(con, originalW, fullRes, res, deletedRes, usedDelta, req.Delta, l.Incremental)
		}
	case model.XdsResourceGenerator:
		res, logdata, err = g.Generate(con.proxy, w, req)
	}
	if err != nil || (res == nil && deletedRes == nil) {
		// If we have nothing to send, report that we got an ACK for this version.
		if s.StatusReporter != nil {
			s.StatusReporter.RegisterEvent(con.conID, w.TypeUrl, req.Push.LedgerVersion)
		}
		return err
	}
	defer func() { recordPushTime(w.TypeUrl, time.Since(t0)) }()
	resp := &discovery.DeltaDiscoveryResponse{
		ControlPlane: ControlPlane(),
		TypeUrl:      w.TypeUrl,
		// TODO: send different version for incremental eds
		SystemVersionInfo: req.Push.PushVersion,
		Nonce:             nonce(req.Push.LedgerVersion),
		Resources:         res,
	}
	currentResources := slices.Map(res, func(r *discovery.Resource) string {
		return r.Name
	})
	if usedDelta {
		resp.RemovedResources = deletedRes
	} else if req.Full {
		// similar to sotw: anything that was watched but not generated is removed.
		subscribed := sets.New(w.ResourceNames...)
		removed := subscribed.DeleteAll(currentResources...)
		resp.RemovedResources = sets.SortedList(removed)
	}
	if len(resp.RemovedResources) > 0 {
		deltaLog.Debugf("ADS:%v REMOVE for node:%s %v", v3.GetShortType(w.TypeUrl), con.conID, resp.RemovedResources)
	}
	if shouldSetWatchedResources(w) {
		// this is probably a bad idea...
		con.proxy.Lock()
		w.ResourceNames = currentResources
		con.proxy.Unlock()
	}
	configSize := ResourceSize(res)
	configSizeBytes.With(typeTag.Value(w.TypeUrl)).Record(float64(configSize))

	ptype := "PUSH"
	info := ""
	if logdata.Incremental {
		ptype = "PUSH INC"
	}
	if len(logdata.AdditionalInfo) > 0 {
		info = " " + logdata.AdditionalInfo
	}
	if len(logFiltered) > 0 {
		info += logFiltered
	}

	if err := con.sendDelta(resp); err != nil {
		if recordSendError(w.TypeUrl, err) {
			deltaLog.Warnf("%s: Send failure for node:%s resources:%d size:%s%s: %v",
				v3.GetShortType(w.TypeUrl), con.proxy.ID, len(res), util.ByteCount(configSize), info, err)
		}
		return err
	}

	switch {
	case !req.Full:
		if deltaLog.DebugEnabled() {
			deltaLog.Debugf("%s: %s%s for node:%s resources:%d size:%s%s",
				v3.GetShortType(w.TypeUrl), ptype, req.PushReason(), con.proxy.ID, len(res), util.ByteCount(configSize), info)
		}
	default:
		debug := ""
		if deltaLog.DebugEnabled() {
			// Add additional information to logs when debug mode enabled.
			debug = " nonce:" + resp.Nonce + " version:" + resp.SystemVersionInfo
		}
		// Reuse configSize computed above rather than recomputing ResourceSize(res).
		deltaLog.Infof("%s: %s%s for node:%s resources:%d removed:%d size:%v%s%s",
			v3.GetShortType(w.TypeUrl), ptype, req.PushReason(), con.proxy.ID, len(res), len(resp.RemovedResources),
			util.ByteCount(configSize), info, debug)
	}
	return nil
}
// requiresResourceNamesModification checks if a generator needs mutable access to w.ResourceNames.
// This is used when resources are spontaneously pushed during Delta XDS.
func requiresResourceNamesModification(url string) bool {
	switch url {
	case v3.AddressType, v3.WorkloadType:
		return true
	default:
		return false
	}
}
// shouldSetWatchedResources indicates whether we should set the watched resources for a given type.
// For some types, like `Address`, the generator handles this itself, so we must not.
func shouldSetWatchedResources(w *model.WatchedResource) bool {
	switch w.TypeUrl {
	case v3.AddressType, v3.WorkloadType:
		// These types manage their own watched-resource bookkeeping in the generator.
		return false
	}
	// Otherwise, only wildcard types track the full resource set here.
	return isWildcardTypeURL(w.TypeUrl)
}
// newDeltaConnection constructs a Connection wired for a Delta XDS stream,
// initializing all signaling channels and recording the connect time.
func newDeltaConnection(peerAddr string, stream DeltaDiscoveryStream) *Connection {
	return &Connection{
		peerAddr:     peerAddr,
		connectedAt:  time.Now(),
		deltaStream:  stream,
		pushChannel:  make(chan *Event),
		initialized:  make(chan struct{}),
		stop:         make(chan struct{}),
		deltaReqChan: make(chan *discovery.DeltaDiscoveryRequest, 1),
		errorChan:    make(chan error, 1),
	}
}
// deltaToSotwRequest adapts a delta request into a SotW DiscoveryRequest, to
// satisfy methods that need DiscoveryRequest. Not suitable for real usage.
func deltaToSotwRequest(request *discovery.DeltaDiscoveryRequest) *discovery.DiscoveryRequest {
	out := &discovery.DiscoveryRequest{
		Node:          request.Node,
		TypeUrl:       request.TypeUrl,
		ResponseNonce: request.ResponseNonce,
		ErrorDetail:   request.ErrorDetail,
		// Delta "subscribe" names map onto the SotW resource name list.
		ResourceNames: request.ResourceNamesSubscribe,
	}
	return out
}
// deltaWatchedResources returns the current watched resources of a delta xds
// session after applying the request's subscribe/unsubscribe changes, plus
// whether the subscription is wildcard.
func deltaWatchedResources(existing []string, request *discovery.DeltaDiscoveryRequest) ([]string, bool) {
	current := sets.New(existing...)
	current.InsertAll(request.ResourceNamesSubscribe...)
	// InitialResourceVersions is set by Envoy on the first request after a
	// reconnection so that we are aware of what Envoy already knows and can
	// continue the xDS session properly; treat those names as subscribed.
	for name := range request.InitialResourceVersions {
		current.Insert(name)
	}
	current.DeleteAll(request.ResourceNamesUnsubscribe...)
	// A request is wildcard if it explicitly subscribes to "*" or subscribes to nothing.
	wildcard := current.Contains("*")
	if wildcard {
		current.Delete("*")
	}
	// Per the xDS spec: "if the client sends a request but has never explicitly
	// subscribed to any resource names, the server should treat that identically
	// to how it would treat the client having explicitly subscribed to *".
	// NOTE: this means you cannot subscribe to nothing, which is useful for
	// on-demand loading; to work around this, Istio clients will send an initial
	// request both subscribing and unsubscribing to `*`.
	if len(request.ResourceNamesSubscribe) == 0 {
		wildcard = true
	}
	return current.UnsortedList(), wildcard
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"context"
"sync"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/test"
)
// NewDeltaAdsTest creates a DeltaAdsTest speaking Delta ADS over the given
// connection, force-enabling the DeltaXds feature for the test's lifetime.
func NewDeltaAdsTest(t test.Failer, conn *grpc.ClientConn) *DeltaAdsTest {
	test.SetForTest(t, &features.DeltaXds, true)
	newClient := func(conn *grpc.ClientConn) (DeltaDiscoveryClient, error) {
		return discovery.NewAggregatedDiscoveryServiceClient(conn).DeltaAggregatedResources(context.Background())
	}
	return NewDeltaXdsTest(t, conn, newClient)
}
// NewDeltaXdsTest creates a DeltaAdsTest using getClient to open the stream,
// starts the background receive loop, and registers cleanup with the test.
func NewDeltaXdsTest(t test.Failer, conn *grpc.ClientConn,
	getClient func(conn *grpc.ClientConn) (DeltaDiscoveryClient, error),
) *DeltaAdsTest {
	ctx, cancel := context.WithCancel(context.Background())
	client, err := getClient(conn)
	if err != nil {
		t.Fatal(err)
	}
	out := &DeltaAdsTest{
		client:        client,
		conn:          conn,
		context:       ctx,
		cancelContext: cancel,
		t:             t,
		// Defaults: a sidecar node watching clusters, with a 1s expectation timeout.
		ID:        "sidecar~1.1.1.1~test.default~default.svc.cluster.local",
		timeout:   time.Second,
		Type:      v3.ClusterType,
		responses: make(chan *discovery.DeltaDiscoveryResponse),
		error:     make(chan error),
	}
	t.Cleanup(out.Cleanup)
	go out.adsReceiveChannel()
	return out
}
// DeltaAdsTest is a test helper wrapping a Delta XDS client. Responses and
// stream errors are pumped onto channels by a background goroutine so tests
// can assert on them with timeouts via the Expect* methods.
type DeltaAdsTest struct {
	client DeltaDiscoveryClient
	// responses receives every DeltaDiscoveryResponse read from the stream.
	responses chan *discovery.DeltaDiscoveryResponse
	// error receives the first receive error from the stream.
	error chan error
	t     test.Failer
	conn  *grpc.ClientConn

	// metadata is attached to the Node of outgoing requests.
	metadata model.NodeMetadata

	// ID is the node ID used in requests.
	ID string
	// Type is the default TypeUrl for requests that do not set one.
	Type string

	// cancelOnce guards Cleanup against concurrent/repeated invocation.
	cancelOnce    sync.Once
	context       context.Context
	cancelContext context.CancelFunc
	// timeout bounds how long Expect* methods wait.
	timeout time.Duration
}
// Cleanup tears the test connection down: cancels the context, closes the send
// side of the stream, and closes the gRPC connection. Safe to call repeatedly.
func (a *DeltaAdsTest) Cleanup() {
	// cancelOnce avoids a race when two callers attempt to cleanup concurrently.
	a.cancelOnce.Do(func() {
		a.cancelContext()
		_ = a.client.CloseSend()
		if conn := a.conn; conn != nil {
			_ = conn.Close()
		}
	})
}
// adsReceiveChannel pumps responses from the stream onto a.responses, and the
// first receive error onto a.error, until the stream ends or the context is
// cancelled. Cleanup is scheduled to run automatically on context cancellation.
func (a *DeltaAdsTest) adsReceiveChannel() {
	context.AfterFunc(a.context, a.Cleanup)
	for {
		msg, err := a.client.Recv()
		if err != nil {
			if isUnexpectedError(err) {
				log.Warnf("ads received error: %v", err)
			}
			// Deliver the error unless the test is already shutting down.
			select {
			case a.error <- err:
			case <-a.context.Done():
			}
			return
		}
		select {
		case a.responses <- msg:
		case <-a.context.Done():
			return
		}
	}
}
// DrainResponses reads all responses, but does nothing to them.
// It returns once the test context is cancelled.
func (a *DeltaAdsTest) DrainResponses() {
	a.t.Helper()
	for {
		select {
		case resp := <-a.responses:
			log.Infof("drained response %v", resp.TypeUrl)
		case <-a.context.Done():
			return
		}
	}
}
// ExpectResponse waits until a non-empty response is received and returns it,
// failing the test on timeout, stream error, or an empty response.
func (a *DeltaAdsTest) ExpectResponse() *discovery.DeltaDiscoveryResponse {
	a.t.Helper()
	select {
	case resp := <-a.responses:
		empty := resp == nil || (len(resp.Resources) == 0 && len(resp.RemovedResources) == 0)
		if empty {
			a.t.Fatalf("got empty response")
		}
		return resp
	case err := <-a.error:
		a.t.Fatalf("got error: %v", err)
	case <-time.After(a.timeout):
		a.t.Fatalf("did not get response in time")
	}
	return nil
}
// ExpectEmptyResponse waits until a response is received and asserts that it
// carries no resources and no removals, then returns it. Fails the test on
// timeout, stream error, or a non-empty response.
func (a *DeltaAdsTest) ExpectEmptyResponse() *discovery.DeltaDiscoveryResponse {
	a.t.Helper()
	select {
	case <-time.After(a.timeout):
		a.t.Fatalf("did not get response in time")
	case resp := <-a.responses:
		if resp == nil {
			a.t.Fatalf("expected response")
		}
		// NOTE(review): the resp != nil re-check is redundant if Fatalf halts
		// execution; retained defensively since test.Failer implementations vary.
		if resp != nil && (len(resp.RemovedResources) > 0 || len(resp.Resources) > 0) {
			a.t.Fatalf("expected empty response. received %v", resp)
		}
		return resp
	case err := <-a.error:
		a.t.Fatalf("got error: %v", err)
	}
	return nil
}
// ExpectError waits until an error is received and returns it, failing the
// test if none arrives within the timeout.
func (a *DeltaAdsTest) ExpectError() error {
	a.t.Helper()
	select {
	case err := <-a.error:
		return err
	case <-time.After(a.timeout):
		a.t.Fatalf("did not get error in time")
	}
	return nil
}
// ExpectNoResponse waits a short period of time and ensures no response is received.
func (a *DeltaAdsTest) ExpectNoResponse() {
	a.t.Helper()
	select {
	case resp := <-a.responses:
		a.t.Fatalf("got unexpected response: %v", resp)
	case <-time.After(time.Millisecond * 50):
		// Quiet for 50ms: treat as "no response", as intended.
	}
}
// fillInRequestDefaults populates the TypeUrl and Node of req from the test's
// configured defaults (allocating the request when nil) and returns it.
func (a *DeltaAdsTest) fillInRequestDefaults(req *discovery.DeltaDiscoveryRequest) *discovery.DeltaDiscoveryRequest {
	out := req
	if out == nil {
		out = &discovery.DeltaDiscoveryRequest{}
	}
	if out.TypeUrl == "" {
		out.TypeUrl = a.Type
	}
	if out.Node == nil {
		out.Node = &core.Node{
			Id:       a.ID,
			Metadata: a.metadata.ToStruct(),
		}
	}
	return out
}
// Request sends req on the stream (with defaults filled in), failing the test on send error.
func (a *DeltaAdsTest) Request(req *discovery.DeltaDiscoveryRequest) {
	filled := a.fillInRequestDefaults(req)
	if err := a.client.Send(filled); err != nil {
		a.t.Fatal(err)
	}
}
// RequestResponseAck does a full XDS exchange: Send a request, get a response, and ACK the response.
func (a *DeltaAdsTest) RequestResponseAck(req *discovery.DeltaDiscoveryRequest) *discovery.DeltaDiscoveryResponse {
	a.t.Helper()
	req = a.fillInRequestDefaults(req)
	a.Request(req)
	resp := a.ExpectResponse()
	// ACK by echoing the nonce of the response we just received.
	req.ResponseNonce = resp.Nonce
	ack := &discovery.DeltaDiscoveryRequest{
		Node:          req.Node,
		TypeUrl:       req.TypeUrl,
		ResponseNonce: req.ResponseNonce,
	}
	a.Request(ack)
	return resp
}
// RequestResponseNack does a full XDS exchange with an error: Send a request,
// get a response, and NACK the response by echoing its nonce with an ErrorDetail.
func (a *DeltaAdsTest) RequestResponseNack(req *discovery.DeltaDiscoveryRequest) *discovery.DeltaDiscoveryResponse {
	a.t.Helper()
	if req == nil {
		req = &discovery.DeltaDiscoveryRequest{}
	}
	a.Request(req)
	resp := a.ExpectResponse()
	// Propagate the nonce from the response so the follow-up request is an
	// actual NACK of that response; with an empty ResponseNonce the server
	// would treat it as a spontaneous request instead (see mirror logic in
	// RequestResponseAck).
	req.ResponseNonce = resp.Nonce
	a.Request(&discovery.DeltaDiscoveryRequest{
		Node:          req.Node,
		TypeUrl:       req.TypeUrl,
		ResponseNonce: req.ResponseNonce,
		ErrorDetail:   &status.Status{Message: "Test request NACK"},
	})
	return resp
}
// WithID sets the node ID used in requests; returns the test for chaining.
func (a *DeltaAdsTest) WithID(id string) *DeltaAdsTest {
	a.ID = id
	return a
}
// WithType sets the default TypeUrl used in requests; returns the test for chaining.
func (a *DeltaAdsTest) WithType(typeURL string) *DeltaAdsTest {
	a.Type = typeURL
	return a
}
// WithMetadata sets the node metadata attached to requests; returns the test for chaining.
func (a *DeltaAdsTest) WithMetadata(m model.NodeMetadata) *DeltaAdsTest {
	a.metadata = m
	return a
}
// WithTimeout sets how long Expect* methods wait; returns the test for chaining.
func (a *DeltaAdsTest) WithTimeout(t time.Duration) *DeltaAdsTest {
	a.timeout = t
	return a
}
// WithNodeType rewrites the node ID with the given node type (e.g. sidecar,
// router) while keeping the default address/namespace; returns the test for chaining.
func (a *DeltaAdsTest) WithNodeType(t model.NodeType) *DeltaAdsTest {
	a.ID = string(t) + "~1.1.1.1~test.default~default.svc.cluster.local"
	return a
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"fmt"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/google/go-cmp/cmp"
"google.golang.org/protobuf/testing/protocmp"
"istio.io/istio/pilot/pkg/model"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/util/sets"
)
// knownOptimizationGaps lists cluster names that delta generation is known to
// send even when unchanged; compareDiff excludes them from "missed possible
// optimization" reporting to reduce noise.
var knownOptimizationGaps = sets.New(
	"BlackHoleCluster",
	"InboundPassthroughClusterIpv4",
	"InboundPassthroughClusterIpv6",
	"PassthroughCluster",
)
// compareDiff compares a Delta and SotW XDS response. This allows checking that the Delta XDS
// response returned the optimal result. Checks include correctness checks (e.g. if a config changed,
// we must include it) and possible optimizations (e.g. we sent a config, but it was not changed).
// The SotW generation (full) is treated as ground truth; discrepancies are logged, never fatal.
func (s *DiscoveryServer) compareDiff(
	con *Connection,
	w *model.WatchedResource,
	full model.Resources,
	resp model.Resources,
	deleted model.DeletedResources,
	usedDelta bool,
	delta model.ResourceDelta,
	incremental bool,
) {
	current := con.Watched(w.TypeUrl).LastResources
	if current == nil {
		log.Debugf("ADS:%s: resources initialized", v3.GetShortType(w.TypeUrl))
		return
	}
	if resp == nil && deleted == nil && len(full) == 0 {
		// TODO: it suspicious full is never nil - are there case where we should be deleting everything?
		// Both SotW and Delta did not respond, nothing to compare
		return
	}
	// Index the SotW (full) and previously-sent (current) resources by name.
	newByName := map[string]*discovery.Resource{}
	for _, v := range full {
		newByName[v.Name] = v
	}
	curByName := map[string]*discovery.Resource{}
	for _, v := range current {
		curByName[v.Name] = v
	}
	watched := sets.New(w.ResourceNames...)

	details := fmt.Sprintf("last:%v sotw:%v delta:%v-%v", len(current), len(full), len(resp), len(deleted))
	// Classify each previously-sent resource as deleted, changed, or unchanged
	// according to the ground-truth SotW result.
	wantDeleted := sets.New[string]()
	wantChanged := sets.New[string]()
	wantUnchanged := sets.New[string]()
	for _, c := range current {
		n := newByName[c.Name]
		if n == nil {
			// We had a resource, but SotW didn't generate it.
			if watched.Contains(c.Name) {
				// We only need to delete it if Envoy is watching. Otherwise, it may have simply unsubscribed
				wantDeleted.Insert(c.Name)
			}
		} else if diff := cmp.Diff(c.Resource, n.Resource, protocmp.Transform()); diff != "" {
			// Resource was modified
			wantChanged.Insert(c.Name)
		} else {
			// No diff. Ideally delta doesn't send any update here
			wantUnchanged.Insert(c.Name)
		}
	}
	for _, v := range full {
		if _, f := curByName[v.Name]; !f {
			// Resource is added. Delta doesn't distinguish add vs update, so just put it with changed
			wantChanged.Insert(v.Name)
		}
	}

	// What the delta response actually did.
	gotDeleted := sets.New[string]()
	if usedDelta {
		gotDeleted.InsertAll(deleted...)
	}
	gotChanged := sets.New[string]()
	for _, v := range resp {
		gotChanged.Insert(v.Name)
	}

	// BUGS
	extraDeletes := sets.SortedList(gotDeleted.Difference(wantDeleted))
	missedDeletes := sets.SortedList(wantDeleted.Difference(gotDeleted))
	missedChanges := sets.SortedList(wantChanged.Difference(gotChanged))
	// Optimization Potential
	extraChanges := sets.SortedList(gotChanged.Difference(wantChanged).Difference(knownOptimizationGaps))
	if len(delta.Subscribed) > 0 {
		// Delta is configured to build only the request resources. Make sense we didn't build anything extra
		if !wantChanged.SupersetOf(gotChanged) {
			// Log the resources we actually sent beyond what SotW says we should have
			// (fixed: previously this logged the inverse diff, wantChanged-gotChanged).
			log.Errorf("%s: TEST for node:%s unexpected resources: %v %v", v3.GetShortType(w.TypeUrl), con.proxy.ID, details, gotChanged.Difference(wantChanged))
		}
		// Still make sure we didn't delete anything extra
		if len(extraDeletes) > 0 {
			log.Errorf("%s: TEST for node:%s unexpected deletions: %v %v", v3.GetShortType(w.TypeUrl), con.proxy.ID, details, extraDeletes)
		}
	} else {
		if len(extraDeletes) > 0 {
			log.Errorf("%s: TEST for node:%s unexpected deletions: %v %v", v3.GetShortType(w.TypeUrl), con.proxy.ID, details, extraDeletes)
		}
		if len(missedDeletes) > 0 && !incremental {
			// We can skip this if we are incremental; this expects us to only send a partial list. So these are not actually deletes
			log.Errorf("%s: TEST for node:%s missed deletions: %v %v", v3.GetShortType(w.TypeUrl), con.proxy.ID, details, missedDeletes)
		}
		if len(missedChanges) > 0 {
			log.Errorf("%s: TEST for node:%s missed changes: %v %v", v3.GetShortType(w.TypeUrl), con.proxy.ID, details, missedChanges)
		}
		if len(extraChanges) > 0 {
			if usedDelta {
				log.Infof("%s: TEST for node:%s missed possible optimization: %v. deleted:%v changed:%v",
					v3.GetShortType(w.TypeUrl), con.proxy.ID, extraChanges, len(gotDeleted), len(gotChanged))
			} else {
				log.Debugf("%s: TEST for node:%s missed possible optimization: %v. deleted:%v changed:%v",
					v3.GetShortType(w.TypeUrl), con.proxy.ID, extraChanges, len(gotDeleted), len(gotChanged))
			}
		}
	}
}
// applyDelta applies a delta response on top of an existing resource list:
// removed resources are dropped, resources with matching names are replaced,
// and any remaining new resources from the response are appended.
func applyDelta(message model.Resources, resp *discovery.DeltaDiscoveryResponse) model.Resources {
	removed := sets.New(resp.RemovedResources...)
	updates := map[string]*discovery.Resource{}
	for _, r := range resp.Resources {
		updates[r.Name] = r
	}
	// Keep the result non-nil even when empty, matching prior behavior.
	out := model.Resources{}
	for _, existing := range message {
		if removed.Contains(existing.Name) {
			continue
		}
		if replacement, ok := updates[existing.Name]; ok {
			out = append(out, replacement)
			// Consume the update so only brand-new resources remain below.
			delete(updates, existing.Name)
		} else {
			out = append(out, existing)
		}
	}
	// Whatever is left in updates was not present before: append as new.
	for _, r := range updates {
		out = append(out, r)
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"context"
"fmt"
"sort"
"strconv"
"sync"
"time"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/google/uuid"
"go.uber.org/atomic"
"golang.org/x/time/rate"
"google.golang.org/grpc"
"istio.io/istio/pilot/pkg/autoregistration"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/envoyfilter"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/security"
)
// periodicRefreshMetrics is how often push-status metrics are recomputed and logged.
var periodicRefreshMetrics = 10 * time.Second
// DebounceOptions configures how config/registry events are batched before
// triggering an XDS push.
type DebounceOptions struct {
	// DebounceAfter is the delay added to events to wait
	// after a registry/config event for debouncing.
	// This will delay the push by at least this interval, plus
	// the time getting subsequent events. If no change is
	// detected the push will happen, otherwise we'll keep
	// delaying until things settle.
	DebounceAfter time.Duration

	// debounceMax is the maximum time to wait for events
	// while debouncing. Defaults to 10 seconds. If events keep
	// showing up with no break for this time, we'll trigger a push.
	debounceMax time.Duration

	// enableEDSDebounce indicates whether EDS pushes should be debounced.
	// When false, non-full (EDS) pushes bypass debouncing entirely.
	enableEDSDebounce bool
}
// DiscoveryServer is Pilot's gRPC implementation for Envoy's xds APIs
type DiscoveryServer struct {
	// Env is the model environment.
	Env *model.Environment

	// Generators allow customizing the generated config, based on the client metadata.
	// Key is the generator type - will match the Generator metadata to set the per-connection
	// default generator, or the combination of Generator metadata and TypeUrl to select a
	// different generator for a type.
	// Normal istio clients use the default generator - will not be impacted by this.
	Generators map[string]model.XdsResourceGenerator

	// ProxyNeedsPush is a function that determines whether a push can be completely skipped. Individual generators
	// may also choose to not send any updates.
	ProxyNeedsPush func(proxy *model.Proxy, req *model.PushRequest) bool

	// concurrentPushLimit is a semaphore that limits the amount of concurrent XDS pushes.
	concurrentPushLimit chan struct{}
	// RequestRateLimit limits the number of new XDS requests allowed. This helps prevent a thundering herd of incoming requests.
	RequestRateLimit *rate.Limiter

	// InboundUpdates describes the number of configuration updates the discovery server has received
	InboundUpdates *atomic.Int64
	// CommittedUpdates describes the number of configuration updates the discovery server has
	// received, processed, and stored in the push context. If this number is less than InboundUpdates,
	// there are updates we have not yet processed.
	// Note: This does not mean that all proxies have received these configurations; it is strictly
	// the push context, which means that the next push to a proxy will receive this configuration.
	CommittedUpdates *atomic.Int64

	// pushChannel is the buffer used for debouncing.
	// after debouncing the pushRequest will be sent to pushQueue
	pushChannel chan *model.PushRequest

	// pushQueue is the buffer that used after debounce and before the real xds push.
	pushQueue *PushQueue

	// debugHandlers is the list of all the supported debug handlers.
	debugHandlers map[string]string

	// adsClients reflect active gRPC channels, for both ADS and EDS.
	adsClients      map[string]*Connection
	adsClientsMutex sync.RWMutex

	StatusReporter DistributionStatusCache

	// Authenticators for XDS requests. Should be same/subset of the CA authenticators.
	Authenticators []security.Authenticator

	WorkloadEntryController *autoregistration.Controller

	// serverReady indicates caches have been synced up and server is ready to process requests.
	serverReady atomic.Bool

	DebounceOptions DebounceOptions

	// Cache for XDS resources
	Cache model.XdsCache

	// JwtKeyResolver holds a reference to the JWT key resolver instance.
	JwtKeyResolver *model.JwksResolver

	// ListRemoteClusters collects debug information about other clusters this istiod reads from.
	ListRemoteClusters func() []cluster.DebugInfo

	// ClusterAliases are alias names for cluster. When a proxy connects with a cluster ID
	// and if it has a different alias we should use that a cluster ID for proxy.
	ClusterAliases map[cluster.ID]cluster.ID

	// pushVersion stores the numeric push version. This should be accessed via NextVersion()
	pushVersion atomic.Uint64

	// DiscoveryStartTime is the time since the binary started
	DiscoveryStartTime time.Time
}
// NewDiscoveryServer creates DiscoveryServer that sources data from Pilot's internal mesh data structures.
// clusterAliases maps an alias cluster name to the canonical cluster ID to use for proxies
// connecting with that alias.
func NewDiscoveryServer(env *model.Environment, clusterAliases map[string]string) *DiscoveryServer {
	out := &DiscoveryServer{
		Env:                 env,
		Generators:          map[string]model.XdsResourceGenerator{},
		ProxyNeedsPush:      DefaultProxyNeedsPush,
		concurrentPushLimit: make(chan struct{}, features.PushThrottle),
		RequestRateLimit:    rate.NewLimiter(rate.Limit(features.RequestLimit), 1),
		InboundUpdates:      atomic.NewInt64(0),
		CommittedUpdates:    atomic.NewInt64(0),
		pushChannel:         make(chan *model.PushRequest, 10),
		pushQueue:           NewPushQueue(),
		debugHandlers:       map[string]string{},
		adsClients:          map[string]*Connection{},
		DebounceOptions: DebounceOptions{
			DebounceAfter:     features.DebounceAfter,
			debounceMax:       features.DebounceMax,
			enableEDSDebounce: features.EnableEDSDebounce,
		},
		Cache:              env.Cache,
		DiscoveryStartTime: processStartTime,
	}

	// Convert the string-keyed alias map into typed cluster IDs.
	out.ClusterAliases = make(map[cluster.ID]cluster.ID, len(clusterAliases))
	for alias, id := range clusterAliases {
		out.ClusterAliases[cluster.ID(alias)] = cluster.ID(id)
	}

	// NOTE: JWKS resolver initialization is intentionally disabled here;
	// call initJwksResolver explicitly if needed.
	//out.initJwksResolver()
	return out
}
// initJwksResolver initializes the JWT key resolver to be used, closing any
// previously created resolver first. A config push is triggered whenever a
// cached JWT public key changes.
func (s *DiscoveryServer) initJwksResolver() {
	if s.JwtKeyResolver != nil {
		s.closeJwksResolver()
	}
	s.JwtKeyResolver = model.NewJwksResolver(
		model.JwtPubKeyEvictionDuration, model.JwtPubKeyRefreshInterval,
		model.JwtPubKeyRefreshIntervalOnFailure, model.JwtPubKeyRetryInterval)

	// Flush cached discovery responses when detecting jwt public key change.
	s.JwtKeyResolver.PushFunc = func() {
		s.ConfigUpdate(&model.PushRequest{Full: true, Reason: model.NewReasonStats(model.UnknownTrigger)})
	}
}
// closeJwksResolver shuts down the JWT key resolver used, if one was created.
func (s *DiscoveryServer) closeJwksResolver() {
	if s.JwtKeyResolver == nil {
		return
	}
	s.JwtKeyResolver.Close()
}
// Register adds the ADS handler to the grpc server so Envoy clients can
// connect over the v3 AggregatedDiscoveryService API.
func (s *DiscoveryServer) Register(rpcs *grpc.Server) {
	// Register v3 server
	discovery.RegisterAggregatedDiscoveryServiceServer(rpcs, s)
}
// processStartTime records when the binary started; used as the default DiscoveryStartTime.
var processStartTime = time.Now()
// CachesSynced is called when caches have been synced so that server can accept connections.
func (s *DiscoveryServer) CachesSynced() {
	log.Infof("All caches have been synced up in %v, marking server ready", time.Since(s.DiscoveryStartTime))
	s.serverReady.Store(true)
}
// IsServerReady reports whether caches are synced and the server can process requests.
func (s *DiscoveryServer) IsServerReady() bool {
	return s.serverReady.Load()
}
// Start launches the discovery server's background workers: workload-entry
// auto-registration, update debouncing, periodic metrics refresh, push
// distribution, and the XDS cache. All goroutines exit when stopCh closes.
func (s *DiscoveryServer) Start(stopCh <-chan struct{}) {
	go s.WorkloadEntryController.Run(stopCh)
	go s.handleUpdates(stopCh)
	go s.periodicRefreshMetrics(stopCh)
	go s.sendPushes(stopCh)
	go s.Cache.Run(stopCh)
}
// periodicRefreshMetrics updates push metrics periodically (10s default).
// It re-records metrics and logs the status JSON only when the global push
// context has changed since the last tick.
func (s *DiscoveryServer) periodicRefreshMetrics(stopCh <-chan struct{}) {
	ticker := time.NewTicker(periodicRefreshMetrics)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			push := s.globalPushContext()
			// LastPushMutex guards the shared LastPushStatus snapshot.
			model.LastPushMutex.Lock()
			if model.LastPushStatus != push {
				model.LastPushStatus = push
				push.UpdateMetrics()
				out, _ := model.LastPushStatus.StatusJSON()
				// Only log when there is something to report.
				if string(out) != "{}" {
					log.Infof("Push Status: %s", string(out))
				}
			}
			model.LastPushMutex.Unlock()
		case <-stopCh:
			return
		}
	}
}
// dropCacheForRequest clears the cache in response to a push request.
// When the request identifies what changed, only those entries are evicted;
// otherwise the entire cache is dropped, since we cannot cache safely.
func (s *DiscoveryServer) dropCacheForRequest(req *model.PushRequest) {
	if len(req.ConfigsUpdated) > 0 {
		s.Cache.Clear(req.ConfigsUpdated)
		return
	}
	// Unknown change set: the whole cache could be stale.
	s.Cache.ClearAll()
}
// Push is called to push changes on config updates using ADS.
// Non-full pushes reuse the current global push context; full pushes build a
// fresh push context first (the expensive step) and then fan out.
func (s *DiscoveryServer) Push(req *model.PushRequest) {
	if !req.Full {
		req.Push = s.globalPushContext()
		// Partial push: drop only what is known to have changed.
		s.dropCacheForRequest(req)
		s.AdsPushAll(req)
		return
	}
	// Reset the status during the push.
	oldPushContext := s.globalPushContext()
	if oldPushContext != nil {
		oldPushContext.OnConfigChange()
		// Push the previous push Envoy metrics.
		envoyfilter.RecordMetrics()
	}
	// PushContext is reset after a config change. Previous status is
	// saved.
	t0 := time.Now()
	versionLocal := s.NextVersion()
	push, err := s.initPushContext(req, oldPushContext, versionLocal)
	if err != nil {
		// initPushContext failed; skip this push rather than distributing bad state.
		return
	}
	initContextTime := time.Since(t0)
	log.Debugf("InitContext %v for push took %s", versionLocal, initContextTime)
	pushContextInitTime.Record(initContextTime.Seconds())

	req.Push = push
	s.AdsPushAll(req)
}
// nonce builds a unique response nonce by appending a random UUID to the prefix.
func nonce(noncePrefix string) string {
	id := uuid.New().String()
	return noncePrefix + id
}
// globalPushContext returns the global push context. This should be used with caution; generally the
// proxy-specific PushContext should be used to get the current state in the context of a single proxy.
// This should only be used for "global" lookups, such as initiating a new push to all proxies.
func (s *DiscoveryServer) globalPushContext() *model.PushContext {
	return s.Env.PushContext()
}
// ConfigUpdate implements ConfigUpdater interface, used to request pushes.
// The request is counted and enqueued on pushChannel for debouncing; this may
// block if the (buffered) channel is full.
func (s *DiscoveryServer) ConfigUpdate(req *model.PushRequest) {
	if len(model.ConfigsOfKind(req.ConfigsUpdated, kind.Address)) > 0 {
		// This is a bit like clearing EDS cache on EndpointShard update. Because Address
		// types are fetched dynamically, they are not part of the same protections, so we need to clear
		// the cache.
		s.Cache.ClearAll()
	}
	inboundConfigUpdates.Increment()
	s.InboundUpdates.Inc()
	s.pushChannel <- req
}
// Debouncing and push request happens in a separate thread, it uses locks
// and we want to avoid complications, ConfigUpdate may already hold other locks.
// handleUpdates processes events from pushChannel
// It ensures that at minimum minQuiet time has elapsed since the last event before processing it.
// It also ensures that at most maxDelay is elapsed between receiving an event and processing it.
func (s *DiscoveryServer) handleUpdates(stopCh <-chan struct{}) {
	debounce(s.pushChannel, stopCh, s.DebounceOptions, s.Push, s.CommittedUpdates)
}
// The debounce helper function is implemented to enable mocking.
// It accumulates push requests from ch, merging them while events keep arriving,
// and invokes pushFn once things have been quiet for opts.DebounceAfter or the
// total delay reaches opts.debounceMax. Only one merged push runs at a time;
// updateSent is incremented by the number of events each push covered.
func debounce(ch chan *model.PushRequest, stopCh <-chan struct{}, opts DebounceOptions, pushFn func(req *model.PushRequest), updateSent *atomic.Int64) {
	var timeChan <-chan time.Time
	var startDebounce time.Time
	var lastConfigUpdateTime time.Time

	pushCounter := 0
	debouncedEvents := 0

	// Keeps track of the push requests. If updates are debounce they will be merged.
	var req *model.PushRequest

	free := true
	freeCh := make(chan struct{}, 1)

	push := func(req *model.PushRequest, debouncedEvents int, startDebounce time.Time) {
		pushFn(req)
		updateSent.Add(int64(debouncedEvents))
		debounceTime.Record(time.Since(startDebounce).Seconds())
		// Signal the main loop that the push completed so the next one may start.
		freeCh <- struct{}{}
	}

	pushWorker := func() {
		eventDelay := time.Since(startDebounce)
		quietTime := time.Since(lastConfigUpdateTime)
		// it has been too long or quiet enough
		if eventDelay >= opts.debounceMax || quietTime >= opts.DebounceAfter {
			if req != nil {
				pushCounter++
				if req.ConfigsUpdated == nil {
					log.Infof("Push debounce stable[%d] %d for reason %s: %v since last change, %v since last push, full=%v",
						pushCounter, debouncedEvents, reasonsUpdated(req),
						quietTime, eventDelay, req.Full)
				} else {
					log.Infof("Push debounce stable[%d] %d for config %s: %v since last change, %v since last push, full=%v",
						pushCounter, debouncedEvents, configsUpdated(req),
						quietTime, eventDelay, req.Full)
				}
				// Mark busy: at most one merged push runs at a time.
				free = false
				go push(req, debouncedEvents, startDebounce)
				req = nil
				debouncedEvents = 0
			}
		} else {
			// Not quiet long enough yet; re-arm the timer for the remainder.
			timeChan = time.After(opts.DebounceAfter - quietTime)
		}
	}

	for {
		select {
		case <-freeCh:
			free = true
			pushWorker()
		case r := <-ch:
			// If reason is not set, record it as an unknown reason
			if len(r.Reason) == 0 {
				r.Reason = model.NewReasonStats(model.UnknownTrigger)
			}
			if !opts.enableEDSDebounce && !r.Full {
				// trigger push now, just for EDS
				go func(req *model.PushRequest) {
					pushFn(req)
					updateSent.Inc()
				}(r)
				continue
			}

			lastConfigUpdateTime = time.Now()
			if debouncedEvents == 0 {
				// First event of a new debounce window: start the timer.
				timeChan = time.After(opts.DebounceAfter)
				startDebounce = lastConfigUpdateTime
			}
			debouncedEvents++

			// Merge with any pending request so a single push covers all events.
			req = req.Merge(r)
		case <-timeChan:
			if free {
				pushWorker()
			}
		case <-stopCh:
			return
		}
	}
}
// configsUpdated renders a short human-readable summary of the updated configs:
// one arbitrary config key, plus a count of how many more there are.
func configsUpdated(req *model.PushRequest) string {
	var sample string
	for key := range req.ConfigsUpdated {
		// Map iteration order is random; any single key is fine as an example.
		sample = key.String()
		break
	}
	if n := len(req.ConfigsUpdated); n > 1 {
		sample += fmt.Sprintf(" and %d more configs", n-1)
	}
	return sample
}
// reasonsUpdated renders a short human-readable summary of the push reasons:
// up to two (arbitrary) reasons with counts, plus a tally of any remainder.
func reasonsUpdated(req *model.PushRequest) string {
	var (
		first, second       model.TriggerReason
		firstCnt, secondCnt int
	)
	// Grab up to two entries; map order is random, any pair serves as a sample.
	seen := 0
	for r, cnt := range req.Reason {
		switch seen {
		case 0:
			first, firstCnt = r, cnt
		case 1:
			second, secondCnt = r, cnt
		}
		seen++
		if seen > 2 {
			break
		}
	}

	switch len(req.Reason) {
	case 0:
		return "unknown"
	case 1:
		return fmt.Sprintf("%s:%d", first, firstCnt)
	case 2:
		return fmt.Sprintf("%s:%d and %s:%d", first, firstCnt, second, secondCnt)
	default:
		return fmt.Sprintf("%s:%d and %d(%d) more reasons", first, firstCnt, len(req.Reason)-1,
			req.Reason.Count()-firstCnt)
	}
}
// doSendPushes is the push dispatch loop: it dequeues proxies from the
// PushQueue and delivers a push Event to each connection's pushChannel. The
// semaphore bounds the number of in-flight pushes. The loop exits when stopCh
// closes or the queue reports it is shutting down.
func doSendPushes(stopCh <-chan struct{}, semaphore chan struct{}, queue *PushQueue) {
	for {
		select {
		case <-stopCh:
			return
		default:
			// We can send to it until it is full, then it will block until a pushes finishes and reads from it.
			// This limits the number of pushes that can happen concurrently
			semaphore <- struct{}{}
			// Get the next proxy to push. This will block if there are no updates required.
			client, push, shuttingdown := queue.Dequeue()
			if shuttingdown {
				return
			}
			recordPushTriggers(push.Reason)
			// Signals that a push is done by reading from the semaphore, allowing another send on it.
			doneFunc := func() {
				queue.MarkDone(client)
				<-semaphore
			}
			proxiesQueueTime.Record(time.Since(push.Start).Seconds())
			// Pick whichever stream (SotW or delta) this connection uses, so we can
			// observe its context cancellation below.
			var closed <-chan struct{}
			if client.stream != nil {
				closed = client.stream.Context().Done()
			} else {
				closed = client.deltaStream.Context().Done()
			}
			go func() {
				pushEv := &Event{
					pushRequest: push,
					done:        doneFunc,
				}
				select {
				case client.pushChannel <- pushEv:
					return
				case <-closed: // grpc stream was closed
					// The connection went away before accepting the event; release the
					// queue entry and semaphore slot ourselves so they are not leaked.
					doneFunc()
					log.Infof("Client closed connection %v", client.conID)
				}
			}()
		}
	}
}
// initPushContext creates a global push context and stores it on the environment. Note: while this
// method is technically thread safe (there are no data races), it should not be called in parallel;
// if it is, then we may start two push context creations (say A, and B), but then write them in
// reverse order, leaving us with a final version of A, which may be incomplete.
func (s *DiscoveryServer) initPushContext(req *model.PushRequest, oldPushContext *model.PushContext, version string) (*model.PushContext, error) {
	pc := model.NewPushContext()
	pc.PushVersion = version
	pc.JwtKeyResolver = s.JwtKeyResolver
	err := pc.InitContext(s.Env, oldPushContext, req)
	if err != nil {
		log.Errorf("XDS: failed to init push context: %v", err)
		// We can't push if we can't read the data - stick with previous version.
		pushContextErrors.Increment()
		return nil, err
	}
	s.dropCacheForRequest(req)
	s.Env.SetPushContext(pc)
	return pc, nil
}
// sendPushes runs the push dispatch loop until stopCh closes, bounded by the
// server's configured concurrent push limit.
func (s *DiscoveryServer) sendPushes(stopCh <-chan struct{}) {
	doSendPushes(stopCh, s.concurrentPushLimit, s.pushQueue)
}
// Shutdown shuts down DiscoveryServer components: it closes the JWKS resolver
// and shuts down the push queue.
func (s *DiscoveryServer) Shutdown() {
	s.closeJwksResolver()
	s.pushQueue.ShutDown()
}
// Clients returns all currently connected clients. This method can be safely called concurrently,
// but care should be taken with the underlying objects (ie model.Proxy) to ensure proper locking.
// This method returns only fully initialized connections; for all connections, use AllClients
func (s *DiscoveryServer) Clients() []*Connection {
	s.adsClientsMutex.RLock()
	defer s.adsClientsMutex.RUnlock()
	out := make([]*Connection, 0, len(s.adsClients))
	for _, conn := range s.adsClients {
		// A non-blocking read on the initialized channel tells us whether this
		// connection finished its handshake; skip the ones still initializing.
		select {
		case <-conn.initialized:
			out = append(out, conn)
		default:
		}
	}
	return out
}
// SortedClients returns all currently connected clients in an ordered manner.
// Sorting order priority is as follows: ClusterID, Namespace, ID.
func (s *DiscoveryServer) SortedClients() []*Connection {
	clients := s.Clients()
	sort.Slice(clients, func(i, j int) bool {
		// Only fall through to a lower-priority key when the higher-priority keys
		// are equal. The previous comparator compared the namespace/ID even when
		// the cluster IDs differed, which is not a strict weak ordering and makes
		// the resulting sort order undefined.
		ci, cj := clients[i].proxy.GetClusterID().String(), clients[j].proxy.GetClusterID().String()
		if ci != cj {
			return ci < cj
		}
		ni, nj := clients[i].proxy.GetNamespace(), clients[j].proxy.GetNamespace()
		if ni != nj {
			return ni < nj
		}
		return clients[i].proxy.GetID() < clients[j].proxy.GetID()
	})
	return clients
}
// AllClients returns all connected clients, per Clients, but additionally includes uninitialized connections
// Warning: callers must take care not to rely on the con.proxy field being set
func (s *DiscoveryServer) AllClients() []*Connection {
	s.adsClientsMutex.RLock()
	defer s.adsClientsMutex.RUnlock()
	// maps.Values snapshots the map under the read lock before returning.
	return maps.Values(s.adsClients)
}
// SendResponse will immediately send the response to all connections.
// TODO: additional filters can be added, for example namespace.
func (s *DiscoveryServer) SendResponse(connections []*Connection, res *discovery.DiscoveryResponse) {
	for _, p := range connections {
		// p.send() waits for an ACK - which is reasonable for normal push,
		// but in this case we want to sync fast and not bother with stuck connections.
		// This is expecting a relatively small number of watchers - each other istiod
		// plus few admin tools or bridges to real message brokers. The normal
		// push expects 1000s of envoy connections.
		con := p // capture for the goroutine
		go func() {
			if err := con.stream.Send(res); err != nil {
				log.Errorf("Failed to send internal event %s: %v", con.conID, err)
			}
		}()
	}
}
// nolint
// ClientsOf returns the clients that are watching the given resource.
func (s *DiscoveryServer) ClientsOf(typeUrl string) []*Connection {
	// Keep the non-nil empty slice result for callers that distinguish nil.
	watchers := []*Connection{}
	for _, c := range s.Clients() {
		if !c.Watching(typeUrl) {
			continue
		}
		watchers = append(watchers, c)
	}
	return watchers
}
// WaitForRequestLimit blocks until the request rate limiter admits the caller,
// for at most one second. A limit of zero disables rate limiting entirely.
func (s *DiscoveryServer) WaitForRequestLimit(ctx context.Context) error {
	if s.RequestRateLimit.Limit() == 0 {
		// Allow opt out when rate limiting is set to 0qps
		return nil
	}
	// Give a bit of time for queue to clear out, but if not fail fast. Client will connect to another
	// instance in best case, or retry with backoff.
	limited, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	return s.RequestRateLimit.Wait(limited)
}
// NextVersion returns a new push version string, composed of the current
// timestamp and a monotonically increasing counter.
func (s *DiscoveryServer) NextVersion() string {
	return time.Now().Format(time.RFC3339) + "/" + strconv.FormatUint(s.pushVersion.Inc(), 10)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"fmt"
"strings"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
credscontroller "istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/credentials"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// EcdsGenerator generates ECDS configuration.
type EcdsGenerator struct {
	// ConfigGenerator builds the extension configurations that are pushed over ECDS.
	ConfigGenerator core.ConfigGenerator
	// secretController resolves Wasm image pull secrets per cluster; set via SetCredController.
	secretController credscontroller.MulticlusterController
}

// Compile-time assertion that EcdsGenerator implements XdsResourceGenerator.
var _ model.XdsResourceGenerator = &EcdsGenerator{}
// ecdsNeedsPush reports whether the push request can affect ECDS output.
func ecdsNeedsPush(req *model.PushRequest) bool {
	if req == nil {
		return true
	}
	// If none set, we will always push
	if len(req.ConfigsUpdated) == 0 {
		return true
	}
	// Only push if config updates is triggered by EnvoyFilter, WasmPlugin, or Secret.
	for config := range req.ConfigsUpdated {
		switch config.Kind {
		case kind.EnvoyFilter, kind.WasmPlugin, kind.Secret:
			return true
		}
	}
	return false
}
// onlyReferencedConfigsUpdated indicates whether the PushRequest
// has ONLY referenced resource in ConfigUpdates. For example ONLY
// secret is updated that may be referred by Wasm Plugin.
func onlyReferencedConfigsUpdated(req *model.PushRequest) bool {
	sawReferenced := false
	for config := range req.ConfigsUpdated {
		switch config.Kind {
		case kind.EnvoyFilter, kind.WasmPlugin:
			// A directly-relevant config changed, so this is not a
			// referenced-only update.
			return false
		case kind.Secret:
			sawReferenced = true
		}
	}
	return sawReferenced
}
// Generate returns ECDS resources for a given proxy.
// It skips the push when the request cannot affect ECDS, and for
// referenced-only updates (e.g. secret rotation) it pushes only when the
// changed secret is actually referenced by a WasmPlugin watched by this proxy.
func (e *EcdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !ecdsNeedsPush(req) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	wasmSecrets := referencedSecrets(proxy, req.Push, w.ResourceNames)
	// When referenced configs are ONLY updated (like secret update), we should push
	// if the referenced config is relevant for ECDS. A secret update is relevant
	// only if it is referred via WASM plugin.
	if onlyReferencedConfigsUpdated(req) {
		updatedSecrets := model.ConfigsOfKind(req.ConfigsUpdated, kind.Secret)
		needsPush := false
		for _, sr := range wasmSecrets {
			if _, found := updatedSecrets[model.ConfigKey{Kind: kind.Secret, Name: sr.Name, Namespace: sr.Namespace}]; found {
				needsPush = true
				break
			}
		}
		if !needsPush {
			// None of the updated secrets are referenced by this proxy's plugins.
			return nil, model.DefaultXdsLogDetails, nil
		}
	}
	var secrets map[string][]byte
	if len(wasmSecrets) > 0 {
		// Generate the pull secrets first, which will be used when populating the extension config.
		if e.secretController != nil {
			var err error
			secretController, err := e.secretController.ForCluster(proxy.Metadata.ClusterID)
			if err != nil {
				log.Warnf("proxy %s is from an unknown cluster, cannot retrieve certificates for Wasm image pull: %v", proxy.ID, err)
				return nil, model.DefaultXdsLogDetails, nil
			}
			// Inserts Wasm pull secrets in ECDS response, which will be used at xds proxy for image pull.
			// Before forwarding to Envoy, xds proxy will remove the secret from ECDS response.
			secrets = e.GeneratePullSecrets(proxy, wasmSecrets, secretController)
		}
	}
	ec := e.ConfigGenerator.BuildExtensionConfiguration(proxy, req.Push, w.ResourceNames, secrets)
	if ec == nil {
		return nil, model.DefaultXdsLogDetails, nil
	}
	// Wrap each extension config in a discovery.Resource for the response.
	resources := make(model.Resources, 0, len(ec))
	for _, c := range ec {
		resources = append(resources, &discovery.Resource{
			Name:     c.Name,
			Resource: protoconv.MessageToAny(c),
		})
	}
	return resources, model.DefaultXdsLogDetails, nil
}
// GeneratePullSecrets fetches the docker credentials for the given secret
// resources, keyed by resource name. Returns nil if the proxy's identity has
// not been verified.
func (e *EcdsGenerator) GeneratePullSecrets(proxy *model.Proxy, secretResources []SecretResource,
	secretController credscontroller.Controller,
) map[string][]byte {
	if proxy.VerifiedIdentity == nil {
		log.Warnf("proxy %s is not authorized to receive secret. Ensure you are connecting over TLS port and are authenticated.", proxy.ID)
		return nil
	}
	creds := make(map[string][]byte)
	for _, resource := range secretResources {
		secret, err := secretController.GetDockerCredential(resource.Name, resource.Namespace)
		if err != nil {
			// Best-effort: skip secrets we cannot fetch and keep the rest.
			log.Warnf("Failed to fetch docker credential %s: %v", resource.ResourceName, err)
			continue
		}
		creds[resource.ResourceName] = secret
	}
	return creds
}
// SetCredController sets the multicluster credentials controller used to
// resolve Wasm image pull secrets.
func (e *EcdsGenerator) SetCredController(creds credscontroller.MulticlusterController) {
	e.secretController = creds
}
// referencedSecrets returns the pull-secret resources referenced by the
// WasmPlugins that apply to this proxy and are among the watched resources.
func referencedSecrets(proxy *model.Proxy, push *model.PushContext, resourceNames []string) []SecretResource {
	// The requirement for the Wasm pull secret:
	// * Wasm pull secrets must be of type `kubernetes.io/dockerconfigjson`.
	// * Secret are referenced by a WasmPlugin which applies to this proxy.
	// TODO: we get the WasmPlugins here to get the secrets reference in order to decide whether ECDS push is needed,
	// and we will get it again at extension config build. Avoid getting it twice if this becomes a problem.
	watched := sets.New(resourceNames...)
	secretNames := sets.String{}
	for _, plugins := range push.WasmPlugins(proxy) {
		for _, plugin := range plugins {
			if plugin.ImagePullSecret == "" || !watched.Contains(plugin.ResourceName) {
				continue
			}
			secretNames.Insert(plugin.ImagePullSecret)
		}
	}
	var parsedSecrets []SecretResource
	for name := range secretNames {
		parsed, err := parseSecretName(name, proxy.Metadata.ClusterID)
		if err != nil {
			log.Warnf("Failed to parse secret resource name %v: %v", name, err)
			continue
		}
		parsedSecrets = append(parsedSecrets, parsed)
	}
	return parsedSecrets
}
// parseSecretName parses secret resource name from WasmPlugin env variable.
// See toSecretResourceName at model/extensions.go about how secret resource name is generated.
func parseSecretName(resourceName string, proxyCluster cluster.ID) (SecretResource, error) {
	// The secret resource name must be formatted as kubernetes://secret-namespace/secret-name.
	if !strings.HasPrefix(resourceName, credentials.KubernetesSecretTypeURI) {
		return SecretResource{}, fmt.Errorf("misformed Wasm pull secret resource name %v", resourceName)
	}
	parts := strings.Split(strings.TrimPrefix(resourceName, credentials.KubernetesSecretTypeURI), "/")
	if len(parts) != 2 {
		return SecretResource{}, fmt.Errorf("misformed Wasm pull secret resource name %v", resourceName)
	}
	namespace, name := parts[0], parts[1]
	return SecretResource{
		SecretResource: credentials.SecretResource{
			ResourceType: credentials.KubernetesSecretType,
			Name:         name,
			Namespace:    namespace,
			ResourceName: resourceName,
			Cluster:      proxyCluster,
		},
	}, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"fmt"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pilot/pkg/xds/endpoints"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// SvcUpdate is a callback from service discovery when service info changes.
func (s *DiscoveryServer) SvcUpdate(shard model.ShardKey, hostname string, namespace string, event model.Event) {
	switch event {
	case model.EventDelete:
		// When a service deleted, we should cleanup the endpoint shards and also remove keys from EndpointIndex to
		// prevent memory leaks.
		inboundServiceDeletes.Increment()
		s.Env.EndpointIndex.DeleteServiceShard(shard, hostname, namespace, false)
	default:
		inboundServiceUpdates.Increment()
	}
}
// EDSUpdate computes destination address membership across all clusters and networks.
// This is the main method implementing EDS.
// It replaces InstancesByPort in model - instead of iterating over all endpoints it uses
// the hostname-keyed map. And it avoids the conversion from Endpoint to ServiceEntry to envoy
// on each step: instead the conversion happens once, when an endpoint is first discovered.
func (s *DiscoveryServer) EDSUpdate(shard model.ShardKey, serviceName string, namespace string,
	istioEndpoints []*model.IstioEndpoint,
) {
	inboundEDSUpdates.Increment()
	// Update the endpoint shards
	pushType := s.Env.EndpointIndex.UpdateServiceEndpoints(shard, serviceName, namespace, istioEndpoints)
	if pushType != model.IncrementalPush && pushType != model.FullPush {
		// No push needed for this update.
		return
	}
	// Trigger a push
	s.ConfigUpdate(&model.PushRequest{
		Full:           pushType == model.FullPush,
		ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.ServiceEntry, Name: serviceName, Namespace: namespace}),
		Reason:         model.NewReasonStats(model.EndpointUpdate),
	})
}
// EDSCacheUpdate computes destination address membership across all clusters and networks.
// This is the main method implementing EDS.
// It replaces InstancesByPort in model - instead of iterating over all endpoints it uses
// the hostname-keyed map. And it avoids the conversion from Endpoint to ServiceEntry to envoy
// on each step: instead the conversion happens once, when an endpoint is first discovered.
//
// Note: the difference with `EDSUpdate` is that it only update the cache rather than requesting a push
func (s *DiscoveryServer) EDSCacheUpdate(shard model.ShardKey, serviceName string, namespace string,
	istioEndpoints []*model.IstioEndpoint,
) {
	inboundEDSUpdates.Increment()
	// Update the endpoint shards
	s.Env.EndpointIndex.UpdateServiceEndpoints(shard, serviceName, namespace, istioEndpoints)
}
// RemoveShard drops all endpoints belonging to the given shard from the
// EndpointIndex.
func (s *DiscoveryServer) RemoveShard(shardKey model.ShardKey) {
	s.Env.EndpointIndex.DeleteShard(shardKey)
}
// EdsGenerator implements the new Generate method for EDS, using the in-memory, optimized endpoint
// storage in DiscoveryServer.
type EdsGenerator struct {
	// Cache stores previously generated EDS resources, keyed by EndpointBuilder.
	Cache model.XdsCache
	// EndpointIndex is the shard-organized endpoint storage.
	EndpointIndex *model.EndpointIndex
}

// Compile-time assertion that EdsGenerator supports delta xDS generation.
var _ model.XdsDeltaResourceGenerator = &EdsGenerator{}
// Map of all configs that do not impact EDS. Updates consisting solely of
// these kinds never require regenerating endpoints (see edsNeedsPush and
// canSendPartialFullPushes).
var skippedEdsConfigs = map[kind.Kind]struct{}{
	kind.Gateway:               {},
	kind.VirtualService:        {},
	kind.WorkloadGroup:         {},
	kind.AuthorizationPolicy:   {},
	kind.RequestAuthentication: {},
	kind.Secret:                {},
	kind.Telemetry:             {},
	kind.WasmPlugin:            {},
	kind.ProxyConfig:           {},
}
// edsNeedsPush reports whether any updated config can impact EDS output.
func edsNeedsPush(updates model.XdsUpdates) bool {
	// An empty update set means "push everything".
	if len(updates) == 0 {
		return true
	}
	for config := range updates {
		_, skip := skippedEdsConfigs[config.Kind]
		if !skip {
			return true
		}
	}
	return false
}
// Generate produces state-of-the-world EDS resources for the proxy's watched
// clusters, skipping generation entirely when no updated config affects EDS.
func (eds *EdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !edsNeedsPush(req.ConfigsUpdated) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	resources, logDetails := eds.buildEndpoints(proxy, req, w)
	return resources, logDetails, nil
}
// GenerateDeltas produces EDS resources for delta xDS. When the request
// qualifies (full push with only endpoint-relevant changes), it returns a
// true flag plus only the changed/removed clusters; otherwise it falls back
// to a full state-of-the-world build with the flag set to false.
func (eds *EdsGenerator) GenerateDeltas(proxy *model.Proxy, req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	if !edsNeedsPush(req.ConfigsUpdated) {
		return nil, nil, model.DefaultXdsLogDetails, false, nil
	}
	if !shouldUseDeltaEds(req) {
		resources, logDetails := eds.buildEndpoints(proxy, req, w)
		return resources, nil, logDetails, false, nil
	}
	resources, removed, logs := eds.buildDeltaEndpoints(proxy, req, w)
	return resources, removed, logs, true, nil
}
// shouldUseDeltaEds reports whether a full push can be served as a delta,
// sending only the endpoints that changed.
func shouldUseDeltaEds(req *model.PushRequest) bool {
	return req.Full && canSendPartialFullPushes(req)
}
// canSendPartialFullPushes checks if a request contains *only* endpoints updates except `skippedEdsConfigs`.
// This allows us to perform more efficient pushes where we only update the endpoints that did change.
func canSendPartialFullPushes(req *model.PushRequest) bool {
	// If we don't know what configs are updated, just send a full push
	if len(req.ConfigsUpdated) == 0 {
		return false
	}
	for cfg := range req.ConfigsUpdated {
		if _, irrelevant := skippedEdsConfigs[cfg.Kind]; irrelevant {
			// the updated config does not impact EDS, skip it
			// this happens when push requests are merged due to debounce
			continue
		}
		if cfg.Kind != kind.ServiceEntry {
			return false
		}
	}
	return true
}
// buildEndpoints builds state-of-the-world ClusterLoadAssignments for the
// proxy's watched clusters, using the EDS cache where possible. When the
// request qualifies for a partial push, only the clusters belonging to
// updated ServiceEntries are regenerated.
func (eds *EdsGenerator) buildEndpoints(proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.XdsLogDetails) {
	var edsUpdatedServices map[string]struct{}
	// canSendPartialFullPushes determines if we can send a partial push (ie a subset of known CLAs).
	// This is safe when only Services has changed, as this implies that only the CLAs for the
	// associated Service changed. Note when a multi-network Service changes it triggers a push with
	// ConfigsUpdated=ALL, so in this case we would not enable a partial push.
	// Despite this code existing on the SotW code path, sending these partial pushes is still allowed;
	// see https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#grouping-resources-into-responses
	if !req.Full || canSendPartialFullPushes(req) {
		edsUpdatedServices = model.ConfigNamesOfKind(req.ConfigsUpdated, kind.ServiceEntry)
	}
	var resources model.Resources
	// Counters for the log details returned below.
	empty := 0
	cached := 0
	regenerated := 0
	for _, clusterName := range w.ResourceNames {
		if edsUpdatedServices != nil {
			if _, ok := edsUpdatedServices[model.ParseSubsetKeyHostname(clusterName)]; !ok {
				// Cluster was not updated, skip recomputing. This happens when we get an incremental update for a
				// specific Hostname. On connect or for full push edsUpdatedServices will be empty.
				continue
			}
		}
		builder := endpoints.NewEndpointBuilder(clusterName, proxy, req.Push)
		// We skip cache if assertions are enabled, so that the cache will assert our eviction logic is correct
		if !features.EnableUnsafeAssertions {
			cachedEndpoint := eds.Cache.Get(&builder)
			if cachedEndpoint != nil {
				resources = append(resources, cachedEndpoint)
				cached++
				continue
			}
		}
		// generate eds from beginning
		{
			l := builder.BuildClusterLoadAssignment(eds.EndpointIndex)
			if l == nil {
				continue
			}
			regenerated++
			if len(l.Endpoints) == 0 {
				empty++
			}
			resource := &discovery.Resource{
				Name:     l.ClusterName,
				Resource: protoconv.MessageToAny(l),
			}
			resources = append(resources, resource)
			// Populate the cache so subsequent pushes can reuse this result.
			eds.Cache.Add(&builder, req, resource)
		}
	}
	return resources, model.XdsLogDetails{
		Incremental:    len(edsUpdatedServices) != 0,
		AdditionalInfo: fmt.Sprintf("empty:%v cached:%v/%v", empty, cached, cached+regenerated),
	}
}
// TODO(@hzxuzhonghu): merge with buildEndpoints
// buildDeltaEndpoints builds only the ClusterLoadAssignments for clusters
// whose ServiceEntry changed in this request, and reports clusters whose
// service disappeared as removed.
func (eds *EdsGenerator) buildDeltaEndpoints(proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, []string, model.XdsLogDetails) {
	edsUpdatedServices := model.ConfigNamesOfKind(req.ConfigsUpdated, kind.ServiceEntry)
	var resources model.Resources
	var removed []string
	// Counters for the log details returned below.
	empty := 0
	cached := 0
	regenerated := 0
	for _, clusterName := range w.ResourceNames {
		// filter out eds that are not updated for clusters
		if _, ok := edsUpdatedServices[model.ParseSubsetKeyHostname(clusterName)]; !ok {
			continue
		}
		builder := endpoints.NewEndpointBuilder(clusterName, proxy, req.Push)
		// if a service is not found, it means the cluster is removed
		if !builder.ServiceFound() {
			removed = append(removed, clusterName)
			continue
		}
		// We skip cache if assertions are enabled, so that the cache will assert our eviction logic is correct
		if !features.EnableUnsafeAssertions {
			cachedEndpoint := eds.Cache.Get(&builder)
			if cachedEndpoint != nil {
				resources = append(resources, cachedEndpoint)
				cached++
				continue
			}
		}
		// generate new eds cache
		{
			l := builder.BuildClusterLoadAssignment(eds.EndpointIndex)
			if l == nil {
				// No load assignment could be built; treat the cluster as removed.
				removed = append(removed, clusterName)
				continue
			}
			regenerated++
			if len(l.Endpoints) == 0 {
				empty++
			}
			resource := &discovery.Resource{
				Name:     l.ClusterName,
				Resource: protoconv.MessageToAny(l),
			}
			resources = append(resources, resource)
			// Populate the cache so subsequent pushes can reuse this result.
			eds.Cache.Add(&builder, req, resource)
		}
	}
	return resources, removed, model.XdsLogDetails{
		Incremental:    len(edsUpdatedServices) != 0,
		AdditionalInfo: fmt.Sprintf("empty:%v cached:%v/%v", empty, cached, cached+regenerated),
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoints
import (
"math"
"net"
"net/netip"
"sort"
"strconv"
"strings"
corev3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3/loadbalancer"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/schema/kind"
istiolog "istio.io/istio/pkg/log"
"istio.io/istio/pkg/network"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/hash"
)
var (
	// Separator and Slash are the byte delimiters written between fields when
	// hashing an EndpointBuilder cache key (see WriteHash).
	Separator = []byte{'~'}
	Slash     = []byte{'/'}
	// same as the above "xds" package
	log = istiolog.RegisterScope("ads", "ads debugging")
)

// ConnectOriginate is the name for the resources associated with the origination of HTTP CONNECT.
// Duplicated from v1alpha3/waypoint.go to avoid import cycle
const connectOriginate = "connect_originate"
// EndpointBuilder builds the endpoints (EDS/CDS load assignments) for a single
// cluster as seen by a single proxy.
type EndpointBuilder struct {
	// These fields define the primary key for an endpoint, and can be used as a cache key
	clusterName            string
	network                network.ID
	proxyView              model.ProxyView
	clusterID              cluster.ID
	locality               *corev3.Locality
	destinationRule        *model.ConsolidatedDestRule
	service                *model.Service
	clusterLocal           bool
	nodeType               model.NodeType
	failoverPriorityLabels []byte

	// These fields are provided for convenience only
	subsetName   string
	subsetLabels labels.Instance
	hostname     host.Name
	port         int
	push         *model.PushContext
	proxy        *model.Proxy
	dir          model.TrafficDirection

	// mtlsChecker determines whether mTLS applies per endpoint; populated in populateSubsetInfo.
	mtlsChecker *mtlsChecker
}
// NewEndpointBuilder constructs an EndpointBuilder for the given cluster name,
// resolving the service and destination rule from the push context.
func NewEndpointBuilder(clusterName string, proxy *model.Proxy, push *model.PushContext) EndpointBuilder {
	direction, subset, hostname, port := model.ParseSubsetKey(clusterName)
	service := push.ServiceForHostname(proxy, hostname)
	var destRule *model.ConsolidatedDestRule
	if service != nil {
		destRule = proxy.SidecarScope.DestinationRule(model.TrafficDirectionOutbound, proxy, service.Hostname)
	}
	return *NewCDSEndpointBuilder(
		proxy, push, clusterName,
		direction, subset, hostname, port,
		service, destRule,
	)
}
// NewCDSEndpointBuilder allows setting some fields directly when we already
// have the Service and DestinationRule.
func NewCDSEndpointBuilder(
	proxy *model.Proxy, push *model.PushContext, clusterName string,
	dir model.TrafficDirection, subsetName string, hostname host.Name, port int,
	service *model.Service, dr *model.ConsolidatedDestRule,
) *EndpointBuilder {
	b := EndpointBuilder{
		clusterName:     clusterName,
		network:         proxy.Metadata.Network,
		proxyView:       proxy.GetView(),
		clusterID:       proxy.Metadata.ClusterID,
		locality:        proxy.Locality,
		destinationRule: dr,
		service:         service,
		clusterLocal:    push.IsClusterLocal(service),
		nodeType:        proxy.Type,

		subsetName: subsetName,
		hostname:   hostname,
		port:       port,
		push:       push,
		proxy:      proxy,
		dir:        dir,
	}
	// Derive subset labels/mTLS checker and failover-priority labels from the
	// fields set above.
	b.populateSubsetInfo()
	b.populateFailoverPriorityLabels()
	return &b
}
// servicePort looks up the model.Port with the given port number on the
// builder's service. Returns nil (with a debug log) when the service or the
// port is unknown.
func (b *EndpointBuilder) servicePort(port int) *model.Port {
	if !b.ServiceFound() {
		log.Debugf("can not find the service %s for cluster %s", b.hostname, b.clusterName)
		return nil
	}
	svcPort, f := b.service.Ports.GetByPort(port)
	if !f {
		// Log the port that was actually looked up; the previous code logged
		// b.port, which is misleading when a caller passes a different port.
		log.Debugf("can not find the service port %d for cluster %s", port, b.clusterName)
		return nil
	}
	return svcPort
}
// WithSubset returns a copy of this builder targeting the given subset,
// with subset-derived fields repopulated. Returns nil on a nil receiver.
func (b *EndpointBuilder) WithSubset(subset string) *EndpointBuilder {
	if b == nil {
		return nil
	}
	clone := *b
	clone.subsetName = subset
	clone.populateSubsetInfo()
	return &clone
}
// populateSubsetInfo normalizes the subset name and derives the mTLS checker
// and subset labels from the destination rule.
func (b *EndpointBuilder) populateSubsetInfo() {
	if b.dir == model.TrafficDirectionInboundVIP {
		// Inbound VIP subset names carry a protocol prefix; strip it.
		for _, prefix := range []string{"http/", "tcp/"} {
			b.subsetName = strings.TrimPrefix(b.subsetName, prefix)
		}
	}
	b.mtlsChecker = newMtlsChecker(b.push, b.port, b.destinationRule.GetRule(), b.subsetName)
	b.subsetLabels = getSubSetLabels(b.DestinationRule(), b.subsetName)
}
// populateFailoverPriorityLabels computes the proxy's failover-priority label
// bytes when locality failover with a FailoverPriority list is configured.
func (b *EndpointBuilder) populateFailoverPriorityLabels() {
	enableFailover, lb := getOutlierDetectionAndLoadBalancerSettings(b.DestinationRule(), b.port, b.subsetName)
	if !enableFailover {
		return
	}
	lbSetting := loadbalancer.GetLocalityLbSetting(b.push.Mesh.GetLocalityLbSetting(), lb.GetLocalityLbSetting())
	if lbSetting == nil || lbSetting.Distribute != nil || len(lbSetting.FailoverPriority) == 0 {
		return
	}
	if lbSetting.Enabled != nil && !lbSetting.Enabled.Value {
		return
	}
	b.failoverPriorityLabels = util.GetFailoverPriorityLabels(b.proxy.Labels, lbSetting.FailoverPriority)
}
// DestinationRule returns the DestinationRule spec backing this builder, or
// nil when there is none (or the spec has an unexpected type).
func (b *EndpointBuilder) DestinationRule() *v1alpha3.DestinationRule {
	cfg := b.destinationRule.GetRule()
	if cfg == nil {
		return nil
	}
	spec, _ := cfg.Spec.(*v1alpha3.DestinationRule)
	return spec
}
// Type returns the xDS type this builder produces (EDS).
func (b *EndpointBuilder) Type() string {
	return model.EDSType
}
// ServiceFound reports whether a Service was resolved for this cluster.
func (b *EndpointBuilder) ServiceFound() bool {
	return b.service != nil
}
// IsDNSCluster reports whether the service uses DNS-based resolution.
func (b *EndpointBuilder) IsDNSCluster() bool {
	if b.service == nil {
		return false
	}
	switch b.service.Resolution {
	case model.DNSLB, model.DNSRoundRobinLB:
		return true
	default:
		return false
	}
}
// Key provides the eds cache key and should include any information that could change the way endpoints are generated.
// The key is the 64-bit sum of all fields written by WriteHash.
func (b *EndpointBuilder) Key() any {
	// nolint: gosec
	// Not security sensitive code
	h := hash.New()
	b.WriteHash(h)
	return h.Sum64()
}
// WriteHash writes this builder's cache-key fields into h. The write order and
// the Separator/Slash delimiters are significant: the resulting value is used
// as the EDS cache key (see Key), so any change here invalidates cached
// entries across versions.
func (b *EndpointBuilder) WriteHash(h hash.Hash) {
	if b == nil {
		return
	}
	h.WriteString(b.clusterName)
	h.Write(Separator)
	h.WriteString(string(b.network))
	h.Write(Separator)
	h.WriteString(string(b.clusterID))
	h.Write(Separator)
	h.WriteString(string(b.nodeType))
	h.Write(Separator)
	h.WriteString(strconv.FormatBool(b.clusterLocal))
	h.Write(Separator)
	if features.EnableHBONE && b.proxy != nil {
		h.WriteString(strconv.FormatBool(b.proxy.IsProxylessGrpc()))
		h.Write(Separator)
	}
	h.WriteString(util.LocalityToString(b.locality))
	h.Write(Separator)
	if len(b.failoverPriorityLabels) > 0 {
		h.Write(b.failoverPriorityLabels)
		h.Write(Separator)
	}
	// Guard against a nil service: the service block further down checks
	// b.service != nil, so it may be nil here as well; the previous code
	// dereferenced it unconditionally and would panic. For a non-nil service
	// the bytes written are unchanged.
	if b.service != nil && b.service.Attributes.NodeLocal {
		h.WriteString(b.proxy.GetNodeName())
		h.Write(Separator)
	}
	if b.push != nil && b.push.AuthnPolicies != nil {
		h.WriteString(b.push.AuthnPolicies.GetVersion())
	}
	h.Write(Separator)
	for _, dr := range b.destinationRule.GetFrom() {
		h.WriteString(dr.Name)
		h.Write(Slash)
		h.WriteString(dr.Namespace)
	}
	h.Write(Separator)
	if b.service != nil {
		h.WriteString(string(b.service.Hostname))
		h.Write(Slash)
		h.WriteString(b.service.Attributes.Namespace)
	}
	h.Write(Separator)
	if b.proxyView != nil {
		h.WriteString(b.proxyView.String())
	}
	h.Write(Separator)
}
// Cacheable reports whether the generated EDS output may be cached.
func (b *EndpointBuilder) Cacheable() bool {
	// If service is not defined, we cannot do any caching as we will not have a way to
	// invalidate the results.
	// Service being nil means the EDS will be empty anyways, so not much lost here.
	return b.service != nil
}
// DependentConfigs returns the config hashes this EDS output depends on —
// every contributing DestinationRule plus the Service itself (keyed as a
// ServiceEntry) — used for cache invalidation.
func (b *EndpointBuilder) DependentConfigs() []model.ConfigHash {
	drs := b.destinationRule.GetFrom()
	configs := make([]model.ConfigHash, 0, len(drs)+1)
	if b.destinationRule != nil {
		for _, dr := range drs {
			configs = append(configs, model.ConfigKey{
				Kind: kind.DestinationRule,
				Name: dr.Name, Namespace: dr.Namespace,
			}.HashCode())
		}
	}
	if b.service != nil {
		configs = append(configs, model.ConfigKey{
			Kind: kind.ServiceEntry,
			Name: string(b.service.Hostname), Namespace: b.service.Attributes.Namespace,
		}.HashCode())
	}
	// For now, this matches clusterCache's DependentConfigs. If adding anything here, we may need to add them there.
	return configs
}
// LocalityEndpoints pairs the Istio-level endpoints of one locality with their
// Envoy LocalityLbEndpoints representation; the two slices are kept parallel.
type LocalityEndpoints struct {
	istioEndpoints []*model.IstioEndpoint
	// The protobuf message which contains LbEndpoint slice.
	llbEndpoints endpoint.LocalityLbEndpoints
}
// append adds an endpoint to both parallel slices, keeping them in sync.
func (e *LocalityEndpoints) append(ep *model.IstioEndpoint, le *endpoint.LbEndpoint) {
	e.istioEndpoints = append(e.istioEndpoints, ep)
	e.llbEndpoints.LbEndpoints = append(e.llbEndpoints.LbEndpoints, le)
}
// refreshWeight recomputes the locality's load-balancing weight as the sum of
// its endpoints' weights, clearing it when there are no endpoints.
func (e *LocalityEndpoints) refreshWeight() {
	if len(e.llbEndpoints.LbEndpoints) == 0 {
		e.llbEndpoints.LoadBalancingWeight = nil
		return
	}
	var total uint32
	for _, lbEp := range e.llbEndpoints.LbEndpoints {
		total += lbEp.GetLoadBalancingWeight().Value
	}
	e.llbEndpoints.LoadBalancingWeight = &wrapperspb.UInt32Value{Value: total}
}
// AssertInvarianceInTest panics if the parallel endpoint slices have diverged;
// intended for use in tests only.
func (e *LocalityEndpoints) AssertInvarianceInTest() {
	if len(e.llbEndpoints.LbEndpoints) != len(e.istioEndpoints) {
		panic(" len(e.llbEndpoints.LbEndpoints) != len(e.tunnelMetadata)")
	}
}
// FromServiceEndpoints builds LocalityLbEndpoints from the PushContext's snapshotted ServiceIndex.
// Used for CDS (ClusterLoadAssignment constructed elsewhere).
func (b *EndpointBuilder) FromServiceEndpoints() []*endpoint.LocalityLbEndpoints {
	if b == nil {
		return nil
	}
	svcEps := b.push.ServiceEndpointsByPort(b.service, b.port, b.subsetLabels)
	// don't use the pre-computed endpoints for CDS to preserve previous behavior
	return ExtractEnvoyEndpoints(b.generate(svcEps, true))
}
// BuildClusterLoadAssignment converts the shards for this EndpointBuilder's Service
// into a ClusterLoadAssignment. Used for EDS.
func (b *EndpointBuilder) BuildClusterLoadAssignment(endpointIndex *model.EndpointIndex) *endpoint.ClusterLoadAssignment {
	// Snapshot the shared endpoint index into a local slice up front, so generation
	// does not run under the index lock.
	svcEps := b.snapshotShards(endpointIndex)
	localityLbEndpoints := b.generate(svcEps, false)
	if len(localityLbEndpoints) == 0 {
		// Emit an assignment with no endpoints rather than nothing at all.
		return buildEmptyClusterLoadAssignment(b.clusterName)
	}
	l := b.createClusterLoadAssignment(localityLbEndpoints)
	// If locality aware routing is enabled, prioritize endpoints or set their lb weight.
	// Failover should only be enabled when there is an outlier detection, otherwise Envoy
	// will never detect the hosts are unhealthy and redirect traffic.
	enableFailover, lb := getOutlierDetectionAndLoadBalancerSettings(b.DestinationRule(), b.port, b.subsetName)
	lbSetting := loadbalancer.GetLocalityLbSetting(b.push.Mesh.GetLocalityLbSetting(), lb.GetLocalityLbSetting())
	if lbSetting != nil {
		// Make a shallow copy of the cla as we are mutating the endpoints with priorities/weights relative to the calling proxy
		l = util.CloneClusterLoadAssignment(l)
		// Pair each envoy locality entry with its source IstioEndpoints so the locality
		// LB code can consult endpoint metadata while rewriting priorities/weights.
		wrappedLocalityLbEndpoints := make([]*loadbalancer.WrappedLocalityLbEndpoints, len(localityLbEndpoints))
		for i := range localityLbEndpoints {
			wrappedLocalityLbEndpoints[i] = &loadbalancer.WrappedLocalityLbEndpoints{
				IstioEndpoints:      localityLbEndpoints[i].istioEndpoints,
				LocalityLbEndpoints: l.Endpoints[i],
			}
		}
		loadbalancer.ApplyLocalityLBSetting(l, wrappedLocalityLbEndpoints, b.locality, b.proxy.Labels, lbSetting, enableFailover)
	}
	return l
}
// generate endpoints with applied weights, multi-network mapping and other filtering.
// allowPrecomputed=false means we will not use or update the IstioEndpoint's
// precomputed envoy endpoint (the comment previously referred to a "noCache"
// parameter; the actual parameter is allowPrecomputed with inverted sense).
func (b *EndpointBuilder) generate(eps []*model.IstioEndpoint, allowPrecomputed bool) []*LocalityEndpoints {
	// shouldn't happen here
	if !b.ServiceFound() {
		return nil
	}
	svcPort := b.servicePort(b.port)
	if svcPort == nil {
		return nil
	}
	// Drop endpoints the proxy should not receive (visibility, health, labels, port, ...).
	eps = slices.Filter(eps, func(ep *model.IstioEndpoint) bool {
		return b.filterIstioEndpoint(ep, svcPort)
	})
	// Group the converted envoy endpoints by locality label.
	localityEpMap := make(map[string]*LocalityEndpoints)
	for _, ep := range eps {
		eep := ep.EnvoyEndpoint()
		mtlsEnabled := b.mtlsChecker.checkMtlsEnabled(ep, b.proxy.IsWaypointProxy())
		// Determine if we need to build the endpoint. We try to cache it for performance reasons
		needToCompute := eep == nil
		if features.EnableHBONE {
			// Currently the HBONE implementation leads to different endpoint generation depending on if the
			// client proxy supports HBONE or not. This breaks the cache.
			// For now, just disable caching if the global HBONE flag is enabled.
			needToCompute = true
		}
		if eep != nil && mtlsEnabled != isMtlsEnabled(eep) {
			// The mTLS settings may have changed, invalidating the cache endpoint. Rebuild it
			needToCompute = true
		}
		if needToCompute || !allowPrecomputed {
			eep = buildEnvoyLbEndpoint(b, ep, mtlsEnabled)
			// buildEnvoyLbEndpoint returns nil for endpoints that must be dropped
			// (e.g. out of waypoint scope).
			if eep == nil {
				continue
			}
			if allowPrecomputed {
				// Store the freshly built envoy endpoint back on the IstioEndpoint for reuse.
				ep.ComputeEnvoyEndpoint(eep)
			}
		}
		locLbEps, found := localityEpMap[ep.Locality.Label]
		if !found {
			locLbEps = &LocalityEndpoints{
				llbEndpoints: endpoint.LocalityLbEndpoints{
					Locality:    util.ConvertLocality(ep.Locality.Label),
					LbEndpoints: make([]*endpoint.LbEndpoint, 0, len(eps)),
				},
			}
			localityEpMap[ep.Locality.Label] = locLbEps
		}
		locLbEps.append(ep, eep)
	}
	// Emit localities in sorted key order so the EDS output is deterministic.
	locEps := make([]*LocalityEndpoints, 0, len(localityEpMap))
	locs := make([]string, 0, len(localityEpMap))
	for k := range localityEpMap {
		locs = append(locs, k)
	}
	if len(locs) >= 2 {
		sort.Strings(locs)
	}
	for _, locality := range locs {
		locLbEps := localityEpMap[locality]
		var weight uint32
		var overflowStatus bool
		// The locality weight is the saturating sum of its endpoint weights (see addUint32).
		for _, ep := range locLbEps.llbEndpoints.LbEndpoints {
			weight, overflowStatus = addUint32(weight, ep.LoadBalancingWeight.GetValue())
		}
		locLbEps.llbEndpoints.LoadBalancingWeight = &wrapperspb.UInt32Value{
			Value: weight,
		}
		if overflowStatus {
			log.Warnf("Sum of localityLbEndpoints weight is overflow: service:%s, port: %d, locality:%s",
				b.service.Hostname, b.port, locality)
		}
		locEps = append(locEps, locLbEps)
	}
	if len(locEps) == 0 {
		b.push.AddMetric(model.ProxyStatusClusterNoInstances, b.clusterName, "", "")
	}
	// Apply the Split Horizon EDS filter, if applicable.
	locEps = b.EndpointsByNetworkFilter(locEps)
	if model.IsDNSSrvSubsetKey(b.clusterName) {
		// For the SNI-DNAT clusters, we are using AUTO_PASSTHROUGH gateway. AUTO_PASSTHROUGH is intended
		// to passthrough mTLS requests. However, at the gateway we do not actually have any way to tell if the
		// request is a valid mTLS request or not, since its passthrough TLS.
		// To ensure we allow traffic only to mTLS endpoints, we filter out non-mTLS endpoints for these cluster types.
		locEps = b.EndpointsWithMTLSFilter(locEps)
	}
	return locEps
}
// addUint32AvoidOverflow returns sum of two uint32 and status. If sum overflows,
// and returns MaxUint32 and status.
func addUint32(left, right uint32) (uint32, bool) {
if math.MaxUint32-right < left {
return math.MaxUint32, true
}
return left + right, false
}
// filterIstioEndpoint reports whether ep should be included in the generated
// endpoints for svcPort. Each check below is an independent reason to exclude
// the endpoint; order matters only for efficiency, not correctness.
func (b *EndpointBuilder) filterIstioEndpoint(ep *model.IstioEndpoint, svcPort *model.Port) bool {
	// for ServiceInternalTrafficPolicy
	if b.service.Attributes.NodeLocal && ep.NodeName != b.proxy.GetNodeName() {
		return false
	}
	// Only send endpoints from the networks in the network view requested by the proxy.
	// The default network view assigned to the Proxy is nil, in that case match any network.
	if !b.proxyView.IsVisible(ep) {
		// Endpoint's network doesn't match the set of networks that the proxy wants to see.
		return false
	}
	// If the downstream service is configured as cluster-local, only include endpoints that
	// reside in the same cluster.
	if b.clusterLocal && (b.clusterID != ep.Locality.ClusterID) {
		return false
	}
	// TODO(nmittler): Consider merging discoverability policy with cluster-local
	if !ep.IsDiscoverableFromProxy(b.proxy) {
		return false
	}
	// Only include endpoints serving the named service port being built.
	if svcPort.Name != ep.ServicePortName {
		return false
	}
	// Port labels
	if !b.subsetLabels.SubsetOf(ep.Labels) {
		return false
	}
	// If we don't know the address we must eventually use a gateway address
	if ep.Address == "" && (!b.gateways().IsMultiNetworkEnabled() || b.proxy.InNetwork(ep.Network)) {
		return false
	}
	// Filter out unhealthy endpoints
	if !features.SendUnhealthyEndpoints.Load() && ep.HealthStatus == model.UnHealthy {
		return false
	}
	// Draining endpoints are only sent to 'persistent session' clusters.
	// An endpoint counts as draining either by health status or by carrying the
	// configured draining label.
	draining := ep.HealthStatus == model.Draining ||
		features.DrainingLabel != "" && ep.Labels[features.DrainingLabel] != ""
	if draining {
		persistentSession := b.service.Attributes.Labels[features.PersistentSessionLabel] != ""
		if !persistentSession {
			return false
		}
	}
	return true
}
// snapshotShards copies the relevant endpoints out of the shared EndpointIndex
// into a local slice to avoid lock contention during generation.
func (b *EndpointBuilder) snapshotShards(endpointIndex *model.EndpointIndex) []*model.IstioEndpoint {
	shards := b.findShards(endpointIndex)
	if shards == nil {
		return nil
	}
	// Determine whether or not the target service is considered local to the cluster
	// and should, therefore, not be accessed from outside the cluster.
	isClusterLocal := b.clusterLocal
	var eps []*model.IstioEndpoint
	shards.RLock()
	// defer guarantees the read lock is released even if something below panics;
	// the previous manual RUnlock at the end would leak the lock in that case.
	defer shards.RUnlock()
	// Extract shard keys so we can iterate in order. This ensures a stable EDS output.
	keys := shards.Keys()
	// The shards are updated independently, now need to filter and merge for this cluster
	for _, shardKey := range keys {
		if shardKey.Cluster != b.clusterID {
			// If the downstream service is configured as cluster-local, only include endpoints that
			// reside in the same cluster.
			if isClusterLocal || b.service.Attributes.NodeLocal {
				continue
			}
		}
		eps = append(eps, shards.Shards[shardKey]...)
	}
	return eps
}
// findShards looks up the endpoint shards backing this builder's cluster,
// returning nil whenever EDS should not be generated for it.
func (b *EndpointBuilder) findShards(endpointIndex *model.EndpointIndex) *model.EndpointShards {
	if b.service == nil {
		log.Debugf("can not find the service for cluster %s", b.clusterName)
		return nil
	}
	// Service resolution type might have changed and Cluster may be still in the EDS cluster list of "Connection.Clusters".
	// This can happen if a ServiceEntry's resolution is changed from STATIC to DNS which changes the Envoy cluster type from
	// EDS to STRICT_DNS or LOGICAL_DNS. When pushEds is called before Envoy sends the updated cluster list via Endpoint request which in turn
	// will update "Connection.Clusters", we might accidentally send EDS updates for STRICT_DNS cluster. This check guards
	// against such behavior and returns nil. When the updated cluster warms up in Envoy, it would update with new endpoints
	// automatically.
	// Gateways use EDS for Passthrough cluster. So we should allow Passthrough here.
	if b.IsDNSCluster() {
		log.Infof("cluster %s in eds cluster, but its resolution now is updated to %v, skipping it.", b.clusterName, b.service.Resolution)
		return nil
	}
	shards, ok := endpointIndex.ShardsForService(string(b.hostname), b.service.Attributes.Namespace)
	if !ok {
		// Shouldn't happen here
		log.Debugf("can not find the endpointShards for cluster %s", b.clusterName)
		return nil
	}
	return shards
}
// createClusterLoadAssignment assembles the ClusterLoadAssignment. At this point
// all options must already have been applied to the locality lb endpoints.
func (b *EndpointBuilder) createClusterLoadAssignment(llbOpts []*LocalityEndpoints) *endpoint.ClusterLoadAssignment {
	eps := make([]*endpoint.LocalityLbEndpoints, 0, len(llbOpts))
	for i := range llbOpts {
		eps = append(eps, &llbOpts[i].llbEndpoints)
	}
	return &endpoint.ClusterLoadAssignment{
		ClusterName: b.clusterName,
		Endpoints:   eps,
	}
}
// buildEmptyClusterLoadAssignment returns a ClusterLoadAssignment for clusterName
// with no endpoints (used when generation produced nothing for the cluster).
func buildEmptyClusterLoadAssignment(clusterName string) *endpoint.ClusterLoadAssignment {
	return &endpoint.ClusterLoadAssignment{
		ClusterName: clusterName,
	}
}
// gateways picks the network gateway set relevant to this cluster: DNS clusters
// use the unresolved gateways, all other clusters the resolved ones.
func (b *EndpointBuilder) gateways() *model.NetworkGateways {
	mgr := b.push.NetworkManager()
	if b.IsDNSCluster() {
		return mgr.Unresolved
	}
	return mgr.NetworkGateways
}
// ExtractEnvoyEndpoints pulls the envoy LocalityLbEndpoints out of each
// LocalityEndpoints wrapper, preserving order.
func ExtractEnvoyEndpoints(locEps []*LocalityEndpoints) []*endpoint.LocalityLbEndpoints {
	var result []*endpoint.LocalityLbEndpoints
	for i := range locEps {
		result = append(result, &locEps[i].llbEndpoints)
	}
	return result
}
// buildEnvoyLbEndpoint packs the endpoint based on istio info.
// Returns nil when the endpoint must be dropped entirely (currently only when a
// waypoint proxy does not own the endpoint).
func buildEnvoyLbEndpoint(b *EndpointBuilder, e *model.IstioEndpoint, mtlsEnabled bool) *endpoint.LbEndpoint {
	addr := util.BuildAddress(e.Address, e.EndpointPort)
	healthStatus := e.HealthStatus
	// The configured draining label forces DRAINING regardless of reported health.
	if features.DrainingLabel != "" && e.Labels[features.DrainingLabel] != "" {
		healthStatus = model.Draining
	}
	ep := &endpoint.LbEndpoint{
		HealthStatus: corev3.HealthStatus(healthStatus),
		LoadBalancingWeight: &wrapperspb.UInt32Value{
			Value: e.GetLoadBalancingWeight(),
		},
		HostIdentifier: &endpoint.LbEndpoint_Endpoint{
			Endpoint: &endpoint.Endpoint{
				Address: addr,
			},
		},
		Metadata: &corev3.Metadata{},
	}
	// Istio telemetry depends on the metadata value being set for endpoints in the mesh.
	// Istio endpoint level tls transport socket configuration depends on this logic
	// Do not remove
	var meta *model.EndpointMetadata
	if features.CanonicalServiceForMeshExternalServiceEntry && b.service.MeshExternal {
		svcLabels := b.service.Attributes.Labels
		if _, ok := svcLabels[model.IstioCanonicalServiceLabelName]; ok {
			// Clone before mutating so shared endpoint metadata is not modified in place.
			meta = e.MetadataClone()
			if meta.Labels == nil {
				meta.Labels = make(map[string]string)
			}
			meta.Labels[model.IstioCanonicalServiceLabelName] = svcLabels[model.IstioCanonicalServiceLabelName]
			meta.Labels[model.IstioCanonicalServiceRevisionLabelName] = svcLabels[model.IstioCanonicalServiceRevisionLabelName]
		} else {
			meta = e.Metadata()
		}
		meta.Namespace = b.service.Attributes.Namespace
	} else {
		meta = e.Metadata()
	}
	// detect if mTLS is possible for this endpoint, used later during ep filtering
	// this must be done while converting IstioEndpoints because we still have workload labels
	if !mtlsEnabled {
		meta.TLSMode = ""
	}
	util.AppendLbEndpointMetadata(meta, ep.Metadata)
	waypoint := ""
	address, port := e.Address, int(e.EndpointPort)
	tunnel := supportTunnel(b, e)
	// Setup tunnel information, if needed
	// This is for waypoint
	if b.dir == model.TrafficDirectionInboundVIP {
		// This is only used in waypoint proxy
		inScope := waypointInScope(b.proxy, e)
		if !inScope {
			// A waypoint can *partially* select a Service in edge cases. In this case, some % of requests will
			// go through the waypoint, and the rest direct. Since these have already been load balanced across,
			// we want to make sure we only send to workloads behind our waypoint
			return nil
		}
		// For inbound, we only use EDS for the VIP cases. The VIP cluster will point to encap listener.
		if tunnel {
			// We will connect to CONNECT origination internal listener, telling it to tunnel to ip:15008,
			// and add some detunnel metadata that had the original port.
			ep.Metadata.FilterMetadata[util.OriginalDstMetadataKey] = util.BuildTunnelMetadataStruct(address, port, waypoint)
			// Rebuild the endpoint as an internal-listener endpoint, preserving its weight.
			ep = util.BuildInternalLbEndpoint(connectOriginate, ep.Metadata)
			ep.LoadBalancingWeight = &wrapperspb.UInt32Value{
				Value: e.GetLoadBalancingWeight(),
			}
		}
	} else if tunnel {
		// Support connecting to server side waypoint proxy, if the destination has one. This is for sidecars and ingress.
		if b.dir == model.TrafficDirectionOutbound && !b.proxy.IsWaypointProxy() {
			workloads := findWaypoints(b.push, e)
			if len(workloads) > 0 {
				// TODO: load balance
				waypoint = net.JoinHostPort(workloads[0].String(), strconv.Itoa(model.HBoneInboundListenPort))
			}
		}
		// Setup tunnel metadata so requests will go through the tunnel
		ep.HostIdentifier = &endpoint.LbEndpoint_Endpoint{Endpoint: &endpoint.Endpoint{
			Address: util.BuildInternalAddressWithIdentifier(connectOriginate, net.JoinHostPort(address, strconv.Itoa(port))),
		}}
		ep.Metadata.FilterMetadata[util.OriginalDstMetadataKey] = util.BuildTunnelMetadataStruct(address, port, waypoint)
		ep.Metadata.FilterMetadata[util.EnvoyTransportSocketMetadataKey] = &structpb.Struct{
			Fields: map[string]*structpb.Value{
				model.TunnelLabelShortName: {Kind: &structpb.Value_StringValue{StringValue: model.TunnelHTTP}},
			},
		}
	}
	return ep
}
// supportTunnel reports whether traffic from this proxy to endpoint e can use
// HBONE tunneling: the client must be tunnel-capable, and the destination must
// be a waypoint, an ambient-enabled workload, or advertise HTTP tunnel support.
func supportTunnel(b *EndpointBuilder, e *model.IstioEndpoint) bool {
	switch {
	case b.proxy.IsProxylessGrpc():
		// Proxyless client cannot handle tunneling, even if the server can
		return false
	case !b.proxy.EnableHBONE():
		return false
	case e.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel:
		// Other side is a waypoint proxy.
		return true
	case b.push.SupportsTunnel(e.Network, e.Address):
		// Otherwise has ambient enabled. Note: this is a synthetic label, not existing in the real Pod.
		return true
	case e.SupportsTunnel(model.TunnelHTTP):
		// Currently we only support HTTP tunnel, so just check for that. If we support more, we will
		// need to pick the right one based on our support overlap.
		return true
	default:
		return false
	}
}
// waypointInScope computes whether the endpoint is owned by the waypoint.
func waypointInScope(waypoint *model.Proxy, e *model.IstioEndpoint) bool {
	scope := waypoint.WaypointScope()
	if scope.Namespace != e.Namespace {
		return false
	}
	if scope.ServiceAccount == "" {
		// Namespace-scoped waypoint: any service account in the namespace matches.
		return true
	}
	ident, _ := spiffe.ParseIdentity(e.ServiceAccount)
	return scope.ServiceAccount == ident.ServiceAccount
}
// findWaypoints returns the waypoint addresses serving the endpoint's
// namespace/service-account scope.
func findWaypoints(push *model.PushContext, e *model.IstioEndpoint) []netip.Addr {
	ident, _ := spiffe.ParseIdentity(e.ServiceAccount)
	scope := model.WaypointScope{
		Namespace:      e.Namespace,
		ServiceAccount: ident.ServiceAccount,
	}
	return push.WaypointsFor(scope)
}
// getOutlierDetectionAndLoadBalancerSettings resolves, from the DestinationRule's
// (port, subset)-merged traffic policy, whether outlier detection is configured
// and which load balancer settings apply. Returns (false, nil) without a rule.
func getOutlierDetectionAndLoadBalancerSettings(
	destinationRule *v1alpha3.DestinationRule,
	portNumber int,
	subsetName string,
) (bool, *v1alpha3.LoadBalancerSettings) {
	if destinationRule == nil {
		return false, nil
	}
	policy := getSubsetTrafficPolicy(destinationRule, &model.Port{Port: portNumber}, subsetName)
	if policy == nil {
		return false, nil
	}
	return policy.OutlierDetection != nil, policy.LoadBalancer
}
// getSubsetTrafficPolicy merges the rule-level traffic policy with the named
// subset's policy (if the subset exists) for the given port.
func getSubsetTrafficPolicy(destinationRule *v1alpha3.DestinationRule, port *model.Port, subsetName string) *v1alpha3.TrafficPolicy {
	var subsetPolicy *v1alpha3.TrafficPolicy
	for _, ss := range destinationRule.Subsets {
		if ss.Name != subsetName {
			continue
		}
		subsetPolicy = ss.TrafficPolicy
		break
	}
	return util.MergeSubsetTrafficPolicy(destinationRule.TrafficPolicy, subsetPolicy, port)
}
// getSubSetLabels returns the labels associated with a subset of a given service.
// Returns nil when no subset is named, there is no rule, the subset is unknown,
// or the subset carries no labels.
func getSubSetLabels(dr *v1alpha3.DestinationRule, subsetName string) labels.Instance {
	if subsetName == "" || dr == nil {
		return nil
	}
	for _, ss := range dr.Subsets {
		if ss.Name != subsetName {
			continue
		}
		if len(ss.Labels) == 0 {
			return nil
		}
		return ss.Labels
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoints
import (
"math"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
"google.golang.org/protobuf/proto"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
labelutil "istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/network"
)
// EndpointsByNetworkFilter is a network filter function to support Split Horizon EDS - filter the endpoints based on the network
// of the connected sidecar. The filter will filter out all endpoints which are not present within the
// sidecar network and add a gateway endpoint to remote networks that have endpoints
// (if gateway exists and its IP is an IP and not a dns name).
// Information for the mesh networks is provided as a MeshNetwork config map.
func (b *EndpointBuilder) EndpointsByNetworkFilter(endpoints []*LocalityEndpoints) []*LocalityEndpoints {
	if !b.gateways().IsMultiNetworkEnabled() {
		// Multi-network is not configured (this is the case by default). Just access all endpoints directly.
		return endpoints
	}
	// A new array of endpoints to be returned that will have both local and
	// remote gateways (if any)
	filtered := make([]*LocalityEndpoints, 0)
	// Scale all weights by the lcm of gateways per network and gateways per cluster.
	// This will allow us to more easily spread traffic to the endpoint across multiple
	// network gateways, increasing reliability of the endpoint.
	scaleFactor := b.gateways().GetLBWeightScaleFactor()
	// Go through all cluster endpoints and add those with the same network as the sidecar
	// to the result. Also count the number of endpoints per each remote network while
	// iterating so that it can be used as the weight for the gateway endpoint
	for _, ep := range endpoints {
		// Carry over locality and priority; endpoints and weight are rebuilt below.
		lbEndpoints := &LocalityEndpoints{
			llbEndpoints: endpoint.LocalityLbEndpoints{
				Locality: ep.llbEndpoints.Locality,
				Priority: ep.llbEndpoints.Priority,
				// Endpoints and weight will be reset below.
			},
		}
		// Create a map to keep track of the gateways used and their aggregate weights.
		gatewayWeights := make(map[model.NetworkGateway]uint32)
		// Process all the endpoints.
		for i, lbEp := range ep.llbEndpoints.LbEndpoints {
			istioEndpoint := ep.istioEndpoints[i]
			// If the proxy can't view the network for this endpoint, exclude it entirely.
			if !b.proxyView.IsVisible(istioEndpoint) {
				continue
			}
			// Copy the endpoint in order to expand the load balancing weight.
			// When multiplying, be careful to avoid overflow - clipping the
			// result at the maximum value for uint32.
			weight := b.scaleEndpointLBWeight(lbEp, scaleFactor)
			if lbEp.GetLoadBalancingWeight().GetValue() != weight {
				// Clone before mutating: the input endpoint may be shared/cached.
				lbEp = proto.Clone(lbEp).(*endpoint.LbEndpoint)
				lbEp.LoadBalancingWeight = &wrappers.UInt32Value{
					Value: weight,
				}
			}
			epNetwork := istioEndpoint.Network
			epCluster := istioEndpoint.Locality.ClusterID
			gateways := b.selectNetworkGateways(epNetwork, epCluster)
			// Check if the endpoint is directly reachable. It's considered directly reachable if
			// the endpoint is either on the local network or on a remote network that can be reached
			// directly from the local network.
			if b.proxy.InNetwork(epNetwork) || len(gateways) == 0 {
				// The endpoint is directly reachable - just add it.
				// If there is no gateway, the address must not be empty
				if lbEp.GetEndpoint().GetAddress().GetSocketAddress().GetAddress() != "" {
					lbEndpoints.append(ep.istioEndpoints[i], lbEp)
				}
				continue
			}
			// Cross-network traffic relies on mTLS to be enabled for SNI routing
			// TODO BTS may allow us to work around this
			if !isMtlsEnabled(lbEp) {
				continue
			}
			// Apply the weight for this endpoint to the network gateways.
			splitWeightAmongGateways(weight, gateways, gatewayWeights)
		}
		// Sort the gateways into an ordered list so that the generated endpoints are deterministic.
		gateways := maps.Keys(gatewayWeights)
		gateways = model.SortGateways(gateways)
		// Create endpoints for the gateways.
		for _, gw := range gateways {
			epWeight := gatewayWeights[gw]
			if epWeight == 0 {
				log.Warnf("gateway weight must be greater than 0, scaleFactor is %d", scaleFactor)
				epWeight = 1
			}
			epAddr := util.BuildAddress(gw.Addr, gw.Port)
			// Generate a fake IstioEndpoint to carry network and cluster information.
			gwIstioEp := &model.IstioEndpoint{
				Network: gw.Network,
				Locality: model.Locality{
					ClusterID: gw.Cluster,
				},
				Labels: labelutil.AugmentLabels(nil, gw.Cluster, "", "", gw.Network),
			}
			// Generate the EDS endpoint for this gateway.
			gwEp := &endpoint.LbEndpoint{
				HostIdentifier: &endpoint.LbEndpoint_Endpoint{
					Endpoint: &endpoint.Endpoint{
						Address: epAddr,
					},
				},
				LoadBalancingWeight: &wrappers.UInt32Value{
					Value: epWeight,
				},
				Metadata: &core.Metadata{},
			}
			// TODO: figure out a way to extract locality data from the gateway public endpoints in meshNetworks
			util.AppendLbEndpointMetadata(&model.EndpointMetadata{
				Network:   gw.Network,
				TLSMode:   model.IstioMutualTLSModeLabel,
				ClusterID: gw.Cluster,
				Labels:    labels.Instance{},
			}, gwEp.Metadata)
			// Currently gateway endpoint does not support tunnel.
			lbEndpoints.append(gwIstioEp, gwEp)
		}
		// Endpoint members could be stripped or aggregated by network. Adjust weight value here.
		lbEndpoints.refreshWeight()
		filtered = append(filtered, lbEndpoints)
	}
	return filtered
}
// selectNetworkGateways chooses the gateways that best match the network and cluster. If there is
// no match for the network+cluster, then all gateways matching the network are returned. Preferring
// gateways that match against cluster has the following advantages:
//
// 1. Potentially reducing extra latency incurred when the gateway and endpoint reside in different
// clusters.
//
// 2. Enables Kubernetes MCS use cases, where endpoints for a service might be exported in one
// cluster but not another within the same network. By targeting the gateway for the cluster
// where the exported endpoints reside, we ensure that we only send traffic to exported endpoints.
func (b *EndpointBuilder) selectNetworkGateways(nw network.ID, c cluster.ID) []model.NetworkGateway {
	if matched := b.gateways().GatewaysForNetworkAndCluster(nw, c); len(matched) > 0 {
		return matched
	}
	// Nothing matched network+cluster; fall back to all gateways on the network.
	return b.gateways().GatewaysForNetwork(nw)
}
// scaleEndpointLBWeight multiplies the endpoint's load balancing weight by
// scaleFactor, clamping the result at math.MaxUint32 on overflow. An endpoint
// with a missing or zero weight gets exactly scaleFactor (weight treated as 1).
// NOTE(review): a zero scaleFactor would divide by zero below, as in the
// original code; callers obtain it from GetLBWeightScaleFactor — confirm it is
// always >= 1.
func (b *EndpointBuilder) scaleEndpointLBWeight(ep *endpoint.LbEndpoint, scaleFactor uint32) uint32 {
	if ep.GetLoadBalancingWeight() == nil || ep.GetLoadBalancingWeight().Value == 0 {
		return scaleFactor
	}
	weight := uint32(math.MaxUint32)
	// Use <= rather than <: when Value == MaxUint32/scaleFactor (integer floor),
	// Value*scaleFactor <= MaxUint32, so the product is exact and must not be clamped.
	if ep.GetLoadBalancingWeight().Value <= math.MaxUint32/scaleFactor {
		weight = ep.GetLoadBalancingWeight().Value * scaleFactor
	}
	return weight
}
// splitWeightAmongGateways evenly distributes weight across the given gateways,
// accumulating the per-gateway share into gatewayWeights. Integer division means
// any remainder (weight % len(gateways)) is deliberately dropped, matching the
// existing behavior; a zero share can later be bumped to 1 by the caller.
func splitWeightAmongGateways(weight uint32, gateways []model.NetworkGateway, gatewayWeights map[model.NetworkGateway]uint32) {
	// Guard against division by zero; with no gateways there is nothing to weight.
	if len(gateways) == 0 {
		return
	}
	// Spread the weight across the gateways.
	weightPerGateway := weight / uint32(len(gateways))
	for _, gateway := range gateways {
		gatewayWeights[gateway] += weightPerGateway
	}
}
// EndpointsWithMTLSFilter removes all endpoints that do not handle mTLS. This is determined by looking at
// auto-mTLS, DestinationRule, and PeerAuthentication to determine if we would send mTLS to these endpoints.
// Note there is no guarantee these destinations *actually* handle mTLS; just that we are configured to send mTLS to them.
func (b *EndpointBuilder) EndpointsWithMTLSFilter(endpoints []*LocalityEndpoints) []*LocalityEndpoints {
	// A new array of endpoints to be returned that will have both local and
	// remote gateways (if any)
	filtered := make([]*LocalityEndpoints, 0)
	// Go through all cluster endpoints and add those with mTLS enabled
	for _, ep := range endpoints {
		// Carry over locality and priority; endpoints and weight are rebuilt below.
		lbEndpoints := &LocalityEndpoints{
			llbEndpoints: endpoint.LocalityLbEndpoints{
				Locality: ep.llbEndpoints.Locality,
				Priority: ep.llbEndpoints.Priority,
				// Endpoints and weight will be reset below.
			},
		}
		for i, lbEp := range ep.llbEndpoints.LbEndpoints {
			if !isMtlsEnabled(lbEp) {
				// no mTLS, skip it
				continue
			}
			lbEndpoints.append(ep.istioEndpoints[i], lbEp)
		}
		// Recompute the locality weight from the surviving endpoints.
		lbEndpoints.refreshWeight()
		filtered = append(filtered, lbEndpoints)
	}
	return filtered
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package endpoints
import (
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
networkingapi "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/security/authn"
"istio.io/istio/pkg/config"
)
// TODO this logic is probably done elsewhere in XDS, possible code-reuse + perf improvements
// mtlsChecker decides whether mTLS would be used for an endpoint, combining the
// DestinationRule's TLS settings with PeerAuthentication policy (see checkMtlsEnabled).
type mtlsChecker struct {
	push    *model.PushContext
	svcPort int
	// destinationRule caches the DR-resolved TLS mode for (svcPort, subset);
	// nil when no DestinationRule TLS setting applies (see tlsModeForDestinationRule).
	destinationRule *networkingapi.ClientTLSSettings_TLSmode
}
// newMtlsChecker builds an mtlsChecker for the given service port, resolving the
// TLS mode from the DestinationRule config (dr may be nil) and subset up front.
func newMtlsChecker(push *model.PushContext, svcPort int, dr *config.Config, subset string) *mtlsChecker {
	return &mtlsChecker{
		push:            push,
		svcPort:         svcPort,
		destinationRule: tlsModeForDestinationRule(dr, subset, svcPort),
	}
}
// isMtlsEnabled returns true if the given lbEp has mTLS enabled, i.e. its
// transport-socket filter metadata carries the istio-mutual TLS mode label.
func isMtlsEnabled(lbEp *endpoint.LbEndpoint) bool {
	// Use nil-safe generated getters throughout: the previous direct field access
	// (lbEp.Metadata.FilterMetadata) would panic on an endpoint with nil Metadata,
	// while GetFields() further down the chain was already nil-safe.
	return lbEp.GetMetadata().GetFilterMetadata()[util.EnvoyTransportSocketMetadataKey].
		GetFields()[model.TLSModeLabelShortname].
		GetStringValue() == model.IstioMutualTLSModeLabel
}
// checkMtlsEnabled computes whether mTLS should be enabled or not. This is determined based
// on the DR, original endpoint TLSMode (based on injection of sidecar), and PeerAuthentication settings.
func (c *mtlsChecker) checkMtlsEnabled(ep *model.IstioEndpoint, isWaypoint bool) bool {
	// A DestinationRule TLS setting, when present, takes precedence over everything else.
	if drMode := c.destinationRule; drMode != nil {
		return *drMode == networkingapi.ClientTLSSettings_ISTIO_MUTUAL
	}
	// if endpoint has no sidecar or explicitly tls disabled by "security.istio.io/tlsMode" label.
	if ep.TLSMode != model.IstioMutualTLSModeLabel {
		return false
	}
	// Otherwise defer to the PeerAuthentication-derived policy for the endpoint's port.
	return authn.
		NewMtlsPolicy(c.push, ep.Namespace, ep.Labels, isWaypoint).
		GetMutualTLSModeForPort(ep.EndpointPort) != model.MTLSDisable
}
// tlsModeForDestinationRule extracts the client TLS mode that the DestinationRule
// config applies to the given subset and port, or nil if none applies.
func tlsModeForDestinationRule(drc *config.Config, subset string, port int) *networkingapi.ClientTLSSettings_TLSmode {
	if drc == nil {
		return nil
	}
	dr, ok := drc.Spec.(*networkingapi.DestinationRule)
	if !ok || dr == nil {
		return nil
	}
	if subset == "" {
		// No subset: use the rule-level traffic policy.
		return trafficPolicyTLSModeForPort(dr.GetTrafficPolicy(), port)
	}
	for _, ss := range dr.Subsets {
		if ss.Name == subset {
			return trafficPolicyTLSModeForPort(ss.GetTrafficPolicy(), port)
		}
	}
	// Named subset not found in the rule.
	return nil
}
// trafficPolicyTLSModeForPort returns the TLS mode from the traffic policy,
// preferring a matching port-level setting over the policy-wide one.
func trafficPolicyTLSModeForPort(tp *networkingapi.TrafficPolicy, port int) *networkingapi.ClientTLSSettings_TLSmode {
	if tp == nil {
		return nil
	}
	// A port-level TLS setting for this port overrides the policy-wide mode.
	for _, ps := range tp.GetPortLevelSettings() {
		if ps.Tls != nil && int(ps.GetPort().GetNumber()) == port {
			return &ps.Tls.Mode
		}
	}
	if tp.Tls != nil {
		return &tp.Tls.Mode
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filters
import (
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
sfsvalue "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/common/set_filter_state/v3"
cors "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/cors/v3"
fault "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/fault/v3"
grpcstats "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_stats/v3"
grpcweb "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/grpc_web/v3"
router "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/router/v3"
sfs "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/set_filter_state/v3"
statefulsession "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/stateful_session/v3"
httpinspector "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/http_inspector/v3"
originaldst "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_dst/v3"
originalsrc "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/original_src/v3"
proxy_proto "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/proxy_protocol/v3"
tlsinspector "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/listener/tls_inspector/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
sfsnetwork "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/set_filter_state/v3"
previoushost "github.com/envoyproxy/go-control-plane/envoy/extensions/retry/host/previous_hosts/v3"
rawbuffer "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/raw_buffer/v3"
"google.golang.org/protobuf/types/known/wrapperspb"
alpn "istio.io/api/envoy/config/filter/http/alpn/v2alpha1"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/wellknown"
)
const (
	// TLSTransportProtocol and RawBufferTransportProtocol are the transport
	// protocol values used in filter chain matching.
	TLSTransportProtocol       = "tls"
	RawBufferTransportProtocol = "raw_buffer"
	// Alpn HTTP filter name which will override the ALPN for upstream TLS connection.
	AlpnFilterName = "istio.alpn"
	// MxFilterName is the name of the Istio metadata exchange filter.
	MxFilterName = "istio.metadata_exchange"
	// AuthnFilterName is the name for the Istio AuthN filter. This should be the same
	// as the name defined in
	// https://github.com/istio/proxy/blob/master/src/envoy/http/authn/http_filter_factory.cc#L30
	AuthnFilterName = "istio_authn"
	// EnvoyJwtFilterName is the name of the Envoy JWT filter.
	EnvoyJwtFilterName = "envoy.filters.http.jwt_authn"
	// EnvoyJwtFilterPayload is the struct field for the payload in dynamic metadata in Envoy JWT filter.
	EnvoyJwtFilterPayload = "payload"
)
// Define static filters to be reused across the codebase. This avoids duplicate marshaling/unmarshaling
// This should not be used for filters that will be mutated
var (
	// RetryPreviousHosts is a retry host predicate that avoids retrying hosts
	// that were already attempted.
	RetryPreviousHosts = &route.RetryPolicy_RetryHostPredicate{
		Name: "envoy.retry_host_predicates.previous_hosts",
		ConfigType: &route.RetryPolicy_RetryHostPredicate_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&previoushost.PreviousHostsPredicate{}),
		},
	}
	// RawBufferTransportSocket is a plaintext (non-TLS) transport socket.
	RawBufferTransportSocket = &core.TransportSocket{
		Name: wellknown.TransportSocketRawBuffer,
		ConfigType: &core.TransportSocket_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&rawbuffer.RawBuffer{}),
		},
	}
	// Cors is the CORS HTTP filter with default (empty) configuration.
	Cors = &hcm.HttpFilter{
		Name: wellknown.CORS,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&cors.Cors{}),
		},
	}
	// Fault is the fault injection HTTP filter with default (empty) configuration.
	Fault = &hcm.HttpFilter{
		Name: wellknown.Fault,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&fault.HTTPFault{}),
		},
	}
	// GrpcWeb is the gRPC-Web HTTP filter.
	GrpcWeb = &hcm.HttpFilter{
		Name: wellknown.GRPCWeb,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&grpcweb.GrpcWeb{}),
		},
	}
	// GrpcStats is the gRPC stats HTTP filter; it emits filter state but has
	// per-method stats disabled (StatsForAllMethods=false).
	GrpcStats = &hcm.HttpFilter{
		Name: wellknown.HTTPGRPCStats,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&grpcstats.FilterConfig{
				EmitFilterState: true,
				PerMethodStatSpecifier: &grpcstats.FilterConfig_StatsForAllMethods{
					StatsForAllMethods: &wrapperspb.BoolValue{Value: false},
				},
			}),
		},
	}
	// TLSInspector is the listener filter used to detect TLS traffic.
	TLSInspector = &listener.ListenerFilter{
		Name: wellknown.TLSInspector,
		ConfigType: &listener.ListenerFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&tlsinspector.TlsInspector{}),
		},
	}
	// HTTPInspector is the listener filter used to detect plaintext HTTP traffic.
	HTTPInspector = &listener.ListenerFilter{
		Name: wellknown.HTTPInspector,
		ConfigType: &listener.ListenerFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&httpinspector.HttpInspector{}),
		},
	}
	// OriginalDestination is the original-destination listener filter.
	OriginalDestination = &listener.ListenerFilter{
		Name: wellknown.OriginalDestination,
		ConfigType: &listener.ListenerFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&originaldst.OriginalDst{}),
		},
	}
	// OriginalSrc is the original-source listener filter; upstream packets are
	// tagged with mark 1337.
	OriginalSrc = &listener.ListenerFilter{
		Name: wellknown.OriginalSource,
		ConfigType: &listener.ListenerFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&originalsrc.OriginalSrc{
				Mark: 1337,
			}),
		},
	}
	// ProxyProtocol is the PROXY protocol listener filter.
	ProxyProtocol = &listener.ListenerFilter{
		Name: wellknown.ProxyProtocol,
		ConfigType: &listener.ListenerFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&proxy_proto.ProxyProtocol{}),
		},
	}
	// EmptySessionFilter is the stateful-session HTTP filter with empty configuration.
	EmptySessionFilter = &hcm.HttpFilter{
		Name: util.StatefulSessionFilter,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&statefulsession.StatefulSession{}),
		},
	}
	// Alpn overrides the ALPN negotiated with the upstream per HTTP version;
	// the injected values are the mtlsHTTP*ALPN lists declared below.
	Alpn = &hcm.HttpFilter{
		Name: AlpnFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&alpn.FilterConfig{
				AlpnOverride: []*alpn.FilterConfig_AlpnOverride{
					{
						UpstreamProtocol: alpn.FilterConfig_HTTP10,
						AlpnOverride:     mtlsHTTP10ALPN,
					},
					{
						UpstreamProtocol: alpn.FilterConfig_HTTP11,
						AlpnOverride:     mtlsHTTP11ALPN,
					},
					{
						UpstreamProtocol: alpn.FilterConfig_HTTP2,
						AlpnOverride:     mtlsHTTP2ALPN,
					},
				},
			}),
		},
	}
	// TCP MX is an Istio filter defined in https://github.com/istio/proxy/tree/master/source/extensions/filters/network/metadata_exchange.
	tcpMx = protoconv.TypedStructWithFields("type.googleapis.com/envoy.tcp.metadataexchange.config.MetadataExchange",
		map[string]any{
			"protocol":         "istio-peer-exchange",
			"enable_discovery": true,
		})
	// TCPListenerMx is the TCP metadata exchange filter in network (listener) filter form.
	TCPListenerMx = &listener.Filter{
		Name:       MxFilterName,
		ConfigType: &listener.Filter_TypedConfig{TypedConfig: tcpMx},
	}
	// TCPClusterMx is the TCP metadata exchange filter in upstream (cluster) filter form.
	TCPClusterMx = &cluster.Filter{
		Name:        MxFilterName,
		TypedConfig: tcpMx,
	}
	// WaypointDownstreamMetadataFilter performs workload discovery for the
	// downstream peer and shares the result with the upstream.
	WaypointDownstreamMetadataFilter = &hcm.HttpFilter{
		Name: "waypoint_downstream_peer_metadata",
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.TypedStructWithFields("type.googleapis.com/io.istio.http.peer_metadata.Config",
				map[string]any{
					"downstream_discovery": []any{
						map[string]any{
							"workload_discovery": map[string]any{},
						},
					},
					"shared_with_upstream": true,
				}),
		},
	}
	// WaypointUpstreamMetadataFilter performs workload discovery for the upstream peer.
	WaypointUpstreamMetadataFilter = &hcm.HttpFilter{
		Name: "waypoint_upstream_peer_metadata",
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.TypedStructWithFields("type.googleapis.com/io.istio.http.peer_metadata.Config",
				map[string]any{
					"upstream_discovery": []any{
						map[string]any{
							"workload_discovery": map[string]any{},
						},
					},
				}),
		},
	}
	// SidecarInboundMetadataFilter discovers the downstream peer via Istio headers
	// or workload discovery, and propagates metadata back via Istio headers.
	SidecarInboundMetadataFilter = &hcm.HttpFilter{
		Name: MxFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.TypedStructWithFields("type.googleapis.com/io.istio.http.peer_metadata.Config",
				map[string]any{
					"downstream_discovery": []any{
						map[string]any{
							"istio_headers": map[string]any{},
						},
						map[string]any{
							"workload_discovery": map[string]any{},
						},
					},
					"downstream_propagation": []any{
						map[string]any{
							"istio_headers": map[string]any{},
						},
					},
				}),
		},
	}
	// SidecarOutboundMetadataFilter is the upstream-direction counterpart of
	// SidecarInboundMetadataFilter.
	SidecarOutboundMetadataFilter = &hcm.HttpFilter{
		Name: MxFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.TypedStructWithFields("type.googleapis.com/io.istio.http.peer_metadata.Config",
				map[string]any{
					"upstream_discovery": []any{
						map[string]any{
							"istio_headers": map[string]any{},
						},
						map[string]any{
							"workload_discovery": map[string]any{},
						},
					},
					"upstream_propagation": []any{
						map[string]any{
							"istio_headers": map[string]any{},
						},
					},
				}),
		},
	}
	// TODO https://github.com/istio/istio/issues/46740
	// false values can be omitted in protobuf, results in diff JSON values between control plane and envoy config dumps
	// long term fix will be to add the metadata config to istio/api and use that over TypedStruct
	// SidecarOutboundMetadataFilterSkipHeaders is SidecarOutboundMetadataFilter with
	// header propagation skipped for external (non-mesh) clusters.
	SidecarOutboundMetadataFilterSkipHeaders = &hcm.HttpFilter{
		Name: MxFilterName,
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.TypedStructWithFields("type.googleapis.com/io.istio.http.peer_metadata.Config",
				map[string]any{
					"upstream_discovery": []any{
						map[string]any{
							"istio_headers": map[string]any{},
						},
						map[string]any{
							"workload_discovery": map[string]any{},
						},
					},
					"upstream_propagation": []any{
						map[string]any{
							"istio_headers": map[string]any{
								"skip_external_clusters": true,
							},
						},
					},
				}),
		},
	}
	// ConnectAuthorityFilter captures the CONNECT authority, downstream address,
	// and peer/local principals into filter state shared once with the upstream.
	ConnectAuthorityFilter = &hcm.HttpFilter{
		Name: "connect_authority",
		ConfigType: &hcm.HttpFilter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&sfs.Config{
				OnRequestHeaders: []*sfsvalue.FilterStateValue{
					{
						Key: &sfsvalue.FilterStateValue_ObjectKey{
							ObjectKey: "envoy.filters.listener.original_dst.local_ip",
						},
						Value: &sfsvalue.FilterStateValue_FormatString{
							FormatString: &core.SubstitutionFormatString{
								Format: &core.SubstitutionFormatString_TextFormatSource{
									TextFormatSource: &core.DataSource{
										Specifier: &core.DataSource_InlineString{
											InlineString: "%REQ(:AUTHORITY)%",
										},
									},
								},
							},
						},
						SharedWithUpstream: sfsvalue.FilterStateValue_ONCE,
					}, {
						Key: &sfsvalue.FilterStateValue_ObjectKey{
							ObjectKey: "envoy.filters.listener.original_dst.remote_ip",
						},
						Value: &sfsvalue.FilterStateValue_FormatString{
							FormatString: &core.SubstitutionFormatString{
								Format: &core.SubstitutionFormatString_TextFormatSource{
									TextFormatSource: &core.DataSource{
										Specifier: &core.DataSource_InlineString{
											InlineString: "%DOWNSTREAM_REMOTE_ADDRESS%",
										},
									},
								},
							},
						},
						SharedWithUpstream: sfsvalue.FilterStateValue_ONCE,
					}, {
						Key: &sfsvalue.FilterStateValue_ObjectKey{
							ObjectKey: "io.istio.peer_principal",
						},
						FactoryKey: "envoy.string",
						Value: &sfsvalue.FilterStateValue_FormatString{
							FormatString: &core.SubstitutionFormatString{
								Format: &core.SubstitutionFormatString_TextFormatSource{
									TextFormatSource: &core.DataSource{
										Specifier: &core.DataSource_InlineString{
											InlineString: "%DOWNSTREAM_PEER_URI_SAN%",
										},
									},
								},
							},
						},
						SharedWithUpstream: sfsvalue.FilterStateValue_ONCE,
					}, {
						Key: &sfsvalue.FilterStateValue_ObjectKey{
							ObjectKey: "io.istio.local_principal",
						},
						FactoryKey: "envoy.string",
						Value: &sfsvalue.FilterStateValue_FormatString{
							FormatString: &core.SubstitutionFormatString{
								Format: &core.SubstitutionFormatString_TextFormatSource{
									TextFormatSource: &core.DataSource{
										Specifier: &core.DataSource_InlineString{
											InlineString: "%DOWNSTREAM_LOCAL_URI_SAN%",
										},
									},
								},
							},
						},
						SharedWithUpstream: sfsvalue.FilterStateValue_ONCE,
					},
				},
			}),
		},
	}
	// ConnectAuthorityNetworkFilter is the network-filter counterpart that copies the
	// original destination local IP into filter state shared once with the upstream.
	ConnectAuthorityNetworkFilter = &listener.Filter{
		Name: "connect_authority",
		ConfigType: &listener.Filter_TypedConfig{
			TypedConfig: protoconv.MessageToAny(&sfsnetwork.Config{
				OnNewConnection: []*sfsvalue.FilterStateValue{{
					Key: &sfsvalue.FilterStateValue_ObjectKey{
						ObjectKey: "envoy.filters.listener.original_dst.local_ip",
					},
					Value: &sfsvalue.FilterStateValue_FormatString{
						FormatString: &core.SubstitutionFormatString{
							Format: &core.SubstitutionFormatString_TextFormatSource{
								TextFormatSource: &core.DataSource{
									Specifier: &core.DataSource_InlineString{
										InlineString: "%FILTER_STATE(envoy.filters.listener.original_dst.local_ip:PLAIN)%",
									},
								},
							},
						},
					},
					SharedWithUpstream: sfsvalue.FilterStateValue_ONCE,
				}},
			}),
		},
	}
)
// routers precomputes the router HTTP filter for every possible RouterFilterContext.
// The router filter is used constantly, and there are only four combinations
// (start-child-span x suppress-debug-headers), so building them all up front avoids
// re-marshaling the filter config on every use.
var routers = func() map[RouterFilterContext]*hcm.HttpFilter {
	precomputed := make(map[RouterFilterContext]*hcm.HttpFilter, 4)
	for _, span := range []bool{true, false} {
		for _, suppress := range []bool{true, false} {
			ctx := RouterFilterContext{
				StartChildSpan:       span,
				SuppressDebugHeaders: suppress,
			}
			precomputed[ctx] = &hcm.HttpFilter{
				Name: wellknown.Router,
				ConfigType: &hcm.HttpFilter_TypedConfig{
					TypedConfig: protoconv.MessageToAny(&router.Router{
						StartChildSpan:       span,
						SuppressEnvoyHeaders: suppress,
					}),
				},
			}
		}
	}
	return precomputed
}()
// BuildRouterFilter returns the precomputed router HTTP filter matching the given
// context (child-span and debug-header options).
func BuildRouterFilter(ctx RouterFilterContext) *hcm.HttpFilter {
	return routers[ctx]
}
var (
	// These ALPNs are injected in the client side by the ALPN filter.
	// "istio" is added for each upstream protocol in order to make it
	// backward compatible. e.g., 1.4 proxy -> 1.3 proxy.
	// Non istio-* variants are added to ensure that traffic sent out of the mesh has a valid ALPN;
	// ideally this would not be added, but because the override filter is in the HCM, rather than cluster,
	// we do not yet know the upstream so we cannot determine if its in or out of the mesh
	// mtlsHTTP10ALPN is the ALPN list injected for HTTP/1.0 upstreams.
	mtlsHTTP10ALPN = []string{"istio-http/1.0", "istio", "http/1.0"}
	// mtlsHTTP11ALPN is the ALPN list injected for HTTP/1.1 upstreams.
	mtlsHTTP11ALPN = []string{"istio-http/1.1", "istio", "http/1.1"}
	// mtlsHTTP2ALPN is the ALPN list injected for HTTP/2 upstreams.
	mtlsHTTP2ALPN = []string{"istio-h2", "istio", "h2"}
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// LdsGenerator generates LDS (Listener Discovery Service) responses by delegating
// listener construction to the configured ConfigGenerator.
type LdsGenerator struct {
	// ConfigGenerator builds the actual listener configuration for a proxy.
	ConfigGenerator core.ConfigGenerator
}

// Compile-time check that LdsGenerator implements XdsResourceGenerator.
var _ model.XdsResourceGenerator = &LdsGenerator{}
// Map of all configs that do not impact LDS
var skippedLdsConfigs = map[model.NodeType]sets.Set[kind.Kind]{
	model.Router: sets.New[kind.Kind](
		// for autopassthrough gateways, we build filterchains per-dr subset
		kind.WorkloadGroup,
		kind.WorkloadEntry,
		kind.Secret,
		kind.ProxyConfig,
	),
	// Sidecars additionally skip Gateway changes, which only affect gateways.
	model.SidecarProxy: sets.New[kind.Kind](
		kind.Gateway,
		kind.WorkloadGroup,
		kind.WorkloadEntry,
		kind.Secret,
		kind.ProxyConfig,
	),
	// Waypoints skip the same kinds as sidecars.
	model.Waypoint: sets.New[kind.Kind](
		kind.Gateway,
		kind.WorkloadGroup,
		kind.WorkloadEntry,
		kind.Secret,
		kind.ProxyConfig,
	),
}
// ldsNeedsPush reports whether the given push request requires regenerating
// listeners for this proxy.
func ldsNeedsPush(proxy *model.Proxy, req *model.PushRequest) bool {
	if req == nil {
		return true
	}
	// Waypoint proxies have a matcher against pod IPs in them. Historically, any LDS change would do a full
	// push, recomputing push context. Doing that on every IP change doesn't scale, so we need these to remain
	// incremental pushes.
	// This allows waypoints only to push LDS on incremental pushes to Address type which would otherwise be skipped.
	if proxy.Type == model.Waypoint && model.HasConfigsOfKind(req.ConfigsUpdated, kind.Address) {
		return true
	}
	// Everything else: LDS only handles full pushes (endpoint-only updates are skipped).
	if !req.Full {
		return false
	}
	// An empty change set means "everything changed"; always push.
	if len(req.ConfigsUpdated) == 0 {
		return true
	}
	// Push if any updated config is not in the skip list for this proxy type.
	skipped := skippedLdsConfigs[proxy.Type]
	for config := range req.ConfigsUpdated {
		if !skipped.Contains(config.Kind) {
			return true
		}
	}
	return false
}
// Generate produces the LDS resources for the proxy, or nothing if the push
// request does not affect listeners (see ldsNeedsPush).
func (l LdsGenerator) Generate(proxy *model.Proxy, _ *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !ldsNeedsPush(proxy, req) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	listeners := l.ConfigGenerator.BuildListeners(proxy, req.Push)
	// Pre-size the result: exactly one resource is emitted per listener.
	resources := make(model.Resources, 0, len(listeners))
	for _, c := range listeners {
		resources = append(resources, &discovery.Resource{
			Name:     c.Name,
			Resource: protoconv.MessageToAny(c),
		})
	}
	return resources, model.DefaultXdsLogDetails, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"istio.io/istio/pilot/pkg/model"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/monitoring"
)
var (
	// errTag labels metrics with the error code of a rejected config.
	errTag = monitoring.CreateLabel("err")
	// nodeTag labels metrics with the node (proxy) identifier.
	nodeTag = monitoring.CreateLabel("node")
	// typeTag labels metrics with the xDS or event type.
	typeTag = monitoring.CreateLabel("type")
	// versionTag labels metrics with the proxy version.
	versionTag = monitoring.CreateLabel("version")
	// pilot_total_xds_rejects should be used instead. This is for backwards compatibility
	cdsReject = monitoring.NewGauge(
		"pilot_xds_cds_reject",
		"Pilot rejected CDS configs.",
	)
	// pilot_total_xds_rejects should be used instead. This is for backwards compatibility
	edsReject = monitoring.NewGauge(
		"pilot_xds_eds_reject",
		"Pilot rejected EDS.",
	)
	// pilot_total_xds_rejects should be used instead. This is for backwards compatibility
	ldsReject = monitoring.NewGauge(
		"pilot_xds_lds_reject",
		"Pilot rejected LDS.",
	)
	// pilot_total_xds_rejects should be used instead. This is for backwards compatibility
	rdsReject = monitoring.NewGauge(
		"pilot_xds_rds_reject",
		"Pilot rejected RDS.",
	)
	// totalXDSRejects counts all NACKed pushes, labeled by xDS type.
	totalXDSRejects = monitoring.NewSum(
		"pilot_total_xds_rejects",
		"Total number of XDS responses from pilot rejected by proxy.",
	)
	// xdsExpiredNonce counts requests that arrived with a stale nonce.
	xdsExpiredNonce = monitoring.NewSum(
		"pilot_xds_expired_nonce",
		"Total number of XDS requests with an expired nonce.",
	)
	// monServices tracks the number of services known to pilot.
	monServices = monitoring.NewGauge(
		"pilot_services",
		"Total services known to pilot.",
	)
	// TODO: Update all the resource stats in separate routine
	// virtual services, destination rules, gateways, etc.
	xdsClients = monitoring.NewGauge(
		"pilot_xds",
		"Number of endpoints connected to this pilot using XDS.",
	)
	// xdsClientTrackerMutex guards xdsClientTracker (see recordXDSClients).
	xdsClientTrackerMutex = &sync.Mutex{}
	// xdsClientTracker keeps a per-version count of connected clients.
	xdsClientTracker = make(map[string]float64)
	xdsResponseWriteTimeouts = monitoring.NewSum(
		"pilot_xds_write_timeout",
		"Pilot XDS response write timeouts.",
	)
	// Covers xds_builderr and xds_senderr for xds in {lds, rds, cds, eds}.
	pushes = monitoring.NewSum(
		"pilot_xds_pushes",
		"Pilot build and send errors for lds, rds, cds and eds.",
	)
	// Precomputed per-type send-error counters derived from pushes.
	cdsSendErrPushes = pushes.With(typeTag.Value("cds_senderr"))
	edsSendErrPushes = pushes.With(typeTag.Value("eds_senderr"))
	ldsSendErrPushes = pushes.With(typeTag.Value("lds_senderr"))
	rdsSendErrPushes = pushes.With(typeTag.Value("rds_senderr"))
	debounceTime = monitoring.NewDistribution(
		"pilot_debounce_time",
		"Delay in seconds between the first config enters debouncing and the merged push request is pushed into the push queue.",
		[]float64{.01, .1, 1, 3, 5, 10, 20, 30},
	)
	pushContextInitTime = monitoring.NewDistribution(
		"pilot_pushcontext_init_seconds",
		"Total time in seconds Pilot takes to init pushContext.",
		[]float64{.01, .1, 0.5, 1, 3, 5},
	)
	pushTime = monitoring.NewDistribution(
		"pilot_xds_push_time",
		"Total time in seconds Pilot takes to push lds, rds, cds and eds.",
		[]float64{.01, .1, 1, 3, 5, 10, 20, 30},
	)
	sendTime = monitoring.NewDistribution(
		"pilot_xds_send_time",
		"Total time in seconds Pilot takes to send generated configuration.",
		[]float64{.01, .1, 1, 3, 5, 10, 20, 30},
	)
	proxiesQueueTime = monitoring.NewDistribution(
		"pilot_proxy_queue_time",
		"Time in seconds, a proxy is in the push queue before being dequeued.",
		[]float64{.1, .5, 1, 3, 5, 10, 20, 30},
	)
	pushTriggers = monitoring.NewSum(
		"pilot_push_triggers",
		"Total number of times a push was triggered, labeled by reason for the push.",
	)
	proxiesConvergeDelay = monitoring.NewDistribution(
		"pilot_proxy_convergence_time",
		"Delay in seconds between config change and a proxy receiving all required configuration.",
		[]float64{.1, .5, 1, 3, 5, 10, 20, 30},
	)
	pushContextErrors = monitoring.NewSum(
		"pilot_xds_push_context_errors",
		"Number of errors (timeouts) initiating push context.",
	)
	totalXDSInternalErrors = monitoring.NewSum(
		"pilot_total_xds_internal_errors",
		"Total number of internal XDS errors in pilot.",
	)
	inboundUpdates = monitoring.NewSum(
		"pilot_inbound_updates",
		"Total number of updates received by pilot.",
	)
	pilotSDSCertificateErrors = monitoring.NewSum(
		"pilot_sds_certificate_errors_total",
		"Total number of failures to fetch SDS key and certificate.",
	)
	// Precomputed per-type inbound update counters derived from inboundUpdates.
	inboundConfigUpdates  = inboundUpdates.With(typeTag.Value("config"))
	inboundEDSUpdates     = inboundUpdates.With(typeTag.Value("eds"))
	inboundServiceUpdates = inboundUpdates.With(typeTag.Value("svc"))
	inboundServiceDeletes = inboundUpdates.With(typeTag.Value("svcdelete"))
	configSizeBytes = monitoring.NewDistribution(
		"pilot_xds_config_size_bytes",
		"Distribution of configuration sizes pushed to clients",
		// Important boundaries: 10K, 1M, 4M, 10M, 40M
		// 4M default limit for gRPC, 10M config will start to strain system,
		// 40M is likely upper-bound on config sizes supported.
		[]float64{1, 10000, 1000000, 4000000, 10000000, 40000000},
		monitoring.WithUnit(monitoring.Bytes),
	)
)
// recordXDSClients adjusts the connected-client count for the given proxy
// version by delta and publishes the new total to the pilot_xds gauge.
func recordXDSClients(version string, delta float64) {
	xdsClientTrackerMutex.Lock()
	defer xdsClientTrackerMutex.Unlock()
	total := xdsClientTracker[version] + delta
	xdsClientTracker[version] = total
	xdsClients.With(versionTag.Value(version)).Record(total)
}
// triggerMetric is a precomputed monitoring.Metric for each trigger type. This saves on a lot of allocations
// (recordPushTriggers falls back to building the label on the fly for unknown reasons).
var triggerMetric = map[model.TriggerReason]monitoring.Metric{
	model.EndpointUpdate:  pushTriggers.With(typeTag.Value(string(model.EndpointUpdate))),
	model.ConfigUpdate:    pushTriggers.With(typeTag.Value(string(model.ConfigUpdate))),
	model.ServiceUpdate:   pushTriggers.With(typeTag.Value(string(model.ServiceUpdate))),
	model.ProxyUpdate:     pushTriggers.With(typeTag.Value(string(model.ProxyUpdate))),
	model.GlobalUpdate:    pushTriggers.With(typeTag.Value(string(model.GlobalUpdate))),
	model.UnknownTrigger:  pushTriggers.With(typeTag.Value(string(model.UnknownTrigger))),
	model.DebugTrigger:    pushTriggers.With(typeTag.Value(string(model.DebugTrigger))),
	model.SecretTrigger:   pushTriggers.With(typeTag.Value(string(model.SecretTrigger))),
	model.NetworksTrigger: pushTriggers.With(typeTag.Value(string(model.NetworksTrigger))),
	model.ProxyRequest:    pushTriggers.With(typeTag.Value(string(model.ProxyRequest))),
	model.NamespaceUpdate: pushTriggers.With(typeTag.Value(string(model.NamespaceUpdate))),
	model.ClusterUpdate:   pushTriggers.With(typeTag.Value(string(model.ClusterUpdate))),
}
// recordPushTriggers increments the push-trigger counter for each reason in the
// stats, using the precomputed metric when available.
func recordPushTriggers(reasons model.ReasonStats) {
	for reason, count := range reasons {
		if metric, ok := triggerMetric[reason]; ok {
			metric.RecordInt(int64(count))
		} else {
			// Unknown reason: build the label on the fly.
			pushTriggers.With(typeTag.Value(string(reason))).Increment()
		}
	}
}
// isUnexpectedError reports whether a send error is abnormal. Unavailable or
// Canceled codes are sent when a connection is closing down, which is very
// normal: the XDS connection is dropped every 30 minutes, and pods shut down.
func isUnexpectedError(err error) bool {
	s, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status at all; treat as unexpected.
		return true
	}
	switch s.Code() {
	case codes.Unavailable, codes.Canceled:
		return false
	}
	return true
}
// recordSendError records a metric indicating that a push failed. It returns true if this was an unexpected
// error; expected connection-teardown errors are not counted.
func recordSendError(xdsType string, err error) bool {
	if !isUnexpectedError(err) {
		return false
	}
	// TODO use a single metric with a type tag
	switch xdsType {
	case v3.ListenerType:
		ldsSendErrPushes.Increment()
	case v3.ClusterType:
		cdsSendErrPushes.Increment()
	case v3.EndpointType:
		edsSendErrPushes.Increment()
	case v3.RouteType:
		rdsSendErrPushes.Increment()
	}
	return true
}
// incrementXDSRejects records a NACK for the given xDS type: the aggregate
// pilot_total_xds_rejects counter plus the legacy per-type reject gauge.
func incrementXDSRejects(xdsType string, node, errCode string) {
	totalXDSRejects.With(typeTag.Value(v3.GetMetricType(xdsType))).Increment()
	var reject monitoring.Metric
	switch xdsType {
	case v3.ListenerType:
		reject = ldsReject
	case v3.ClusterType:
		reject = cdsReject
	case v3.EndpointType:
		reject = edsReject
	case v3.RouteType:
		reject = rdsReject
	default:
		return
	}
	reject.With(nodeTag.Value(node), errTag.Value(errCode)).Increment()
}
// recordSendTime records the time taken to transmit generated configuration to a proxy.
func recordSendTime(duration time.Duration) {
	sendTime.Record(duration.Seconds())
}
func recordPushTime(xdsType string, duration time.Duration) {
pushTime.With(typeTag.Value(v3.GetMetricType(xdsType))).Record(duration.Seconds())
pushes.With(typeTag.Value(v3.GetMetricType(xdsType))).Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// NdsGenerator generates config for Nds i.e. Name Discovery Service. Istio agents
// send NDS requests to istiod and istiod responds with a list of services and their
// associated IPs (including service entries).
// The agent then updates its internal DNS based on this data. If DNS capture is enabled
// in the pod the agent will capture all DNS requests and attempt to resolve locally before
// forwarding to upstream dns servers.
type NdsGenerator struct {
	// ConfigGenerator builds the name table pushed to agents.
	ConfigGenerator core.ConfigGenerator
}

// Compile-time check that NdsGenerator implements XdsResourceGenerator.
var _ model.XdsResourceGenerator = &NdsGenerator{}
// Map of all configs that do not impact NDS
// (NDS only depends on service/endpoint naming, not routing or security config).
var skippedNdsConfigs = sets.New[kind.Kind](
	kind.Gateway,
	kind.VirtualService,
	kind.DestinationRule,
	kind.Secret,
	kind.Telemetry,
	kind.EnvoyFilter,
	kind.WorkloadEntry,
	kind.WorkloadGroup,
	kind.AuthorizationPolicy,
	kind.RequestAuthentication,
	kind.PeerAuthentication,
	kind.WasmPlugin,
	kind.ProxyConfig,
	kind.MeshConfig,
)
// ndsNeedsPush reports whether the push request requires regenerating the NDS
// name table.
func ndsNeedsPush(req *model.PushRequest) bool {
	if req == nil {
		return true
	}
	if !req.Full {
		// NDS generally handles full push. We only allow partial pushes, when headless endpoints change.
		return headlessEndpointsUpdated(req)
	}
	// If none set, we will always push
	if len(req.ConfigsUpdated) == 0 {
		return true
	}
	for config := range req.ConfigsUpdated {
		// Use the set's Contains helper for consistency with ldsNeedsPush.
		if !skippedNdsConfigs.Contains(config.Kind) {
			return true
		}
	}
	return false
}
// headlessEndpointsUpdated reports whether this push was triggered by a headless
// service endpoint change — the only partial push NDS responds to.
func headlessEndpointsUpdated(req *model.PushRequest) bool {
	return req.Reason.Has(model.HeadlessEndpointUpdate)
}
// Generate produces the NDS name-table resource for the proxy, or nothing when
// the push does not affect NDS or no name table is available.
func (n NdsGenerator) Generate(proxy *model.Proxy, _ *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !ndsNeedsPush(req) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	nameTable := n.ConfigGenerator.BuildNameTable(proxy, req.Push)
	if nameTable == nil {
		return nil, model.DefaultXdsLogDetails, nil
	}
	res := &discovery.Resource{Resource: protoconv.MessageToAny(nameTable)}
	return model.Resources{res}, model.DefaultXdsLogDetails, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
mesh "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
tb "istio.io/istio/pilot/pkg/trustbundle"
"istio.io/istio/pilot/pkg/util/protoconv"
)
// PcdsGenerator generates proxy configuration for proxies to consume
// (currently only the trust bundle; see Generate).
type PcdsGenerator struct {
	// TrustBundle holds the merged set of trusted CA certificates to distribute.
	TrustBundle *tb.TrustBundle
}

// Compile-time check that PcdsGenerator implements XdsResourceGenerator.
var _ model.XdsResourceGenerator = &PcdsGenerator{}
// pcdsNeedsPush reports whether the push request requires a PCDS push.
// PCDS is only pushed when multi-root mesh support is enabled, and only for
// full pushes with an empty (i.e. "everything changed") change set.
func pcdsNeedsPush(req *model.PushRequest) bool {
	switch {
	case !features.MultiRootMesh:
		return false
	case req == nil:
		return true
	case !req.Full:
		return false
	default:
		// This needs to be better optimized
		return len(req.ConfigsUpdated) == 0
	}
}
// Generate returns ProxyConfig protobuf containing TrustBundle for given proxy
// or nothing when no push is needed or no trust bundle is configured.
func (e *PcdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	// pcdsNeedsPush has no side effects, so the combined guard is safe.
	if !pcdsNeedsPush(req) || e.TrustBundle == nil {
		return nil, model.DefaultXdsLogDetails, nil
	}
	// TODO: For now, only TrustBundle updates are pushed. Eventually, this should push entire Proxy Configuration
	pc := &mesh.ProxyConfig{CaCertificatesPem: e.TrustBundle.GetTrustBundle()}
	res := &discovery.Resource{Resource: protoconv.MessageToAny(pc)}
	return model.Resources{res}, model.DefaultXdsLogDetails, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// UnAffectedConfigKinds contains config types which does not affect certain proxy types.
var UnAffectedConfigKinds = map[model.NodeType]sets.Set[kind.Kind]{
	// For Gateways, we do not care about the following configs for example Sidecar.
	model.Router: sets.New(kind.Sidecar),
	// For Sidecar, we do not care about the following configs for example Gateway.
	model.SidecarProxy: sets.New(kind.Gateway),
}
// ConfigAffectsProxy checks if a pushEv will affect a specified proxy. That means whether the push will be performed
// towards the proxy.
func ConfigAffectsProxy(req *model.PushRequest, proxy *model.Proxy) bool {
	// Empty changes means "all" to get a backward compatibility.
	if len(req.ConfigsUpdated) == 0 {
		return true
	}
	// Waypoints and ztunnels use a different scoping mechanism, so the
	// dependency-based optimizations below do not apply to them.
	// TODO: implement ambient aware scoping
	if proxy.IsWaypointProxy() || proxy.IsZTunnel() {
		return true
	}
	affected := false
	for config := range req.ConfigsUpdated {
		if proxyDependentOnConfig(proxy, config, req.Push) {
			affected = true
			break
		}
	}
	return affected
}
// proxyDependentOnConfig reports whether the given proxy depends on the updated
// config key, based on proxy type and sidecar scope.
func proxyDependentOnConfig(proxy *model.Proxy, config model.ConfigKey, push *model.PushContext) bool {
	// Skip config dependency check based on proxy type for certain configs.
	if UnAffectedConfigKinds[proxy.Type].Contains(config.Kind) {
		return false
	}
	// Detailed config dependencies check.
	switch proxy.Type {
	case model.SidecarProxy:
		// Check both the current and (if set) the previous sidecar scope, so a
		// config leaving the scope still triggers a final push.
		if proxy.SidecarScope.DependsOnConfig(config, push.Mesh.RootNamespace) {
			return true
		} else if proxy.PrevSidecarScope != nil && proxy.PrevSidecarScope.DependsOnConfig(config, push.Mesh.RootNamespace) {
			return true
		}
	case model.Router:
		if config.Kind == kind.ServiceEntry {
			// If config is ServiceEntry, name of the config is service's FQDN
			if features.FilterGatewayClusterConfig && !push.ServiceAttachedToGateway(config.Name, proxy) {
				return false
			}
			hostname := host.Name(config.Name)
			// gateways have default sidecar scopes
			// NOTE(review): unlike the SidecarProxy case, PrevSidecarScope is not
			// nil-checked here — presumably GetService is nil-receiver safe; confirm.
			if proxy.SidecarScope.GetService(hostname) == nil &&
				proxy.PrevSidecarScope.GetService(hostname) == nil {
				// skip the push when the service is not visible to the gateway,
				// and the old service is not visible/existent
				return false
			}
		}
		return true
	default:
		// TODO We'll add the check for other proxy types later.
		return true
	}
	return false
}
// DefaultProxyNeedsPush check if a proxy needs push for this push event.
func DefaultProxyNeedsPush(proxy *model.Proxy, req *model.PushRequest) bool {
	if ConfigAffectsProxy(req, proxy) {
		return true
	}
	// Also push when one of the services this proxy belongs to was updated.
	if req.ConfigsUpdated == nil {
		return false
	}
	for _, svc := range proxy.ServiceTargets {
		key := model.ConfigKey{
			Kind:      kind.ServiceEntry,
			Name:      string(svc.Service.Hostname),
			Namespace: svc.Service.Attributes.Namespace,
		}
		if _, updated := req.ConfigsUpdated[key]; updated {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"sync"
"istio.io/istio/pilot/pkg/model"
)
// PushQueue is a queue of proxies pending a push. Re-enqueuing a connection that is
// already pending or in progress merges the push requests rather than duplicating work.
type PushQueue struct {
	// cond guards all fields below and signals Dequeue waiters.
	cond *sync.Cond
	// pending stores all connections in the queue. If the same connection is enqueued again,
	// the PushRequest will be merged.
	pending map[*Connection]*model.PushRequest
	// queue maintains ordering of the queue
	queue []*Connection
	// processing stores all connections that have been Dequeue(), but not MarkDone().
	// The value stored will be initially be nil, but may be populated if the connection is Enqueue().
	// If model.PushRequest is not nil, it will be Enqueued again once MarkDone has been called.
	processing map[*Connection]*model.PushRequest
	// shuttingDown, once set, makes Enqueue a no-op and unblocks Dequeue waiters.
	shuttingDown bool
}
// NewPushQueue returns an empty PushQueue ready for use.
func NewPushQueue() *PushQueue {
	q := &PushQueue{
		cond:       sync.NewCond(&sync.Mutex{}),
		pending:    map[*Connection]*model.PushRequest{},
		processing: map[*Connection]*model.PushRequest{},
	}
	return q
}
// Enqueue will mark a proxy as pending a push. If it is already pending, pushInfo will be merged.
// ServiceEntry updates will be added together, and full will be set if either were full
func (p *PushQueue) Enqueue(con *Connection, pushRequest *model.PushRequest) {
	p.cond.L.Lock()
	defer p.cond.L.Unlock()
	if p.shuttingDown {
		return
	}
	// Currently being pushed: merge into the in-progress entry; MarkDone will re-queue it.
	if existing, ok := p.processing[con]; ok {
		p.processing[con] = existing.CopyMerge(pushRequest)
		return
	}
	// Already waiting in the queue: merge with the pending request.
	if existing, ok := p.pending[con]; ok {
		p.pending[con] = existing.CopyMerge(pushRequest)
		return
	}
	// First enqueue for this connection: record it and wake one waiter.
	p.pending[con] = pushRequest
	p.queue = append(p.queue, con)
	p.cond.Signal()
}
// Dequeue removes a proxy from the queue. If there are no proxies ready to be removed, this will block.
// shutdown is true once ShutDown has been called and the queue has drained.
func (p *PushQueue) Dequeue() (con *Connection, request *model.PushRequest, shutdown bool) {
	p.cond.L.Lock()
	defer p.cond.L.Unlock()
	// Wait until something is queued; an empty queue during shutdown means we are done.
	for len(p.queue) == 0 {
		if p.shuttingDown {
			return nil, nil, true
		}
		p.cond.Wait()
	}
	con = p.queue[0]
	// Nil out the dequeued slot: the backing array survives the re-slice below,
	// so without this the connection could not be garbage collected.
	// See https://github.com/grpc/grpc-go/issues/4758
	p.queue[0] = nil
	p.queue = p.queue[1:]
	request = p.pending[con]
	delete(p.pending, con)
	// Track this connection as in-progress until MarkDone is called.
	p.processing[con] = nil
	return con, request, false
}
// MarkDone records that the push for a connection finished. If Enqueue ran
// while the push was in flight, the merged request is placed back on the queue.
func (p *PushQueue) MarkDone(con *Connection) {
	p.cond.L.Lock()
	defer p.cond.L.Unlock()
	deferred := p.processing[con]
	delete(p.processing, con)
	if deferred == nil {
		return
	}
	// A push arrived mid-flight; re-queue the connection with the merged request.
	p.pending[con] = deferred
	p.queue = append(p.queue, con)
	p.cond.Signal()
}
// Pending returns the number of proxies currently waiting in the queue.
func (p *PushQueue) Pending() int {
	p.cond.L.Lock()
	n := len(p.queue)
	p.cond.L.Unlock()
	return n
}
// ShutDown will cause queue to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (p *PushQueue) ShutDown() {
	p.cond.L.Lock()
	p.shuttingDown = true
	// Wake every blocked Dequeue so workers observe the shutdown flag.
	p.cond.Broadcast()
	p.cond.L.Unlock()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// RdsGenerator generates RDS (route) responses, delegating actual route
// construction to the shared ConfigGenerator.
type RdsGenerator struct {
	ConfigGenerator core.ConfigGenerator
}

// Compile-time interface conformance check.
var _ model.XdsResourceGenerator = &RdsGenerator{}
// skippedRdsConfigs is the set of config kinds that never impact RDS output;
// a push whose updates touch only these kinds does not regenerate routes.
var skippedRdsConfigs = sets.New[kind.Kind](
	kind.WorkloadEntry,
	kind.WorkloadGroup,
	kind.AuthorizationPolicy,
	kind.RequestAuthentication,
	kind.PeerAuthentication,
	kind.Secret,
	kind.WasmPlugin,
	kind.Telemetry,
	kind.ProxyConfig,
)
// rdsNeedsPush reports whether a push request can change RDS output.
// A nil request, or a full push with no scoping information, always pushes.
func rdsNeedsPush(req *model.PushRequest) bool {
	if req == nil {
		return true
	}
	// RDS only handles full push
	if !req.Full {
		return false
	}
	if len(req.ConfigsUpdated) == 0 {
		// No scoping info: push unconditionally.
		return true
	}
	// Push if at least one updated config kind can influence routes.
	for cfg := range req.ConfigsUpdated {
		if skippedRdsConfigs.Contains(cfg.Kind) {
			continue
		}
		return true
	}
	return false
}
// Generate returns the HTTP route resources for the proxy, or nothing when the
// push request cannot affect RDS output.
func (c RdsGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	if !rdsNeedsPush(req) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	routes, details := c.ConfigGenerator.BuildHTTPRoutes(proxy, req, w.ResourceNames)
	return routes, details, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package requestidextension
import (
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
uuid_extension "github.com/envoyproxy/go-control-plane/envoy/extensions/request_id/uuid/v3"
"google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/istio/pilot/pkg/util/protoconv"
)
// UUIDRequestIDExtension is the default request ID extension: UUID request IDs
// with trace sampling driven by the request ID.
var UUIDRequestIDExtension = &hcm.RequestIDExtension{
	TypedConfig: protoconv.MessageToAny(&uuid_extension.UuidRequestIdConfig{
		UseRequestIdForTraceSampling: &wrapperspb.BoolValue{
			Value: true,
		},
	}),
}
// BuildUUIDRequestIDExtension returns a UUID request ID extension configured
// from ctx. A nil context falls back to the shared default, which samples
// traces based on the request ID.
func BuildUUIDRequestIDExtension(ctx *UUIDRequestIDExtensionContext) *hcm.RequestIDExtension {
	if ctx == nil {
		return UUIDRequestIDExtension
	}
	cfg := &uuid_extension.UuidRequestIdConfig{
		UseRequestIdForTraceSampling: wrapperspb.Bool(ctx.UseRequestIDForTraceSampling),
	}
	return &hcm.RequestIDExtension{TypedConfig: protoconv.MessageToAny(cfg)}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"crypto/x509"
"encoding/pem"
"fmt"
"strconv"
"strings"
"time"
xxhashv2 "github.com/cespare/xxhash/v2"
cryptomb "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/cryptomb/v3alpha"
qat "github.com/envoyproxy/go-control-plane/contrib/envoy/extensions/private_key_providers/qat/v3alpha"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoytls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
mesh "istio.io/api/mesh/v1alpha1"
credscontroller "istio.io/istio/pilot/pkg/credentials"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/model/credentials"
securitymodel "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// SecretResource wraps the authnmodel type with cache functions implemented
type SecretResource struct {
	credentials.SecretResource
	// pkpConfHash is a hash of the proxy's private key provider config,
	// mixed into the cache key so proxies with different PKP settings
	// do not share cached SDS entries.
	pkpConfHash string
}

var _ model.XdsCacheEntry = SecretResource{}
// Type implements model.XdsCacheEntry; all SDS entries share one cache type.
func (sr SecretResource) Type() string {
	return model.SDSType
}
// Key implements model.XdsCacheEntry. The private key provider config hash is
// appended so differently-configured proxies get distinct cache entries.
func (sr SecretResource) Key() any {
	return sr.SecretResource.Key() + "/" + sr.pkpConfHash
}
// DependentConfigs returns the config hashes that should invalidate this cached
// secret: the secret itself plus its paired -cacert counterpart, as computed by
// relatedConfigs.
func (sr SecretResource) DependentConfigs() []model.ConfigHash {
	related := relatedConfigs(model.ConfigKey{Kind: kind.Secret, Name: sr.Name, Namespace: sr.Namespace})
	// Pre-size: relatedConfigs always returns the key plus one counterpart.
	configs := make([]model.ConfigHash, 0, len(related))
	for _, config := range related {
		configs = append(configs, config.HashCode())
	}
	return configs
}
// Cacheable implements model.XdsCacheEntry; SDS responses are always cacheable.
func (sr SecretResource) Cacheable() bool {
	return true
}
// sdsNeedsPush reports whether a set of config updates can affect SDS output.
// An empty update set means "push everything" and always returns true.
func sdsNeedsPush(updates model.XdsUpdates) bool {
	if len(updates) == 0 {
		return true
	}
	for u := range updates {
		// Secrets obviously affect SDS; ReferenceGrants gate cross-namespace access.
		if u.Kind == kind.Secret || u.Kind == kind.ReferenceGrant {
			return true
		}
	}
	return false
}
// parseResources parses a list of resource names to SecretResource types, for a given proxy.
// Invalid resource names are ignored
func (s *SecretGen) parseResources(names []string, proxy *model.Proxy) []SecretResource {
	// Hash the private key provider config once; it becomes part of every cache key.
	var pkpHash string
	if pkp := (*mesh.ProxyConfig)(proxy.Metadata.ProxyConfig).GetPrivateKeyProvider(); pkp != nil {
		pkpHash = strconv.FormatUint(xxhashv2.Sum64String(pkp.String()), 10)
	}
	parsed := make([]SecretResource, 0, len(names))
	for _, name := range names {
		sr, err := credentials.ParseResourceName(name, proxy.VerifiedIdentity.Namespace, proxy.Metadata.ClusterID, s.configCluster)
		if err != nil {
			pilotSDSCertificateErrors.Increment()
			log.Warnf("error parsing resource name: %v", err)
			continue
		}
		parsed = append(parsed, SecretResource{sr, pkpHash})
	}
	return parsed
}
// Generate implements model.XdsResourceGenerator for SDS. It returns the
// secrets the proxy is watching (w.ResourceNames), restricted to resources the
// proxy is authorized to read and, on incremental pushes, to secrets that
// actually changed. Results are served from the XDS cache when possible.
func (s *SecretGen) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	// SDS requires a verified identity; unauthenticated proxies get nothing.
	if proxy.VerifiedIdentity == nil {
		log.Warnf("proxy %s is not authorized to receive credscontroller. Ensure you are connecting over TLS port and are authenticated.", proxy.ID)
		return nil, model.DefaultXdsLogDetails, nil
	}
	if req == nil || !sdsNeedsPush(req.ConfigsUpdated) {
		return nil, model.DefaultXdsLogDetails, nil
	}
	// For non-full pushes, collect the updated Secret keys so unchanged secrets
	// can be skipped below. nil means "push everything".
	var updatedSecrets sets.Set[model.ConfigKey]
	if !req.Full {
		updatedSecrets = model.ConfigsOfKind(req.ConfigsUpdated, kind.Secret)
	}
	proxyClusterSecrets, err := s.secrets.ForCluster(proxy.Metadata.ClusterID)
	if err != nil {
		log.Warnf("proxy %s is from an unknown cluster, cannot retrieve certificates: %v", proxy.ID, err)
		pilotSDSCertificateErrors.Increment()
		return nil, model.DefaultXdsLogDetails, nil
	}
	configClusterSecrets, err := s.secrets.ForCluster(s.configCluster)
	if err != nil {
		log.Warnf("config cluster %s not found, cannot retrieve certificates: %v", s.configCluster, err)
		pilotSDSCertificateErrors.Increment()
		return nil, model.DefaultXdsLogDetails, nil
	}
	// Filter down to resources we can access. We do not return an error if they attempt to access a Secret
	// they cannot; instead we just exclude it. This ensures that a single bad reference does not break the whole
	// SDS flow. The pilotSDSCertificateErrors metric and logs handle visibility into invalid references.
	resources := filterAuthorizedResources(s.parseResources(w.ResourceNames, proxy), proxy, proxyClusterSecrets)
	results := model.Resources{}
	cached, regenerated := 0, 0
	for _, sr := range resources {
		if updatedSecrets != nil {
			if !containsAny(updatedSecrets, relatedConfigs(model.ConfigKey{Kind: kind.Secret, Name: sr.Name, Namespace: sr.Namespace})) {
				// This is an incremental update, filter out secrets that are not updated.
				continue
			}
		}
		cachedItem := s.cache.Get(sr)
		if cachedItem != nil && !features.EnableUnsafeAssertions {
			// If it is in the Cache, add it and continue
			// We skip cache if assertions are enabled, so that the cache will assert our eviction logic is correct
			results = append(results, cachedItem)
			cached++
			continue
		}
		regenerated++
		res := s.generate(sr, configClusterSecrets, proxyClusterSecrets, proxy)
		if res != nil {
			s.cache.Add(sr, req, res)
			results = append(results, res)
		}
	}
	return results, model.XdsLogDetails{
		Incremental:    updatedSecrets != nil,
		AdditionalInfo: fmt.Sprintf("cached:%v/%v", cached, cached+regenerated),
	}, nil
}
// generate builds the SDS resource for a single SecretResource. Kubernetes
// Gateway credentials are fetched from the config cluster; everything else
// from the proxy's own cluster. Returns nil (after recording the error metric)
// when the material cannot be fetched.
func (s *SecretGen) generate(sr SecretResource, configClusterSecrets, proxyClusterSecrets credscontroller.Controller, proxy *model.Proxy) *discovery.Resource {
	// Fetch the appropriate cluster's secret, based on the credential type
	var secretController credscontroller.Controller
	switch sr.ResourceType {
	case credentials.KubernetesGatewaySecretType:
		secretController = configClusterSecrets
	default:
		secretController = proxyClusterSecrets
	}
	// Names with the -cacert suffix carry only a CA certificate for validation.
	isCAOnlySecret := strings.HasSuffix(sr.Name, securitymodel.SdsCaSuffix)
	if isCAOnlySecret {
		caCertInfo, err := secretController.GetCaCert(sr.Name, sr.Namespace)
		if err != nil {
			pilotSDSCertificateErrors.Increment()
			log.Warnf("failed to fetch ca certificate for %s: %v", sr.ResourceName, err)
			return nil
		}
		if features.VerifySDSCertificate {
			// Validation failures are logged and counted but do not block the push.
			if err := ValidateCertificate(caCertInfo.Cert); err != nil {
				recordInvalidCertificate(sr.ResourceName, err)
			}
		}
		res := toEnvoyCaSecret(sr.ResourceName, caCertInfo)
		return res
	}
	certInfo, err := secretController.GetCertInfo(sr.Name, sr.Namespace)
	if err != nil {
		pilotSDSCertificateErrors.Increment()
		log.Warnf("failed to fetch key and certificate for %s: %v", sr.ResourceName, err)
		return nil
	}
	if features.VerifySDSCertificate {
		// As above: warn on invalid certificates without dropping the resource.
		if err := ValidateCertificate(certInfo.Cert); err != nil {
			recordInvalidCertificate(sr.ResourceName, err)
		}
	}
	res := toEnvoyTLSSecret(sr.ResourceName, certInfo, proxy, s.meshConfig)
	return res
}
// ValidateCertificate checks that data holds a PEM-encoded certificate (or
// several concatenated in one DER blob) whose validity window includes the
// current time. Only the first PEM block is inspected.
// Returns an error for undecodable input, unparsable DER, an empty certificate
// block, or any certificate outside its NotBefore/NotAfter window.
func ValidateCertificate(data []byte) error {
	block, _ := pem.Decode(data)
	if block == nil {
		return fmt.Errorf("pem decode failed")
	}
	certs, err := x509.ParseCertificates(block.Bytes)
	if err != nil {
		return err
	}
	// An empty block parses successfully but yields no certificates; without
	// this check such input would be silently accepted.
	if len(certs) == 0 {
		return fmt.Errorf("pem block contained no certificates")
	}
	now := time.Now()
	for _, cert := range certs {
		// check if the certificate has expired
		if now.After(cert.NotAfter) || now.Before(cert.NotBefore) {
			return fmt.Errorf("certificate is expired or not yet valid")
		}
	}
	return nil
}
// recordInvalidCertificate bumps the SDS error metric and logs the validation
// failure for the named resource.
func recordInvalidCertificate(name string, err error) {
	pilotSDSCertificateErrors.Increment()
	log.Warnf("invalid certificates: %q: %v", name, err)
}
// filterAuthorizedResources takes a list of SecretResource and filters out resources that proxy cannot access.
// Authorization is memoized: the credentials controller is consulted at most
// once per call, regardless of how many same-namespace secrets are requested.
func filterAuthorizedResources(resources []SecretResource, proxy *model.Proxy, secrets credscontroller.Controller) []SecretResource {
	var authzResult *bool
	var authzError error
	// isAuthorized is a small wrapper around credscontroller.Authorize so we only call it once instead of each time in the loop
	isAuthorized := func() bool {
		if authzResult != nil {
			return *authzResult
		}
		res := false
		if err := secrets.Authorize(proxy.VerifiedIdentity.ServiceAccount, proxy.VerifiedIdentity.Namespace); err == nil {
			res = true
		} else {
			authzError = err
		}
		authzResult = &res
		return res
	}
	// There are 4 cases of secret reference
	// Verified cross namespace (by ReferencePolicy). No Authz needed.
	// Verified same namespace (implicit). No Authz needed.
	// Unverified cross namespace. Never allowed.
	// Unverified same namespace. Allowed if authorized.
	allowedResources := make([]SecretResource, 0, len(resources))
	deniedResources := make([]string, 0)
	for _, r := range resources {
		sameNamespace := r.Namespace == proxy.VerifiedIdentity.Namespace
		verified := proxy.MergedGateway != nil && proxy.MergedGateway.VerifiedCertificateReferences.Contains(r.ResourceName)
		switch r.ResourceType {
		case credentials.KubernetesGatewaySecretType:
			// For KubernetesGateway, we only allow VerifiedCertificateReferences.
			// This means a Secret in the same namespace as the Gateway (which also must be in the same namespace
			// as the proxy), or a ReferencePolicy allowing the reference.
			if verified {
				allowedResources = append(allowedResources, r)
			} else {
				deniedResources = append(deniedResources, r.Name)
			}
		case credentials.KubernetesSecretType:
			// For Kubernetes, we require the secret to be in the same namespace as the proxy and for it to be
			// authorized for access.
			if sameNamespace && isAuthorized() {
				allowedResources = append(allowedResources, r)
			} else {
				deniedResources = append(deniedResources, r.Name)
			}
		default:
			// Should never happen
			// Fix: log the ResourceType field (switched on above); `r.Type` is a
			// bound method value and would render as garbage under %q.
			log.Warnf("unknown credential type %q", r.ResourceType)
			pilotSDSCertificateErrors.Increment()
		}
	}
	// If we filtered any out, report an error. We aggregate errors in one place here, rather than in the loop,
	// to avoid excessive logs.
	if len(deniedResources) > 0 {
		errMessage := authzError
		if errMessage == nil {
			errMessage = fmt.Errorf("cross namespace secret reference requires ReferencePolicy")
		}
		log.Warnf("proxy %s attempted to access unauthorized certificates %s: %v", proxy.ID, atMostNJoin(deniedResources, 3), errMessage)
		pilotSDSCertificateErrors.Increment()
	}
	return allowedResources
}
// toEnvoyCaSecret builds an SDS validation-context secret carrying the CA
// certificate (and CRL, when present) as inline bytes.
func toEnvoyCaSecret(name string, certInfo *credscontroller.CertInfo) *discovery.Resource {
	vc := &envoytls.CertificateValidationContext{
		TrustedCa: &core.DataSource{
			Specifier: &core.DataSource_InlineBytes{InlineBytes: certInfo.Cert},
		},
	}
	if certInfo.CRL != nil {
		vc.Crl = &core.DataSource{
			Specifier: &core.DataSource_InlineBytes{InlineBytes: certInfo.CRL},
		}
	}
	secret := &envoytls.Secret{
		Name: name,
		Type: &envoytls.Secret_ValidationContext{
			ValidationContext: vc,
		},
	}
	return &discovery.Resource{
		Name:     name,
		Resource: protoconv.MessageToAny(secret),
	}
}
// toEnvoyTLSSecret builds an SDS TLS-certificate secret for the given material.
// When the proxy's effective ProxyConfig selects a private key provider
// (CryptoMB or QAT), the private key is delivered through that provider config;
// otherwise the key is inlined next to the certificate chain, optionally with
// an OCSP staple.
func toEnvoyTLSSecret(name string, certInfo *credscontroller.CertInfo, proxy *model.Proxy, meshConfig *mesh.MeshConfig) *discovery.Resource {
	var res *anypb.Any
	pkpConf := proxy.Metadata.ProxyConfigOrDefault(meshConfig.GetDefaultConfig()).GetPrivateKeyProvider()
	switch pkpConf.GetProvider().(type) {
	case *mesh.PrivateKeyProvider_Cryptomb:
		crypto := pkpConf.GetCryptomb()
		// NOTE(review): only the Nanos component of the configured poll delay is
		// used; the Seconds component appears to be dropped — confirm intended.
		msg := protoconv.MessageToAny(&cryptomb.CryptoMbPrivateKeyMethodConfig{
			PollDelay: durationpb.New(time.Duration(crypto.GetPollDelay().Nanos)),
			PrivateKey: &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{
					InlineBytes: certInfo.Key,
				},
			},
		})
		res = protoconv.MessageToAny(&envoytls.Secret{
			Name: name,
			Type: &envoytls.Secret_TlsCertificate{
				TlsCertificate: &envoytls.TlsCertificate{
					CertificateChain: &core.DataSource{
						Specifier: &core.DataSource_InlineBytes{
							InlineBytes: certInfo.Cert,
						},
					},
					PrivateKeyProvider: &envoytls.PrivateKeyProvider{
						ProviderName: "cryptomb",
						ConfigType: &envoytls.PrivateKeyProvider_TypedConfig{
							TypedConfig: msg,
						},
						Fallback: crypto.GetFallback().GetValue(),
					},
				},
			},
		})
	case *mesh.PrivateKeyProvider_Qat:
		qatConf := pkpConf.GetQat()
		// NOTE(review): same Nanos-only poll delay as the CryptoMB branch above.
		msg := protoconv.MessageToAny(&qat.QatPrivateKeyMethodConfig{
			PollDelay: durationpb.New(time.Duration(qatConf.GetPollDelay().Nanos)),
			PrivateKey: &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{
					InlineBytes: certInfo.Key,
				},
			},
		})
		res = protoconv.MessageToAny(&envoytls.Secret{
			Name: name,
			Type: &envoytls.Secret_TlsCertificate{
				TlsCertificate: &envoytls.TlsCertificate{
					CertificateChain: &core.DataSource{
						Specifier: &core.DataSource_InlineBytes{
							InlineBytes: certInfo.Cert,
						},
					},
					PrivateKeyProvider: &envoytls.PrivateKeyProvider{
						ProviderName: "qat",
						ConfigType: &envoytls.PrivateKeyProvider_TypedConfig{
							TypedConfig: msg,
						},
						Fallback: qatConf.GetFallback().GetValue(),
					},
				},
			},
		})
	default:
		// No private key provider configured: inline both chain and key.
		tlsCertificate := &envoytls.TlsCertificate{
			CertificateChain: &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{
					InlineBytes: certInfo.Cert,
				},
			},
			PrivateKey: &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{
					InlineBytes: certInfo.Key,
				},
			},
		}
		if certInfo.Staple != nil {
			tlsCertificate.OcspStaple = &core.DataSource{
				Specifier: &core.DataSource_InlineBytes{
					InlineBytes: certInfo.Staple,
				},
			}
		}
		res = protoconv.MessageToAny(&envoytls.Secret{
			Name: name,
			Type: &envoytls.Secret_TlsCertificate{
				TlsCertificate: tlsCertificate,
			},
		})
	}
	return &discovery.Resource{
		Name:     name,
		Resource: res,
	}
}
// containsAny reports whether any of keys is present in mp.
func containsAny(mp sets.Set[model.ConfigKey], keys []model.ConfigKey) bool {
	for _, key := range keys {
		if mp.Contains(key) {
			return true
		}
	}
	return false
}
// relatedConfigs maps a single resource to a list of relevant resources. This is used for cache invalidation
// and push skipping. This is because a secret potentially has a dependency on the same secret with or without
// the -cacert suffix. By including this dependency we ensure we do not miss any updates.
// This is important for cases where we have a compound secret. In this case, the `foo` secret may update,
// but we need to push both the `foo` and `foo-cacert` resource name, or they will fall out of sync.
func relatedConfigs(k model.ConfigKey) []model.ConfigKey {
	counterpart := k
	if strings.HasSuffix(k.Name, securitymodel.SdsCaSuffix) {
		// CA variant: pair it with the base secret name.
		counterpart.Name = strings.TrimSuffix(k.Name, securitymodel.SdsCaSuffix)
	} else {
		// Base secret: pair it with its -cacert variant.
		counterpart.Name = k.Name + securitymodel.SdsCaSuffix
	}
	return []model.ConfigKey{k, counterpart}
}
// SecretGen is the XDS resource generator for SDS (secret) resources.
type SecretGen struct {
	// secrets provides per-cluster access to credential material.
	secrets credscontroller.MulticlusterController
	// Cache for XDS resources
	cache model.XdsCache
	// configCluster is the cluster hosting config (Gateway) resources.
	configCluster cluster.ID
	// meshConfig supplies proxy defaults (e.g. the private key provider).
	meshConfig *mesh.MeshConfig
}

// Compile-time interface conformance check.
var _ model.XdsResourceGenerator = &SecretGen{}
// NewSecretGen creates the SDS generator backed by the given credentials controller.
// TODO: Currently we only have a single credentials controller (Kubernetes). In the future, we will need a mapping
// of resource type to secret controller (ie kubernetes:// -> KubernetesController, vault:// -> VaultController)
func NewSecretGen(sc credscontroller.MulticlusterController, cache model.XdsCache, configCluster cluster.ID,
	meshConfig *mesh.MeshConfig,
) *SecretGen {
	gen := &SecretGen{
		secrets:       sc,
		cache:         cache,
		configCluster: configCluster,
		meshConfig:    meshConfig,
	}
	return gen
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"fmt"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
status "github.com/envoyproxy/go-control-plane/envoy/service/status/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/protoconv"
v3 "istio.io/istio/pilot/pkg/xds/v3"
)
const (
	// TypeDebugPrefix is the shared type URL prefix for all internal debug xDS types.
	TypeDebugPrefix = v3.DebugType + "/"
	// TypeDebugSyncronization requests Envoy CSDS for proxy sync status
	TypeDebugSyncronization = v3.DebugType + "/syncz"
	// TypeDebugConfigDump requests Envoy configuration for a proxy without creating one
	TypeDebugConfigDump = v3.DebugType + "/config_dump"
	// TODO: TypeURLReady - readiness events for endpoints, agent can propagate
)
// StatusGen is a Generator for XDS status: connections, syncz, configdump
type StatusGen struct {
	// Server is the discovery server whose connections are inspected.
	Server *DiscoveryServer
	// TODO: track last N Nacks and connection events, with 'version' based on timestamp.
	// On new connect, use version to send recent events since last update.
}
// NewStatusGen builds a StatusGen bound to the given discovery server.
func NewStatusGen(s *DiscoveryServer) *StatusGen {
	sg := &StatusGen{Server: s}
	return sg
}
// Generate XDS responses about internal events:
// - connection status
// - NACKs
// We can also expose ACKS.
// Implements model.XdsResourceGenerator by delegating to handleInternalRequest.
func (sg *StatusGen) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	return sg.handleInternalRequest(proxy, w, req)
}
// GenerateDeltas produces delta XDS responses about internal events:
// - connection status
// - NACKs
// We can also expose ACKS.
// The response always replaces the full state (final true), with no deletions.
func (sg *StatusGen) GenerateDeltas(
	proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	res, detail, err := sg.handleInternalRequest(proxy, w, req)
	return res, nil, detail, true, err
}
// handleInternalRequest serves the istio.io/debug internal xDS types (sync
// status and config dump). Malformed or unknown requests yield an empty
// response rather than an error so clients are not disconnected.
func (sg *StatusGen) handleInternalRequest(_ *model.Proxy, w *model.WatchedResource, _ *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	res := model.Resources{}
	switch w.TypeUrl {
	case TypeDebugSyncronization:
		res = sg.debugSyncz()
	case TypeDebugConfigDump:
		// A config dump request must name exactly one proxy.
		if len(w.ResourceNames) != 1 {
			// Malformed request from client
			log.Infof("%s with %d ResourceNames", TypeDebugConfigDump, len(w.ResourceNames))
			break
		}
		dumpRes, err := sg.debugConfigDump(w.ResourceNames[0])
		if err != nil {
			log.Infof("%s failed: %v", TypeDebugConfigDump, err)
			break
		}
		res = dumpRes
	}
	return res, model.DefaultXdsLogDetails, nil
}
// isProxy is an ad-hoc check that the connection represents a sidecar-style
// proxy, i.e. it reported node metadata including a proxy config.
func isProxy(con *Connection) bool {
	if con == nil || con.proxy == nil || con.proxy.Metadata == nil {
		return false
	}
	return con.proxy.Metadata.ProxyConfig != nil
}
// isZtunnel reports whether the connection belongs to a ztunnel node with metadata.
func isZtunnel(con *Connection) bool {
	if con == nil || con.proxy == nil || con.proxy.Metadata == nil {
		return false
	}
	return con.proxy.Type == model.Ztunnel
}
// debugSyncz builds one CSDS ClientConfig resource per connected proxy,
// reporting the sync state (derived from sent/acked nonces) of each major
// xDS type for that proxy.
func (sg *StatusGen) debugSyncz() model.Resources {
	res := model.Resources{}
	stypes := []string{
		v3.ListenerType,
		v3.RouteType,
		v3.EndpointType,
		v3.ClusterType,
		v3.ExtensionConfigurationType,
	}
	for _, con := range sg.Server.Clients() {
		// Hold the proxy read lock while reading WatchedResources and Metadata.
		con.proxy.RLock()
		// Skip "nodes" without metadata (they are probably istioctl queries!)
		if isProxy(con) || isZtunnel(con) {
			xdsConfigs := make([]*status.ClientConfig_GenericXdsConfig, 0)
			for _, stype := range stypes {
				pxc := &status.ClientConfig_GenericXdsConfig{}
				if watchedResource, ok := con.proxy.WatchedResources[stype]; ok {
					pxc.ConfigStatus = debugSyncStatus(watchedResource)
				} else if isZtunnel(con) {
					// ztunnel does not watch the Envoy types listed above;
					// report UNKNOWN rather than NOT_SENT.
					pxc.ConfigStatus = status.ConfigStatus_UNKNOWN
				} else {
					pxc.ConfigStatus = status.ConfigStatus_NOT_SENT
				}
				pxc.TypeUrl = stype
				xdsConfigs = append(xdsConfigs, pxc)
			}
			clientConfig := &status.ClientConfig{
				Node: &core.Node{
					Id: con.proxy.ID,
					Metadata: model.NodeMetadata{
						ClusterID:    con.proxy.Metadata.ClusterID,
						Namespace:    con.proxy.Metadata.Namespace,
						IstioVersion: con.proxy.Metadata.IstioVersion,
					}.ToStruct(),
				},
				GenericXdsConfigs: xdsConfigs,
			}
			res = append(res, &discovery.Resource{
				Name:     clientConfig.Node.Id,
				Resource: protoconv.MessageToAny(clientConfig),
			})
		}
		con.proxy.RUnlock()
	}
	return res
}
// debugSyncStatus converts the nonce bookkeeping of a watched resource into a
// CSDS ConfigStatus: never sent, fully acked, or stale.
func debugSyncStatus(wr *model.WatchedResource) status.ConfigStatus {
	switch {
	case wr.NonceSent == "":
		return status.ConfigStatus_NOT_SENT
	case wr.NonceAcked == wr.NonceSent:
		return status.ConfigStatus_SYNCED
	default:
		return status.ConfigStatus_STALE
	}
}
// debugConfigDump produces the Envoy config dump for the single proxy connected
// to this istiod instance under proxyID.
func (sg *StatusGen) debugConfigDump(proxyID string) (model.Resources, error) {
	conn := sg.Server.getProxyConnection(proxyID)
	if conn == nil {
		// This is "like" a 404. The error is the client's. However, this endpoint
		// only tracks a single "shard" of connections. The client may try another instance.
		return nil, fmt.Errorf("config dump could not find connection for proxyID %q", proxyID)
	}
	dump, err := sg.Server.connectionConfigDump(conn, false)
	if err != nil {
		return nil, err
	}
	return model.AnyToUnnamedResources(dump.Configs), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"fmt"
"strings"
)
// atMostNJoin joins data with ", ", truncating at limit entries: the first
// limit-1 entries are shown and the remainder summarized as ", and N others".
// A limit below 2 disables truncation and joins everything.
func atMostNJoin(data []string, limit int) string {
	if limit <= 1 {
		// Assume limit > 1, but make sure we don't crash if someone does pass those.
		// (<= also guards negative limits, which would slice out of range below.)
		return strings.Join(data, ", ")
	}
	if len(data) == 0 {
		return ""
	}
	if len(data) < limit {
		return strings.Join(data, ", ")
	}
	return strings.Join(data[:limit-1], ", ") + fmt.Sprintf(", and %d others", len(data)-limit+1)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v3
import (
"strings"
resource "github.com/envoyproxy/go-control-plane/pkg/resource/v3"
)
const (
	// envoyTypePrefix is the type URL prefix shared by all Envoy-owned API types.
	envoyTypePrefix = resource.APITypePrefix + "envoy."
	ClusterType = resource.ClusterType
	EndpointType = resource.EndpointType
	ListenerType = resource.ListenerType
	RouteType = resource.RouteType
	SecretType = resource.SecretType
	ExtensionConfigurationType = resource.ExtensionConfigType
	// Istio-specific (non-Envoy) xDS type URLs served by istiod.
	NameTableType = resource.APITypePrefix + "istio.networking.nds.v1.NameTable"
	HealthInfoType = resource.APITypePrefix + "istio.v1.HealthInformation"
	ProxyConfigType = resource.APITypePrefix + "istio.mesh.v1alpha1.ProxyConfig"
	// DebugType requests debug info from istio, a secured implementation for istio debug interface.
	DebugType = "istio.io/debug"
	BootstrapType = resource.APITypePrefix + "envoy.config.bootstrap.v3.Bootstrap"
	AddressType = resource.APITypePrefix + "istio.workload.Address"
	WorkloadType = resource.APITypePrefix + "istio.workload.Workload"
	WorkloadAuthorizationType = resource.APITypePrefix + "istio.security.Authorization"
	// nolint
	HttpProtocolOptionsType = "envoy.extensions.upstreams.http.v3.HttpProtocolOptions"
)
// GetShortType returns an abbreviated form of a type, useful for logging or human friendly messages.
// Unknown type URLs are returned unchanged.
func GetShortType(typeURL string) string {
	switch typeURL {
	case ClusterType:
		return "CDS"
	case ListenerType:
		return "LDS"
	case RouteType:
		return "RDS"
	case EndpointType:
		return "EDS"
	case SecretType:
		return "SDS"
	case NameTableType:
		return "NDS"
	case ProxyConfigType:
		return "PCDS"
	case ExtensionConfigurationType:
		return "ECDS"
	case AddressType, WorkloadType:
		return "WDS"
	case WorkloadAuthorizationType:
		return "WADS"
	default:
		return typeURL
	}
}
// GetMetricType returns the form of a type reported for metrics (lowercase).
// Unknown type URLs are returned unchanged.
func GetMetricType(typeURL string) string {
	switch typeURL {
	case ClusterType:
		return "cds"
	case ListenerType:
		return "lds"
	case RouteType:
		return "rds"
	case EndpointType:
		return "eds"
	case SecretType:
		return "sds"
	case NameTableType:
		return "nds"
	case ProxyConfigType:
		return "pcds"
	case ExtensionConfigurationType:
		return "ecds"
	case BootstrapType:
		return "bds"
	case AddressType, WorkloadType:
		return "wds"
	case WorkloadAuthorizationType:
		return "wads"
	default:
		return typeURL
	}
}
// GetResourceType returns resource form of an abbreviated form.
// Lookups are case-insensitive; unknown values are returned unchanged.
func GetResourceType(shortType string) string {
	long := map[string]string{
		"CDS":  ClusterType,
		"LDS":  ListenerType,
		"RDS":  RouteType,
		"EDS":  EndpointType,
		"SDS":  SecretType,
		"NDS":  NameTableType,
		"PCDS": ProxyConfigType,
		"ECDS": ExtensionConfigurationType,
		"WDS":  AddressType,
		"WADS": WorkloadAuthorizationType,
	}
	if full, ok := long[strings.ToUpper(shortType)]; ok {
		return full
	}
	return shortType
}
// IsEnvoyType checks whether the typeURL is a valid Envoy type, i.e. it
// carries the shared "...envoy." type URL prefix.
func IsEnvoyType(typeURL string) bool {
	return strings.HasPrefix(typeURL, envoyTypePrefix)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/protoconv"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/util/sets"
)
// WorkloadGenerator generates Workload (WDS/Address) xDS resources.
// See GenerateDeltas for the delta/on-demand semantics.
type WorkloadGenerator struct {
	// Server provides access to the discovery server environment (service discovery, etc.).
	Server *DiscoveryServer
}
// Compile-time assertions: WorkloadGenerator implements both the state-of-the-world
// and delta xDS generator interfaces.
var (
	_ model.XdsResourceGenerator      = &WorkloadGenerator{}
	_ model.XdsDeltaResourceGenerator = &WorkloadGenerator{}
)
// GenerateDeltas computes Workload resources. This is designed to be highly optimized to delta updates,
// and supports *on-demand* client usage. A client can subscribe with a wildcard subscription and get all
// resources (with delta updates), or on-demand and only get responses for specifically subscribed resources.
//
// Incoming requests may be for VIP or Pod IP addresses. However, all responses are Workload resources, which are pod based.
// This means subscribing to a VIP may end up pushing many resources of different name than the request.
// On-demand clients are expected to handle this (for wildcard, this is not applicable, as they don't specify any resources at all).
//
// Returns (resources, removed resource names, log details, applied, error). The boolean indicates
// whether a response should actually be sent.
func (e WorkloadGenerator) GenerateDeltas(
	proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	updatedAddresses := model.ConfigNameOfKind(req.ConfigsUpdated, kind.Address)
	isReq := req.IsRequest()
	if len(updatedAddresses) == 0 && len(req.ConfigsUpdated) > 0 {
		// Nothing changed that we watch (the update contained no Address-kind configs); skip.
		return nil, nil, model.XdsLogDetails{}, false, nil
	}
	// subs is the set of resource names the client is currently subscribed to.
	subs := sets.New(w.ResourceNames...)
	addresses := updatedAddresses
	// If it is not a wildcard, filter out resources we are not subscribed to
	if !w.Wildcard {
		addresses = addresses.Intersection(subs)
	}
	// Specific requested resource: always include
	addresses = addresses.Merge(req.Delta.Subscribed)
	addresses = addresses.Difference(req.Delta.Unsubscribed)
	if !w.Wildcard {
		// We only need this for on-demand. This allows us to subscribe the client to resources they
		// didn't explicitly request.
		// For wildcard, they subscribe to everything already.
		// TODO: optimize me
		additional := e.Server.Env.ServiceDiscovery.AdditionalPodSubscriptions(proxy, addresses, subs)
		addresses.Merge(additional)
	}
	// TODO: it is needlessly wasteful to do a full sync just because the rest of Istio thought it was "full"
	// The only things that can really trigger a "full" push here is trust domain or network changing, which is extremely rare
	// We do a full push for wildcard requests (initial proxy sync) or for full pushes with no ConfigsUpdated (since we don't know what changed)
	full := (isReq && w.Wildcard) || (!isReq && req.Full && len(req.ConfigsUpdated) == 0)
	// Nothing to do
	if len(addresses) == 0 && !full {
		if isReq {
			// We need to respond for requests, even if we have nothing to respond with
			return make(model.Resources, 0), nil, model.XdsLogDetails{}, false, nil
		}
		// For NOP pushes, no need
		return nil, nil, model.XdsLogDetails{}, false, nil
	}
	resources := make(model.Resources, 0)
	addrs, removed := e.Server.Env.ServiceDiscovery.AddressInformation(addresses)
	// Note: while "removed" is a weird name for a resource that never existed, this is how the spec works:
	// https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#id2
	have := sets.New[string]()
	for _, addr := range addrs {
		// TODO(@hzxuzhonghu): calculate removed with aliases in `AddressInformation`
		aliases := addr.Aliases()
		removed.DeleteAll(aliases...)
		n := addr.ResourceName()
		have.Insert(n)
		switch w.TypeUrl {
		case v3.WorkloadType:
			// WDS clients get only the Workload portion of the address, when present.
			if addr.GetWorkload() != nil {
				resources = append(resources, &discovery.Resource{
					Name:     n,
					Aliases:  aliases,
					Resource: protoconv.MessageToAny(addr.GetWorkload()), // TODO: pre-marshal
				})
			}
		case v3.AddressType:
			resources = append(resources, &discovery.Resource{
				Name:     n,
				Aliases:  aliases,
				Resource: protoconv.MessageToAny(addr), // TODO: pre-marshal
			})
		}
	}
	if full {
		// If it's a full push, AddressInformation won't have info to compute the full set of removals.
		// Instead, we can see what resources we were subscribed to but are now missing; those were removed.
		removed = subs.Difference(have).Merge(removed)
	}
	if !w.Wildcard {
		// For on-demand, we may have requested a VIP but gotten Pod IPs back. We need to update
		// the internal book-keeping to subscribe to the Pods, so that we push updates to those Pods.
		w.ResourceNames = subs.Merge(have).UnsortedList()
	} else {
		// For wildcard, we record all resources that have been pushed and not removed.
		// This is needed to correctly calculate removed resources during a full push, alongside specific address removals.
		w.ResourceNames = subs.Merge(have).Difference(removed).UnsortedList()
	}
	return resources, removed.UnsortedList(), model.XdsLogDetails{}, true, nil
}
// Generate implements the state-of-the-world variant by delegating to GenerateDeltas
// and discarding the delta-only outputs (removals and the applied flag).
func (e WorkloadGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	res, _, logDetails, _, err := e.GenerateDeltas(proxy, req, w)
	return res, logDetails, err
}
// WorkloadRBACGenerator generates ambient authorization policy (WADS) xDS resources.
type WorkloadRBACGenerator struct {
	// Server provides access to the discovery server environment (policy lookup, etc.).
	Server *DiscoveryServer
}
// GenerateDeltas computes authorization policy resources for ambient workloads.
// For incremental pushes it only regenerates the updated policies; policies that were
// requested but not returned by the discovery layer are reported as removed.
func (e WorkloadRBACGenerator) GenerateDeltas(
	proxy *model.Proxy,
	req *model.PushRequest,
	w *model.WatchedResource,
) (model.Resources, model.DeletedResources, model.XdsLogDetails, bool, error) {
	var updatedPolicies sets.Set[model.ConfigKey]
	if len(req.ConfigsUpdated) != 0 {
		updatedPolicies = model.ConfigsOfKind(req.ConfigsUpdated, kind.AuthorizationPolicy)
		// Convert the actual Kubernetes PeerAuthentication policies to the synthetic ones
		// by adding the prefix
		//
		// This is needed because the handler that produces the ConfigUpdate blindly sends
		// the Kubernetes resource names without context of the synthetic Ambient policies
		// TODO: Split out PeerAuthentication into a separate handler in
		// https://github.com/istio/istio/blob/master/pilot/pkg/bootstrap/server.go#L882
		for p := range model.ConfigsOfKind(req.ConfigsUpdated, kind.PeerAuthentication) {
			updatedPolicies.Insert(model.ConfigKey{
				Name:      model.GetAmbientPolicyConfigName(p),
				Namespace: p.Namespace,
				Kind:      p.Kind,
			})
		}
	}
	if len(req.ConfigsUpdated) != 0 && len(updatedPolicies) == 0 {
		// This was an incremental push for a resource we don't watch... skip
		return nil, nil, model.DefaultXdsLogDetails, false, nil
	}
	// An empty (nil) updatedPolicies set means "all policies" to the discovery layer.
	policies := e.Server.Env.ServiceDiscovery.Policies(updatedPolicies)
	resources := make(model.Resources, 0)
	expected := sets.New[string]()
	if len(updatedPolicies) > 0 {
		// Partial update. Removes are ones we request but didn't get back when querying the policies
		for k := range updatedPolicies {
			expected.Insert(k.Namespace + "/" + k.Name)
		}
	} else {
		// Full update, expect everything
		expected.InsertAll(w.ResourceNames...)
	}
	// removed aliases expected: names still present after the loop below were not returned
	// by Policies and are therefore removals.
	removed := expected
	for _, p := range policies {
		n := p.Namespace + "/" + p.Name
		removed.Delete(n) // We found it, so it isn't a removal
		resources = append(resources, &discovery.Resource{
			Name:     n,
			Resource: protoconv.MessageToAny(p),
		})
	}
	return resources, sets.SortedList(removed), model.XdsLogDetails{}, true, nil
}
// Generate implements the state-of-the-world variant by delegating to GenerateDeltas
// and discarding the delta-only outputs (removals and the applied flag).
func (e WorkloadRBACGenerator) Generate(proxy *model.Proxy, w *model.WatchedResource, req *model.PushRequest) (model.Resources, model.XdsLogDetails, error) {
	res, _, logDetails, _, err := e.GenerateDeltas(proxy, req, w)
	return res, logDetails, err
}
// Compile-time assertions: WorkloadRBACGenerator implements both the state-of-the-world
// and delta xDS generator interfaces.
var (
	_ model.XdsResourceGenerator      = &WorkloadRBACGenerator{}
	_ model.XdsDeltaResourceGenerator = &WorkloadRBACGenerator{}
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"encoding/json"
"strconv"
"strings"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/lazy"
istioversion "istio.io/istio/pkg/version"
)
// IstioControlPlaneInstance defines the format Istio uses for when creating Envoy config.core.v3.ControlPlane.identifier.
// It is JSON-marshaled into the identifier string (see the controlPlane lazy initializer below).
type IstioControlPlaneInstance struct {
	// The Istio component type (e.g. "istiod")
	Component string
	// The ID of the component instance
	ID string
	// The Istio version
	Info istioversion.BuildInfo
}
// Evaluate the controlPlane lazily in order to allow "POD_NAME" env var setting after running the process.
var controlPlane = lazy.New(func() (*core.ControlPlane, error) {
	// The Pod Name (instance identity) is in PilotArgs, but not reachable globally nor from DiscoveryServer
	podName := env.Register("POD_NAME", "", "").Get()
	byVersion, err := json.Marshal(IstioControlPlaneInstance{
		Component: "istiod",
		ID:        podName,
		Info:      istioversion.Info,
	})
	if err != nil {
		// Best-effort: a marshal failure is only logged; the identifier then
		// falls back to the empty string below.
		log.Warnf("XDS: Could not serialize control plane id: %v", err)
	}
	// Never returns an error, so ControlPlane() can safely ignore it.
	return &core.ControlPlane{Identifier: string(byVersion)}, nil
})
// ControlPlane identifies the instance and Istio version.
func ControlPlane() *core.ControlPlane {
	// The lazy initializer never returns an error, so it is safe to discard here.
	instance, _ := controlPlane.Get()
	return instance
}
// findGenerator resolves the resource generator for a type URL on a given connection.
// Registered generators are consulted from most to least specific key; if none match,
// the proxy's own generator (if any) is used, then the debug/API fallbacks.
func (s *DiscoveryServer) findGenerator(typeURL string, con *Connection) model.XdsResourceGenerator {
	candidates := []string{
		con.proxy.Metadata.Generator + "/" + typeURL,
		string(con.proxy.Type) + "/" + typeURL,
		typeURL,
	}
	for _, key := range candidates {
		if g, f := s.Generators[key]; f {
			return g
		}
	}
	// XdsResourceGenerator is the default generator for this connection. We want to allow
	// some types to use custom generators - for example EDS.
	if g := con.proxy.XdsResourceGenerator; g != nil {
		return g
	}
	if strings.HasPrefix(typeURL, TypeDebugPrefix) {
		return s.Generators["event"]
	}
	// TODO move this to just directly using the resource TypeUrl
	return s.Generators["api"] // default to "MCP" generators - any type supported by store
}
// Push an XDS resource for the given connection. Configuration will be generated
// based on the passed in generator. Based on the updates field, generators may
// choose to send partial or even no response if there are no changes.
//
// Returns the send error (if any); a nil watched resource or missing generator is a no-op.
func (s *DiscoveryServer) pushXds(con *Connection, w *model.WatchedResource, req *model.PushRequest) error {
	if w == nil {
		return nil
	}
	gen := s.findGenerator(w.TypeUrl, con)
	if gen == nil {
		return nil
	}
	t0 := time.Now()
	// If delta is set, client is requesting new resources or removing old ones. We should just generate the
	// new resources it needs, rather than the entire set of known resources.
	// Note: we do not need to account for unsubscribed resources as these are handled by parent removal;
	// See https://www.envoyproxy.io/docs/envoy/latest/api-docs/xds_protocol#deleting-resources.
	// This means if there are only removals, we will not respond.
	var logFiltered string
	if !req.Delta.IsEmpty() && !con.proxy.IsProxylessGrpc() {
		logFiltered = " filtered:" + strconv.Itoa(len(w.ResourceNames)-len(req.Delta.Subscribed))
		// Narrow the watched resource to just the subscribed names for generation.
		w = &model.WatchedResource{
			TypeUrl:       w.TypeUrl,
			ResourceNames: req.Delta.Subscribed.UnsortedList(),
		}
	}
	res, logdata, err := gen.Generate(con.proxy, w, req)
	info := ""
	if len(logdata.AdditionalInfo) > 0 {
		info = " " + logdata.AdditionalInfo
	}
	if len(logFiltered) > 0 {
		info += logFiltered
	}
	if err != nil || res == nil {
		// If we have nothing to send, report that we got an ACK for this version.
		if s.StatusReporter != nil {
			s.StatusReporter.RegisterEvent(con.conID, w.TypeUrl, req.Push.LedgerVersion)
		}
		if log.DebugEnabled() {
			log.Debugf("%s: SKIP%s for node:%s%s", v3.GetShortType(w.TypeUrl), req.PushReason(), con.proxy.ID, info)
		}
		return err
	}
	defer func() { recordPushTime(w.TypeUrl, time.Since(t0)) }()
	resp := &discovery.DiscoveryResponse{
		ControlPlane: ControlPlane(),
		TypeUrl:      w.TypeUrl,
		// TODO: send different version for incremental eds
		VersionInfo: req.Push.PushVersion,
		Nonce:       nonce(req.Push.LedgerVersion),
		Resources:   model.ResourcesToAny(res),
	}
	configSize := ResourceSize(res)
	configSizeBytes.With(typeTag.Value(w.TypeUrl)).Record(float64(configSize))
	ptype := "PUSH"
	if logdata.Incremental {
		ptype = "PUSH INC"
	}
	if err := con.send(resp); err != nil {
		if recordSendError(w.TypeUrl, err) {
			log.Warnf("%s: Send failure for node:%s resources:%d size:%s%s: %v",
				v3.GetShortType(w.TypeUrl), con.proxy.ID, len(res), util.ByteCount(configSize), info, err)
		}
		return err
	}
	switch {
	case !req.Full:
		if log.DebugEnabled() {
			log.Debugf("%s: %s%s for node:%s resources:%d size:%s%s",
				v3.GetShortType(w.TypeUrl), ptype, req.PushReason(), con.proxy.ID, len(res), util.ByteCount(configSize), info)
		}
	default:
		debug := ""
		if log.DebugEnabled() {
			// Add additional information to logs when debug mode enabled.
			debug = " nonce:" + resp.Nonce + " version:" + resp.VersionInfo
		}
		// Reuse configSize rather than recomputing ResourceSize(res): res has not
		// changed since it was measured above.
		log.Infof("%s: %s%s for node:%s resources:%d size:%v%s%s", v3.GetShortType(w.TypeUrl), ptype, req.PushReason(), con.proxy.ID, len(res),
			util.ByteCount(configSize), info, debug)
	}
	return nil
}
// ResourceSize returns the approximate wire size, in bytes, of the given resources.
// It sums the pre-marshaled Any payload lengths, which avoids the high cost of
// proto.Size at the expense of slightly under counting.
func ResourceSize(r model.Resources) int {
	total := 0
	for _, res := range r {
		total += len(res.Resource.Value)
	}
	return total
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"context"
"fmt"
"net"
"strings"
"time"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/test/bufconn"
authorizationv1 "k8s.io/api/authorization/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/fake"
k8stesting "k8s.io/client-go/testing"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/autoregistration"
"istio.io/istio/pilot/pkg/bootstrap"
"istio.io/istio/pilot/pkg/config/kube/gateway"
ingress "istio.io/istio/pilot/pkg/config/kube/ingress"
"istio.io/istio/pilot/pkg/config/memory"
kubesecrets "istio.io/istio/pilot/pkg/credentials/kube"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3"
"istio.io/istio/pilot/pkg/serviceregistry"
kube "istio.io/istio/pilot/pkg/serviceregistry/kube/controller"
memregistry "istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pilot/pkg/serviceregistry/util/xdsfake"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pilot/pkg/xds/endpoints"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pilot/test/xdstest"
"istio.io/istio/pkg/adsc"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/schema/kind"
"istio.io/istio/pkg/keepalive"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/multicluster"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/retry"
"istio.io/istio/pkg/util/sets"
)
// FakeOptions configures a FakeDiscoveryServer created by NewFakeDiscoveryServer.
type FakeOptions struct {
	// If provided, sets the name of the "default" or local cluster to the simulated pilots. (Defaults to opts.DefaultClusterName)
	DefaultClusterName cluster.ID
	// If provided, the minor version will be overridden for calls to GetKubernetesVersion to 1.minor
	KubernetesVersion string
	// If provided, a service registry with the name of each map key will be created with the given objects.
	KubernetesObjectsByCluster map[cluster.ID][]runtime.Object
	// If provided, these objects will be used directly for the default cluster ("Kubernetes" or DefaultClusterName)
	KubernetesObjects []runtime.Object
	// If provided, a service registry with the name of each map key will be created with the given objects.
	KubernetesObjectStringByCluster map[cluster.ID]string
	// If provided, the yaml string will be parsed and used as objects for the default cluster ("Kubernetes" or DefaultClusterName)
	KubernetesObjectString string
	// If provided, these configs will be used directly
	Configs []config.Config
	// If provided, the yaml string will be parsed and used as configs
	ConfigString string
	// If provided, the ConfigString will be treated as a go template, with this as input params
	ConfigTemplateInput any
	// If provided, this mesh config will be used
	MeshConfig *meshconfig.MeshConfig
	// If provided, watched for mesh-network changes; changes trigger a full push.
	NetworksWatcher mesh.NetworksWatcher
	// Callback to modify the kube client before it is started
	KubeClientModifier func(c kubelib.Client)
	// ListenerBuilder, if specified, allows making the server use the given
	// listener instead of a buffered conn.
	ListenerBuilder func() (net.Listener, error)
	// Time to debounce
	// By default, set to 0s to speed up tests
	DebounceTime time.Duration
	// EnableFakeXDSUpdater will use a XDSUpdater that can be used to watch events
	EnableFakeXDSUpdater bool
	// If set, SubjectAccessReview checks against the default cluster's fake client always succeed.
	DisableSecretAuthorization bool
	// Services to preload into the config generation test options.
	Services []*model.Service
	// Gateways to preload into the config generation test options.
	Gateways []model.NetworkGateway
}
// FakeDiscoveryServer is an in-memory XDS discovery server for tests, wired to fake
// service registries and (by default) an in-memory buffered gRPC listener.
type FakeDiscoveryServer struct {
	*v1alpha3.ConfigGenTest
	t test.Failer
	// Discovery is the discovery server under test.
	Discovery *xds.DiscoveryServer
	// Listener accepts XDS connections; BufListener is non-nil when Listener is an in-memory bufconn.
	Listener    net.Listener
	BufListener *bufconn.Listener
	// kubeClient is the fake kube client for the default cluster (see KubeClient()).
	kubeClient kubelib.Client
	// KubeRegistry is the fake controller for the default cluster.
	KubeRegistry *kube.FakeController
	XdsUpdater   model.XDSUpdater
	MemRegistry  *memregistry.ServiceDiscovery
}
// NewFakeDiscoveryServer constructs a fully wired FakeDiscoveryServer from opts:
// a fake kube client/registry per cluster, config controllers, ingress and gateway
// controllers, and an XDS gRPC server on an in-memory (or caller-provided) listener.
// It blocks until the initial push has been committed. All resources are cleaned up
// when the test ends.
func NewFakeDiscoveryServer(t test.Failer, opts FakeOptions) *FakeDiscoveryServer {
	m := opts.MeshConfig
	if m == nil {
		m = mesh.DefaultMeshConfig()
	}
	// Init with a dummy environment, since we have a circular dependency with the env creation.
	s := xds.NewDiscoveryServer(model.NewEnvironment(), map[string]string{})
	// Disable debounce to reduce test times
	s.DebounceOptions.DebounceAfter = opts.DebounceTime
	// Setup time to Now instead of process start to make logs not misleading
	s.DiscoveryStartTime = time.Now()
	t.Cleanup(s.Shutdown)
	// Any service change triggers a full push keyed on the ServiceEntry config.
	serviceHandler := func(_, curr *model.Service, _ model.Event) {
		pushReq := &model.PushRequest{
			Full:           true,
			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.ServiceEntry, Name: string(curr.Hostname), Namespace: curr.Attributes.Namespace}),
			Reason:         model.NewReasonStats(model.ServiceUpdate),
		}
		s.ConfigUpdate(pushReq)
	}
	if opts.DefaultClusterName == "" {
		opts.DefaultClusterName = "Kubernetes"
	}
	k8sObjects := getKubernetesObjects(t, opts)
	var defaultKubeClient kubelib.Client
	var defaultKubeController *kube.FakeController
	var registries []serviceregistry.Instance
	if opts.NetworksWatcher != nil {
		// Mesh network changes also trigger a full push.
		opts.NetworksWatcher.AddNetworksHandler(func() {
			s.ConfigUpdate(&model.PushRequest{
				Full:   true,
				Reason: model.NewReasonStats(model.NetworksTrigger),
			})
		})
	}
	var xdsUpdater model.XDSUpdater = s
	if opts.EnableFakeXDSUpdater {
		xdsUpdater = xdsfake.NewWithDelegate(s)
	}
	creds := kubesecrets.NewMulticluster(opts.DefaultClusterName)
	configController := memory.NewSyncController(memory.MakeSkipValidation(collections.PilotGatewayAPI()))
	// Create a fake kube client and controller per simulated cluster.
	for k8sCluster, objs := range k8sObjects {
		client := kubelib.NewFakeClientWithVersion(opts.KubernetesVersion, objs...)
		if opts.KubeClientModifier != nil {
			opts.KubeClientModifier(client)
		}
		// Only the default cluster is wired to the shared config controller.
		k8sConfig := configController
		if k8sCluster != opts.DefaultClusterName {
			k8sConfig = nil
		}
		k8s, _ := kube.NewFakeControllerWithOptions(t, kube.FakeControllerOptions{
			ServiceHandler:   serviceHandler,
			Client:           client,
			ClusterID:        k8sCluster,
			DomainSuffix:     "cluster.local",
			XDSUpdater:       xdsUpdater,
			NetworksWatcher:  opts.NetworksWatcher,
			SkipRun:          true,
			ConfigController: k8sConfig,
			ConfigCluster:    k8sCluster == opts.DefaultClusterName,
			MeshWatcher:      mesh.NewFixedWatcher(m),
		})
		stop := test.NewStop(t)
		// start default client informers after creating ingress/secret controllers
		if defaultKubeClient == nil || k8sCluster == opts.DefaultClusterName {
			defaultKubeClient = client
			if opts.DisableSecretAuthorization {
				DisableAuthorizationForSecret(defaultKubeClient.Kube().(*fake.Clientset))
			}
			defaultKubeController = k8s
		} else {
			client.RunAndWait(stop)
		}
		registries = append(registries, k8s)
		creds.ClusterAdded(&multicluster.Cluster{ID: k8sCluster, Client: client}, stop)
	}
	stop := test.NewStop(t)
	ingr := ingress.NewController(defaultKubeClient, mesh.NewFixedWatcher(m), kube.Options{
		DomainSuffix: "cluster.local",
	})
	defaultKubeClient.RunAndWait(stop)
	var gwc *gateway.Controller
	cg := v1alpha3.NewConfigGenTest(t, v1alpha3.TestOptions{
		Configs:             opts.Configs,
		ConfigString:        opts.ConfigString,
		ConfigTemplateInput: opts.ConfigTemplateInput,
		ConfigController:    configController,
		MeshConfig:          m,
		XDSUpdater:          xdsUpdater,
		NetworksWatcher:     opts.NetworksWatcher,
		ServiceRegistries:   registries,
		ConfigStoreCaches:   []model.ConfigStoreController{ingr},
		CreateConfigStore: func(c model.ConfigStoreController) model.ConfigStoreController {
			// Gateway API controller; gwc is captured so it can be assigned to the Env below.
			g := gateway.NewController(defaultKubeClient, c, func(class schema.GroupVersionResource, stop <-chan struct{}) bool {
				return true
			}, nil, kube.Options{
				DomainSuffix: "cluster.local",
			})
			gwc = g
			return gwc
		},
		SkipRun:   true,
		ClusterID: opts.DefaultClusterName,
		Services:  opts.Services,
		Gateways:  opts.Gateways,
	})
	cg.Registry.AppendServiceHandler(serviceHandler)
	s.Env = cg.Env()
	s.Env.GatewayAPIController = gwc
	if err := s.Env.InitNetworksManager(s); err != nil {
		t.Fatal(err)
	}
	// Register the standard generators, then override/extend for secrets and ECDS.
	bootstrap.InitGenerators(s, v1alpha3.NewConfigGenerator(s.Cache), "istio-system", "", nil)
	s.Generators[v3.SecretType] = xds.NewSecretGen(creds, s.Cache, opts.DefaultClusterName, nil)
	s.Generators[v3.ExtensionConfigurationType].(*xds.EcdsGenerator).SetCredController(creds)
	memRegistry := cg.MemRegistry
	memRegistry.XdsUpdater = s
	// Setup config handlers
	// TODO code re-use from server.go
	configHandler := func(_, curr config.Config, event model.Event) {
		pushReq := &model.PushRequest{
			Full:           true,
			ConfigsUpdated: sets.New(model.ConfigKey{Kind: kind.MustFromGVK(curr.GroupVersionKind), Name: curr.Name, Namespace: curr.Namespace}),
			Reason:         model.NewReasonStats(model.ConfigUpdate),
		}
		s.ConfigUpdate(pushReq)
	}
	schemas := collections.Pilot.All()
	if features.EnableGatewayAPI {
		schemas = collections.PilotGatewayAPI().All()
	}
	for _, schema := range schemas {
		// This resource type was handled in external/servicediscovery.go, no need to rehandle here.
		if schema.GroupVersionKind() == gvk.ServiceEntry {
			continue
		}
		if schema.GroupVersionKind() == gvk.WorkloadEntry {
			continue
		}
		cg.Store().RegisterEventHandler(schema.GroupVersionKind(), configHandler)
	}
	for _, registry := range registries {
		k8s, ok := registry.(*kube.FakeController)
		// this closely matches what we do in serviceregistry/kube/controller/multicluster.go
		if !ok || k8s.Cluster() != cg.ServiceEntryRegistry.Cluster() {
			continue
		}
		// Cross-wire workload handlers between the kube and ServiceEntry registries.
		cg.ServiceEntryRegistry.AppendWorkloadHandler(k8s.WorkloadInstanceHandler)
		k8s.AppendWorkloadHandler(cg.ServiceEntryRegistry.WorkloadInstanceHandler)
	}
	s.WorkloadEntryController = autoregistration.NewController(cg.Store(), "test", keepalive.Infinity)
	var listener net.Listener
	if opts.ListenerBuilder != nil {
		var err error
		if listener, err = opts.ListenerBuilder(); err != nil {
			t.Fatal(err)
		}
	} else {
		// Start in memory gRPC listener
		buffer := 1024 * 1024
		listener = bufconn.Listen(buffer)
	}
	grpcServer := grpc.NewServer()
	s.Register(grpcServer)
	go func() {
		// "closed" / ErrServerStopped are the expected shutdown errors; anything else fails the test.
		if err := grpcServer.Serve(listener); err != nil && !(err == grpc.ErrServerStopped || err.Error() == "closed") {
			t.Fatal(err)
		}
	}()
	t.Cleanup(func() {
		grpcServer.Stop()
		_ = listener.Close()
	})
	// Start the discovery server
	s.Start(stop)
	cg.ServiceEntryRegistry.XdsUpdater = s
	// Now that handlers are added, get everything started
	cg.Run()
	kubelib.WaitForCacheSync("fake", stop,
		cg.Registry.HasSynced,
		cg.Store().HasSynced)
	cg.ServiceEntryRegistry.ResyncEDS()
	// Send an update. This ensures that even if there are no configs provided, the push context is
	// initialized.
	s.ConfigUpdate(&model.PushRequest{Full: true})
	// Wait until initial updates are committed
	c := s.InboundUpdates.Load()
	retry.UntilOrFail(t, func() bool {
		return s.CommittedUpdates.Load() >= c
	}, retry.Delay(time.Millisecond))
	// Mark ourselves ready
	s.CachesSynced()
	bufListener, _ := listener.(*bufconn.Listener)
	fake := &FakeDiscoveryServer{
		t:             t,
		Discovery:     s,
		Listener:      listener,
		BufListener:   bufListener,
		ConfigGenTest: cg,
		kubeClient:    defaultKubeClient,
		KubeRegistry:  defaultKubeController,
		XdsUpdater:    xdsUpdater,
		MemRegistry:   memRegistry,
	}
	return fake
}
// KubeClient returns the fake kube client for the default cluster.
func (f *FakeDiscoveryServer) KubeClient() kubelib.Client {
	return f.kubeClient
}
// PushContext returns the current push context of the server's environment.
func (f *FakeDiscoveryServer) PushContext() *model.PushContext {
	return f.Env().PushContext()
}
// ConnectADS starts an ADS connection to the server. It will automatically be cleaned up when the test ends
func (f *FakeDiscoveryServer) ConnectADS() *xds.AdsTest {
	// Dial through the in-memory bufconn listener; the target name is arbitrary.
	dial := func(context.Context, string) (net.Conn, error) {
		return f.BufListener.Dial()
	}
	conn, err := grpc.Dial("buffcon",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
		grpc.WithContextDialer(dial))
	if err != nil {
		f.t.Fatalf("failed to connect: %v", err)
	}
	return xds.NewAdsTest(f.t, conn)
}
// ConnectDeltaADS starts a Delta ADS connection to the server. It will automatically be cleaned up when the test ends
func (f *FakeDiscoveryServer) ConnectDeltaADS() *xds.DeltaAdsTest {
	// Dial through the in-memory bufconn listener; the target name is arbitrary.
	dial := func(context.Context, string) (net.Conn, error) {
		return f.BufListener.Dial()
	}
	conn, err := grpc.Dial("buffcon",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock(),
		grpc.WithContextDialer(dial))
	if err != nil {
		f.t.Fatalf("failed to connect: %v", err)
	}
	return xds.NewDeltaAdsTest(f.t, conn)
}
func APIWatches() []string {
watches := []string{gvk.MeshConfig.String()}
for _, sch := range collections.Pilot.All() {
watches = append(watches, sch.GroupVersionKind().String())
}
return watches
}
// ConnectUnstarted creates (but does not run) an adsc connection to the server,
// issuing one initial DiscoveryRequest per watched type URL. The connection is
// closed automatically when the test ends.
func (f *FakeDiscoveryServer) ConnectUnstarted(p *model.Proxy, watch []string) *adsc.ADSC {
	f.t.Helper()
	p = f.SetupProxy(p)
	initialWatch := make([]*discovery.DiscoveryRequest, 0, len(watch))
	for _, typeURL := range watch {
		initialWatch = append(initialWatch, &discovery.DiscoveryRequest{TypeUrl: typeURL})
	}
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if f.BufListener != nil {
		// Route the connection through the in-memory listener when one is in use.
		dial := func(context.Context, string) (net.Conn, error) {
			return f.BufListener.Dial()
		}
		opts = append(opts, grpc.WithContextDialer(dial))
	}
	cfg := &adsc.ADSConfig{
		Config: adsc.Config{
			IP:        p.IPAddresses[0],
			NodeType:  p.Type,
			Meta:      p.Metadata.ToStruct(),
			Locality:  p.Locality,
			Namespace: p.ConfigNamespace,
			GrpcOpts:  opts,
		},
		InitialDiscoveryRequests: initialWatch,
	}
	conn, err := adsc.New(f.Listener.Addr().String(), cfg)
	if err != nil {
		f.t.Fatalf("Error connecting: %v", err)
	}
	f.t.Cleanup(func() {
		conn.Close()
	})
	return conn
}
// Connect starts an ADS connection to the server using adsc. It will automatically be cleaned up when the test ends
// watch can be configured to determine the resources to watch initially, and wait can be configured to determine what
// resources we should initially wait for.
func (f *FakeDiscoveryServer) Connect(p *model.Proxy, watch []string, wait []string) *adsc.ADSC {
	f.t.Helper()
	if watch == nil {
		// Default to watching clusters only.
		watch = []string{v3.ClusterType}
	}
	conn := f.ConnectUnstarted(p, watch)
	if err := conn.Run(); err != nil {
		f.t.Fatalf("ADSC: failed running: %v", err)
	}
	if len(wait) > 0 {
		if _, err := conn.Wait(10*time.Second, wait...); err != nil {
			f.t.Fatalf("Error getting initial for %v config: %v", wait, err)
		}
	}
	return conn
}
// Endpoints builds a ClusterLoadAssignment for every EDS cluster generated for the proxy.
func (f *FakeDiscoveryServer) Endpoints(p *model.Proxy) []*endpoint.ClusterLoadAssignment {
	clusterNames := xdstest.ExtractEdsClusterNames(f.Clusters(p))
	out := make([]*endpoint.ClusterLoadAssignment, 0, len(clusterNames))
	for _, name := range clusterNames {
		b := endpoints.NewEndpointBuilder(name, p, f.PushContext())
		out = append(out, b.BuildClusterLoadAssignment(f.Discovery.Env.EndpointIndex))
	}
	return out
}
// T returns the test.Failer this server was created with.
func (f *FakeDiscoveryServer) T() test.Failer {
	return f.t
}
// EnsureSynced checks that all ConfigUpdates sent have been established
// This does NOT ensure that the change has been sent to all proxies; only that PushContext is updated
// Typically, if trying to ensure changes are sent, its better to wait for the push event.
func (f *FakeDiscoveryServer) EnsureSynced(t test.Failer) {
	// Snapshot the inbound counter, then wait for the committed counter to catch up.
	inbound := f.Discovery.InboundUpdates.Load()
	check := func() bool {
		return f.Discovery.CommittedUpdates.Load() >= inbound
	}
	retry.UntilOrFail(t, check, retry.Delay(time.Millisecond))
}
// getKubernetesObjects merges all the Kubernetes object sources in opts into a
// per-cluster object map. When no source is provided, a single empty entry for
// the default cluster is returned so a registry is still created.
func getKubernetesObjects(t test.Failer, opts FakeOptions) map[cluster.ID][]runtime.Object {
	out := map[cluster.ID][]runtime.Object{}
	defaultCluster := opts.DefaultClusterName
	if len(opts.KubernetesObjects) > 0 {
		out[defaultCluster] = append(out[defaultCluster], opts.KubernetesObjects...)
	}
	if len(opts.KubernetesObjectString) > 0 {
		parsed, err := kubernetesObjectsFromString(opts.KubernetesObjectString)
		if err != nil {
			t.Fatalf("failed parsing KubernetesObjectString: %v", err)
		}
		out[defaultCluster] = append(out[defaultCluster], parsed...)
	}
	for clusterID, objectStr := range opts.KubernetesObjectStringByCluster {
		parsed, err := kubernetesObjectsFromString(objectStr)
		if err != nil {
			t.Fatalf("failed parsing KubernetesObjectStringByCluster for %s: %v", clusterID, err)
		}
		out[clusterID] = append(out[clusterID], parsed...)
	}
	for clusterID, clusterObjs := range opts.KubernetesObjectsByCluster {
		out[clusterID] = append(out[clusterID], clusterObjs...)
	}
	if len(out) == 0 {
		return map[cluster.ID][]runtime.Object{defaultCluster: {}}
	}
	return out
}
// kubernetesObjectsFromString parses a multi-document ("---"-separated) YAML/JSON
// string into Kubernetes runtime objects using the Istio codec. Empty documents
// are skipped; the first decode failure aborts parsing.
func kubernetesObjectsFromString(s string) ([]runtime.Object, error) {
	var objects []runtime.Object
	decode := kubelib.IstioCodec.UniversalDeserializer().Decode
	for _, doc := range strings.Split(s, "---") {
		if strings.TrimSpace(doc) == "" {
			continue
		}
		o, _, err := decode([]byte(doc), nil, nil)
		if err != nil {
			// Wrap with %w so callers can inspect the decode error with errors.Is/As.
			return nil, fmt.Errorf("failed deserializing kubernetes object: %w (%v)", err, doc)
		}
		objects = append(objects, o)
	}
	return objects, nil
}
// DisableAuthorizationForSecret makes the authorization check always pass. Should be used only for tests.
func DisableAuthorizationForSecret(fake *fake.Clientset) {
	allow := func(k8stesting.Action) (bool, runtime.Object, error) {
		review := &authorizationv1.SubjectAccessReview{
			Status: authorizationv1.SubjectAccessReviewStatus{
				Allowed: true,
			},
		}
		return true, review, nil
	}
	// Intercept SubjectAccessReview creation so every check is allowed.
	fake.Fake.PrependReactor("create", "subjectaccessreviews", allow)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
"fmt"
"sort"
"testing"
endpointv3 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
)
// LbEpInfo describes an expected load-balancing endpoint: its address and weight.
type LbEpInfo struct {
	// Address of the endpoint, compared against the socket address in the Envoy config.
	Address string
	// nolint: structcheck
	Weight uint32
}
// LocLbEpInfo describes an expected locality: its endpoints and the locality weight.
type LocLbEpInfo struct {
	// LbEps are the expected endpoints within this locality.
	LbEps []LbEpInfo
	// Weight is the expected locality-level load balancing weight.
	Weight uint32
}
// GetAddrs returns the addresses of all endpoints in this locality, in order.
func (i LocLbEpInfo) GetAddrs() []string {
	// Pre-size: exactly one address per endpoint.
	addrs := make([]string, 0, len(i.LbEps))
	for _, ep := range i.LbEps {
		addrs = append(addrs, ep.Address)
	}
	return addrs
}
// CompareEndpointsOrFail compares the endpoints against the expectation and
// reports any mismatch as a test error.
func CompareEndpointsOrFail(t *testing.T, cluster string, got []*endpointv3.LocalityLbEndpoints, want []LocLbEpInfo) {
	err := CompareEndpoints(cluster, got, want)
	if err != nil {
		t.Error(err)
	}
}
// CompareEndpoints verifies that the locality endpoint groups for a cluster match the
// expected groups: same group count, per-group LB endpoint count, group weight, and
// per-endpoint address/weight. Groups are sorted in place by the address of their
// first LB endpoint before positional comparison against want.
func CompareEndpoints(cluster string, got []*endpointv3.LocalityLbEndpoints, want []LocLbEpInfo) error {
	if len(got) != len(want) {
		return fmt.Errorf("unexpected number of filtered endpoints for %s: got %v, want %v", cluster, len(got), len(want))
	}
	sort.Slice(got, func(a, b int) bool {
		return got[a].LbEndpoints[0].GetEndpoint().Address.GetSocketAddress().Address <
			got[b].LbEndpoints[0].GetEndpoint().Address.GetSocketAddress().Address
	})
	for idx, group := range got {
		expected := want[idx]
		if len(group.LbEndpoints) != len(expected.LbEps) {
			return fmt.Errorf("unexpected number of LB endpoints within endpoint %d: %v, want %v",
				idx, getLbEndpointAddrs(group), expected.GetAddrs())
		}
		if group.LoadBalancingWeight.GetValue() != expected.Weight {
			return fmt.Errorf("unexpected weight for endpoint %d: got %v, want %v", idx, group.LoadBalancingWeight.GetValue(), expected.Weight)
		}
		for _, lbEp := range group.LbEndpoints {
			addr := lbEp.GetEndpoint().Address.GetSocketAddress().Address
			matched := false
			for _, wantEp := range expected.LbEps {
				if addr != wantEp.Address {
					continue
				}
				matched = true
				// Address matched; the endpoint weight must match as well.
				if lbEp.GetLoadBalancingWeight().Value != wantEp.Weight {
					return fmt.Errorf("unexpected weight for endpoint %s: got %v, want %v",
						addr, lbEp.GetLoadBalancingWeight().Value, wantEp.Weight)
				}
				break
			}
			if !matched {
				return fmt.Errorf("unexpected address for endpoint %d: %v", idx, addr)
			}
		}
	}
	return nil
}
// getLbEndpointAddrs returns the socket address of every LB endpoint in the group.
func getLbEndpointAddrs(ep *endpointv3.LocalityLbEndpoints) []string {
	out := make([]string, 0, len(ep.LbEndpoints))
	for _, lbEp := range ep.LbEndpoints {
		out = append(out, lbEp.GetEndpoint().Address.GetSocketAddress().Address)
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
"fmt"
"reflect"
"sort"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
tcpproxy "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/tcp_proxy/v3"
tls "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/anypb"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/protoconv"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// ExtractResource returns the set of names of all resources in res.
func ExtractResource(res model.Resources) sets.String {
	names := sets.New[string]()
	for _, r := range res {
		names.Insert(r.Name)
	}
	return names
}
// ExtractRoutesFromListeners returns the RDS route configuration names referenced by
// the HTTP connection managers across all filter chains of the given listeners.
func ExtractRoutesFromListeners(ll []*listener.Listener) []string {
	routes := []string{}
	for _, lst := range ll {
		for _, chain := range lst.FilterChains {
			for _, f := range chain.Filters {
				if f.Name != wellknown.HTTPConnectionManager {
					continue
				}
				h := SilentlyUnmarshalAny[hcm.HttpConnectionManager](f.GetTypedConfig())
				// Only RDS-based route specifiers carry a route config name.
				if rds, ok := h.GetRouteSpecifier().(*hcm.HttpConnectionManager_Rds); ok {
					routes = append(routes, rds.Rds.RouteConfigName)
				}
			}
		}
	}
	return routes
}
// ExtractClusterSecretResources returns the SDS secret resource names referenced by a
// cluster's transport socket and transport socket matches via UpstreamTlsContext config.
func ExtractClusterSecretResources(t test.Failer, c *cluster.Cluster) []string {
	names := sets.New[string]()
	sockets := []*core.TransportSocket{}
	if c.TransportSocket != nil {
		sockets = append(sockets, c.TransportSocket)
	}
	for _, m := range c.TransportSocketMatches {
		sockets = append(sockets, m.TransportSocket)
	}
	for _, sock := range sockets {
		// Only UpstreamTlsContext sockets reference SDS secrets here.
		if sock.GetTypedConfig().TypeUrl != TypeName[*tls.UpstreamTlsContext]() {
			continue
		}
		tlsCtx := UnmarshalAny[tls.UpstreamTlsContext](t, sock.GetTypedConfig())
		names.Insert(tlsCtx.GetCommonTlsContext().GetCombinedValidationContext().GetValidationContextSdsSecretConfig().GetName())
		for _, cfg := range tlsCtx.GetCommonTlsContext().GetTlsCertificateSdsSecretConfigs() {
			names.Insert(cfg.GetName())
		}
	}
	return names.UnsortedList()
}
// ExtractListenerSecretResources returns the SDS secret resource names referenced by the
// transport sockets of a listener's filter chains (including the default filter chain).
func ExtractListenerSecretResources(t test.Failer, l *listener.Listener) []string {
	names := sets.New[string]()
	sockets := []*core.TransportSocket{}
	for _, fc := range l.GetFilterChains() {
		if ts := fc.GetTransportSocket(); ts != nil {
			sockets = append(sockets, ts)
		}
	}
	if ts := l.GetDefaultFilterChain().GetTransportSocket(); ts != nil {
		sockets = append(sockets, ts)
	}
	for _, sock := range sockets {
		tlsCtx := UnmarshalAny[tls.DownstreamTlsContext](t, sock.GetTypedConfig())
		names.Insert(tlsCtx.GetCommonTlsContext().GetCombinedValidationContext().GetValidationContextSdsSecretConfig().GetName())
		for _, cfg := range tlsCtx.GetCommonTlsContext().GetTlsCertificateSdsSecretConfigs() {
			names.Insert(cfg.GetName())
		}
	}
	return names.UnsortedList()
}
// ExtractSecretResources fetches all referenced SDS resource names from a list of clusters and listeners
// (passed as raw Any resources). Empty names are dropped and the result is reverse-sorted.
func ExtractSecretResources(t test.Failer, rs []*anypb.Any) []string {
	names := sets.New[string]()
	// insertCommon records the SDS references of a CommonTlsContext.
	insertCommon := func(common *tls.CommonTlsContext) {
		names.Insert(common.GetCombinedValidationContext().GetValidationContextSdsSecretConfig().GetName())
		for _, cfg := range common.GetTlsCertificateSdsSecretConfigs() {
			names.Insert(cfg.GetName())
		}
	}
	for _, r := range rs {
		switch r.TypeUrl {
		case v3.ClusterType:
			c := UnmarshalAny[cluster.Cluster](t, r)
			sockets := []*core.TransportSocket{}
			if c.TransportSocket != nil {
				sockets = append(sockets, c.TransportSocket)
			}
			for _, m := range c.TransportSocketMatches {
				sockets = append(sockets, m.TransportSocket)
			}
			for _, s := range sockets {
				// Clusters carry upstream TLS contexts.
				insertCommon(UnmarshalAny[tls.UpstreamTlsContext](t, s.GetTypedConfig()).GetCommonTlsContext())
			}
		case v3.ListenerType:
			l := UnmarshalAny[listener.Listener](t, r)
			sockets := []*core.TransportSocket{}
			for _, fc := range l.GetFilterChains() {
				if ts := fc.GetTransportSocket(); ts != nil {
					sockets = append(sockets, ts)
				}
			}
			if ts := l.GetDefaultFilterChain().GetTransportSocket(); ts != nil {
				sockets = append(sockets, ts)
			}
			for _, s := range sockets {
				// Listeners carry downstream TLS contexts.
				insertCommon(UnmarshalAny[tls.DownstreamTlsContext](t, s.GetTypedConfig()).GetCommonTlsContext())
			}
		}
	}
	names.Delete("")
	out := names.UnsortedList()
	sort.Sort(sort.Reverse(sort.StringSlice(out)))
	return out
}
// ExtractListenerNames returns the names of all listeners, in order.
func ExtractListenerNames(ll []*listener.Listener) []string {
	names := make([]string, 0, len(ll))
	for _, l := range ll {
		names = append(names, l.Name)
	}
	return names
}
// SilentlyUnmarshalAny unmarshals a into a new T, returning nil on failure
// instead of reporting the error.
func SilentlyUnmarshalAny[T any](a *anypb.Any) *T {
	dst := any(new(T)).(proto.Message)
	if err := a.UnmarshalTo(dst); err != nil {
		return nil
	}
	return any(dst).(*T)
}
// UnmarshalAny unmarshals a into a new T, failing the test on any decode error.
func UnmarshalAny[T any](t test.Failer, a *anypb.Any) *T {
	dst := new(T)
	msg := any(dst).(proto.Message)
	if err := a.UnmarshalTo(msg); err != nil {
		t.Fatalf("failed to unmarshal to %T: %v", msg, err)
	}
	return dst
}
// ExtractListener returns the listener with the given name, or nil if not found.
func ExtractListener(name string, ll []*listener.Listener) *listener.Listener {
	for _, candidate := range ll {
		if candidate.Name != name {
			continue
		}
		return candidate
	}
	return nil
}
// ExtractVirtualHosts returns a map from each virtual host domain to the sorted
// list of route cluster destinations configured for that virtual host.
func ExtractVirtualHosts(rc *route.RouteConfiguration) map[string][]string {
	res := map[string][]string{}
	for _, vh := range rc.GetVirtualHosts() {
		var dests []string
		for _, rt := range vh.Routes {
			if c := rt.GetRoute().GetCluster(); c != "" {
				dests = append(dests, c)
			}
		}
		sort.Strings(dests)
		// Every domain of the virtual host maps to the same destination list.
		for _, domain := range vh.Domains {
			res[domain] = dests
		}
	}
	return res
}
// ExtractRouteConfigurations returns a map of route configuration name to configuration.
func ExtractRouteConfigurations(rc []*route.RouteConfiguration) map[string]*route.RouteConfiguration {
	byName := make(map[string]*route.RouteConfiguration, len(rc))
	for _, cfg := range rc {
		byName[cfg.Name] = cfg
	}
	return byName
}
// ExtractListenerFilters returns a map of listener filter name to filter.
func ExtractListenerFilters(l *listener.Listener) map[string]*listener.ListenerFilter {
	byName := make(map[string]*listener.ListenerFilter, len(l.ListenerFilters))
	for _, f := range l.ListenerFilters {
		byName[f.Name] = f
	}
	return byName
}
// ExtractFilterChain returns the filter chain with the given name, or nil if not found.
func ExtractFilterChain(name string, l *listener.Listener) *listener.FilterChain {
	for _, fc := range l.GetFilterChains() {
		if fc.GetName() != name {
			continue
		}
		return fc
	}
	return nil
}
// ExtractFilterChainNames returns the names of all filter chains on the listener, in order.
func ExtractFilterChainNames(l *listener.Listener) []string {
	chains := l.GetFilterChains()
	names := make([]string, 0, len(chains))
	for _, fc := range chains {
		names = append(names, fc.GetName())
	}
	return names
}
// ExtractFilterNames returns the network filter names of the chain and, for any HTTP
// connection manager filter, the HTTP filter names inside it.
func ExtractFilterNames(t test.Failer, fcs *listener.FilterChain) ([]string, []string) {
	networkFilters := []string{}
	httpFilters := []string{}
	for _, f := range fcs.Filters {
		if f.Name == wellknown.HTTPConnectionManager {
			h := &hcm.HttpConnectionManager{}
			// A missing typed config leaves h as the zero message (no HTTP filters).
			if cfg := f.GetTypedConfig(); cfg != nil {
				if err := cfg.UnmarshalTo(h); err != nil {
					t.Fatalf("failed to unmarshal hcm: %v", err)
				}
			}
			for _, hf := range h.HttpFilters {
				httpFilters = append(httpFilters, hf.Name)
			}
		}
		networkFilters = append(networkFilters, f.Name)
	}
	return networkFilters, httpFilters
}
// ExtractTCPProxy returns the TcpProxy config of the first tcp_proxy filter in the
// chain, or nil if no such filter exists.
func ExtractTCPProxy(t test.Failer, fcs *listener.FilterChain) *tcpproxy.TcpProxy {
	for _, f := range fcs.Filters {
		if f.Name != wellknown.TCPProxy {
			continue
		}
		cfg := &tcpproxy.TcpProxy{}
		if tc := f.GetTypedConfig(); tc != nil {
			if err := tc.UnmarshalTo(cfg); err != nil {
				t.Fatalf("failed to unmarshal tcp proxy: %v", err)
			}
		}
		return cfg
	}
	return nil
}
// ExtractHTTPConnectionManager returns the HttpConnectionManager config of the first
// HCM filter in the chain, or nil if no such filter exists.
func ExtractHTTPConnectionManager(t test.Failer, fcs *listener.FilterChain) *hcm.HttpConnectionManager {
	for _, f := range fcs.Filters {
		if f.Name != wellknown.HTTPConnectionManager {
			continue
		}
		cfg := &hcm.HttpConnectionManager{}
		if tc := f.GetTypedConfig(); tc != nil {
			if err := tc.UnmarshalTo(cfg); err != nil {
				t.Fatalf("failed to unmarshal hcm: %v", err)
			}
		}
		return cfg
	}
	return nil
}
// ExtractLocalityLbEndpoints returns a map of cluster name to its locality LB
// endpoint groups; nil assignments are skipped.
func ExtractLocalityLbEndpoints(cla []*endpoint.ClusterLoadAssignment) map[string][]*endpoint.LocalityLbEndpoints {
	res := map[string][]*endpoint.LocalityLbEndpoints{}
	for _, assignment := range cla {
		if assignment == nil {
			continue
		}
		res[assignment.ClusterName] = assignment.Endpoints
	}
	return res
}
// ExtractLoadAssignments returns a map of cluster name to all endpoint address strings
// (healthy and unhealthy); nil assignments are skipped.
func ExtractLoadAssignments(cla []*endpoint.ClusterLoadAssignment) map[string][]string {
	res := map[string][]string{}
	for _, assignment := range cla {
		if assignment == nil {
			continue
		}
		res[assignment.ClusterName] = append(res[assignment.ClusterName], ExtractEndpoints(assignment)...)
	}
	return res
}
// ExtractHealthEndpoints returns all healthy and unhealthy endpoint address strings from
// the load assignment. Socket addresses render as "address:port", pipes as their path,
// and Envoy internal addresses as "serverListener;destination" (destination taken from
// the original destination filter metadata "local" field).
func ExtractHealthEndpoints(cla *endpoint.ClusterLoadAssignment) ([]string, []string) {
	if cla == nil {
		return nil, nil
	}
	healthy := []string{}
	unhealthy := []string{}
	for _, group := range cla.Endpoints {
		for _, lb := range group.LbEndpoints {
			var addr string
			switch lb.GetEndpoint().GetAddress().Address.(type) {
			case *core.Address_SocketAddress:
				sa := lb.GetEndpoint().Address.GetSocketAddress()
				addr = fmt.Sprintf("%s:%d", sa.Address, sa.GetPortValue())
			case *core.Address_Pipe:
				addr = lb.GetEndpoint().Address.GetPipe().Path
			case *core.Address_EnvoyInternalAddress:
				internal := lb.GetEndpoint().Address.GetEnvoyInternalAddress().GetServerListenerName()
				dest := lb.GetMetadata().GetFilterMetadata()[util.OriginalDstMetadataKey].GetFields()["local"].GetStringValue()
				addr = fmt.Sprintf("%s;%s", internal, dest)
			}
			// Anything not explicitly HEALTHY (including UNKNOWN) counts as unhealthy here.
			if lb.HealthStatus == core.HealthStatus_HEALTHY {
				healthy = append(healthy, addr)
			} else {
				unhealthy = append(unhealthy, addr)
			}
		}
	}
	return healthy, unhealthy
}
// ExtractEndpoints returns all endpoints in the load assignment (including unhealthy endpoints)
func ExtractEndpoints(cla *endpoint.ClusterLoadAssignment) []string {
	healthy, unhealthy := ExtractHealthEndpoints(cla)
	return append(healthy, unhealthy...)
}
// ExtractClusters returns a map of cluster name to cluster.
func ExtractClusters(cc []*cluster.Cluster) map[string]*cluster.Cluster {
	byName := make(map[string]*cluster.Cluster, len(cc))
	for _, c := range cc {
		byName[c.Name] = c
	}
	return byName
}
// ExtractCluster returns the cluster with the given name, or nil if not present.
func ExtractCluster(name string, cc []*cluster.Cluster) *cluster.Cluster {
	byName := ExtractClusters(cc)
	return byName[name]
}
// ExtractClusterEndpoints returns a map of cluster name to the endpoint addresses of
// each cluster's inlined load assignment.
func ExtractClusterEndpoints(clusters []*cluster.Cluster) map[string][]string {
	assignments := make([]*endpoint.ClusterLoadAssignment, 0, len(clusters))
	for _, c := range clusters {
		assignments = append(assignments, c.LoadAssignment)
	}
	return ExtractLoadAssignments(assignments)
}
// ExtractEdsClusterNames returns the names of all clusters whose discovery type is EDS.
func ExtractEdsClusterNames(cl []*cluster.Cluster) []string {
	res := []string{}
	for _, c := range cl {
		// Only the explicit Cluster_Type discovery variant can indicate EDS.
		dt, ok := c.ClusterDiscoveryType.(*cluster.Cluster_Type)
		if !ok || dt.Type != cluster.Cluster_EDS {
			continue
		}
		res = append(res, c.Name)
	}
	return res
}
// ExtractTLSSecrets decodes raw Any resources into TLS secrets keyed by secret name.
func ExtractTLSSecrets(t test.Failer, secrets []*anypb.Any) map[string]*tls.Secret {
	byName := make(map[string]*tls.Secret, len(secrets))
	for _, raw := range secrets {
		s := UnmarshalAny[tls.Secret](t, raw)
		byName[s.Name] = s
	}
	return byName
}
// UnmarshalRouteConfiguration decodes raw Any resources into RouteConfigurations,
// failing the test on any decode error.
func UnmarshalRouteConfiguration(t test.Failer, resp []*anypb.Any) []*route.RouteConfiguration {
	out := make([]*route.RouteConfiguration, 0, len(resp))
	for _, raw := range resp {
		rc := &route.RouteConfiguration{}
		if err := raw.UnmarshalTo(rc); err != nil {
			t.Fatal(err)
		}
		out = append(out, rc)
	}
	return out
}
// UnmarshalClusterLoadAssignment decodes raw Any resources into ClusterLoadAssignments,
// failing the test on any decode error.
func UnmarshalClusterLoadAssignment(t test.Failer, resp []*anypb.Any) []*endpoint.ClusterLoadAssignment {
	out := make([]*endpoint.ClusterLoadAssignment, 0, len(resp))
	for _, raw := range resp {
		cla := &endpoint.ClusterLoadAssignment{}
		if err := raw.UnmarshalTo(cla); err != nil {
			t.Fatal(err)
		}
		out = append(out, cla)
	}
	return out
}
// FilterClusters returns the clusters for which the predicate f reports true.
func FilterClusters(cl []*cluster.Cluster, f func(c *cluster.Cluster) bool) []*cluster.Cluster {
	matched := make([]*cluster.Cluster, 0, len(cl))
	for _, c := range cl {
		if f(c) {
			matched = append(matched, c)
		}
	}
	return matched
}
// ToDiscoveryResponse wraps a list of protos into a DiscoveryResponse, deriving the
// TypeUrl from the first resource. An empty input yields an empty response with no
// TypeUrl (previously this indexed resources[0] unconditionally and panicked on an
// empty slice).
func ToDiscoveryResponse[T proto.Message](p []T) *discovery.DiscoveryResponse {
	resources := make([]*anypb.Any, 0, len(p))
	for _, v := range p {
		resources = append(resources, protoconv.MessageToAny(v))
	}
	resp := &discovery.DiscoveryResponse{
		Resources: resources,
	}
	if len(resources) > 0 {
		resp.TypeUrl = resources[0].TypeUrl
	}
	return resp
}
// DumpList will dump a list of protos.
// Each element must be a proto.Message; anything else fails the test.
func DumpList[T any](t test.Failer, protoList []T) []string {
	res := make([]string, 0, len(protoList))
	for _, item := range protoList {
		msg, ok := any(item).(proto.Message)
		if !ok {
			t.Fatalf("expected proto, got %T", item)
		}
		res = append(res, Dump(t, msg))
	}
	return res
}
// Dump renders a proto message as indented JSON. A nil message — including a
// typed-nil pointer boxed in the interface — renders as "nil".
func Dump(t test.Failer, p proto.Message) string {
	if p == nil {
		return "nil"
	}
	if v := reflect.ValueOf(p); v.Kind() == reflect.Ptr && v.IsNil() {
		return "nil"
	}
	s, err := protomarshal.ToJSONWithIndent(p, " ")
	if err != nil {
		t.Fatal(err)
	}
	return s
}
// MapKeys returns the keys of mp in sorted order.
func MapKeys[M ~map[string]V, V any](mp M) []string {
	keys := maps.Keys(mp)
	sort.Strings(keys)
	return keys
}
// TypeName returns the Envoy type URL for the proto message type T
// (the API type prefix followed by the message's full proto name).
func TypeName[T proto.Message]() string {
	// new(T) yields a *T whose zero value (a nil message pointer for generated
	// protos) is only used to reach the type descriptor, never its data.
	ft := new(T)
	return resource.APITypePrefix + string((*ft).ProtoReflect().Descriptor().FullName())
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
"context"
"time"
"google.golang.org/grpc"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/sleep"
)
// slowClientStream wraps a grpc.ClientStream, injecting an artificial delay
// before each RecvMsg and SendMsg call.
type slowClientStream struct {
	grpc.ClientStream
	// recv and send are the delays applied before RecvMsg and SendMsg respectively;
	// a non-positive value disables the delay.
	recv, send time.Duration
}
// RecvMsg sleeps for the configured recv delay (bounded by the stream context)
// before delegating to the wrapped stream.
func (w *slowClientStream) RecvMsg(m any) error {
	if w.recv <= 0 {
		return w.ClientStream.RecvMsg(m)
	}
	sleep.UntilContext(w.Context(), w.recv)
	log.Infof("delayed recv for %v", w.recv)
	return w.ClientStream.RecvMsg(m)
}
// SendMsg sleeps for the configured send delay (bounded by the stream context)
// before delegating to the wrapped stream.
func (w *slowClientStream) SendMsg(m any) error {
	if w.send <= 0 {
		return w.ClientStream.SendMsg(m)
	}
	sleep.UntilContext(w.Context(), w.send)
	log.Infof("delayed send for %v", w.send)
	return w.ClientStream.SendMsg(m)
}
// SlowClientInterceptor is an interceptor that allows injecting delays on Send and Recv
func SlowClientInterceptor(recv, send time.Duration) grpc.StreamClientInterceptor {
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn,
		method string, streamer grpc.Streamer, opts ...grpc.CallOption,
	) (grpc.ClientStream, error) {
		inner, err := streamer(ctx, desc, cc, method, opts...)
		wrapped := &slowClientStream{ClientStream: inner, recv: recv, send: send}
		return wrapped, err
	}
}
// slowServerStream wraps a grpc.ServerStream, injecting an artificial delay
// before each RecvMsg and SendMsg call.
type slowServerStream struct {
	grpc.ServerStream
	// recv and send are the delays applied before RecvMsg and SendMsg respectively;
	// a non-positive value disables the delay.
	recv, send time.Duration
}
// RecvMsg sleeps for the configured recv delay (bounded by the stream context)
// before delegating to the wrapped stream.
func (w *slowServerStream) RecvMsg(m any) error {
	if w.recv <= 0 {
		return w.ServerStream.RecvMsg(m)
	}
	sleep.UntilContext(w.Context(), w.recv)
	log.Infof("delayed recv for %v", w.recv)
	return w.ServerStream.RecvMsg(m)
}
// SendMsg sleeps for the configured send delay (bounded by the stream context)
// before delegating to the wrapped stream.
func (w *slowServerStream) SendMsg(m any) error {
	if w.send <= 0 {
		return w.ServerStream.SendMsg(m)
	}
	sleep.UntilContext(w.Context(), w.send)
	log.Infof("delayed send for %v", w.send)
	return w.ServerStream.SendMsg(m)
}
// SlowServerInterceptor is an interceptor that allows injecting delays on Send and Recv
func SlowServerInterceptor(recv, send time.Duration) grpc.StreamServerInterceptor {
	return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		wrapped := &slowServerStream{ServerStream: ss, recv: recv, send: send}
		return handler(srv, wrapped)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"google.golang.org/grpc"
"google.golang.org/grpc/test/bufconn"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/test"
)
// MockDiscovery is a DiscoveryServer that allows users full control over responses.
type MockDiscovery struct {
	// Listener is the in-memory (bufconn) listener clients should dial.
	Listener *bufconn.Listener
	// responses queues state-of-the-world responses to be sent on an active stream.
	responses chan *discovery.DiscoveryResponse
	// deltaResponses queues delta responses to be sent on an active delta stream.
	deltaResponses chan *discovery.DeltaDiscoveryResponse
	// close signals all active streams to terminate.
	close chan struct{}
}
// NewMockServer starts an in-memory gRPC ADS server backed by MockDiscovery.
// The server and its streams are shut down automatically via test cleanup.
func NewMockServer(t test.Failer) *MockDiscovery {
	s := &MockDiscovery{
		close:          make(chan struct{}),
		responses:      make(chan *discovery.DiscoveryResponse),
		deltaResponses: make(chan *discovery.DeltaDiscoveryResponse),
	}
	buffer := 1024 * 1024
	listener := bufconn.Listen(buffer)
	grpcServer := grpc.NewServer()
	discovery.RegisterAggregatedDiscoveryServiceServer(grpcServer, s)
	go func() {
		// ErrServerStopped and the bufconn "closed" error are expected during shutdown.
		// NOTE(review): t.Fatal here runs on a non-test goroutine; testing.T documents
		// that Fatal must be called from the test goroutine — confirm the test.Failer
		// implementations used here tolerate this.
		if err := grpcServer.Serve(listener); err != nil && !(err == grpc.ErrServerStopped || err.Error() == "closed") {
			t.Fatal(err)
		}
	}()
	t.Cleanup(func() {
		grpcServer.Stop()
		close(s.close)
	})
	s.Listener = listener
	return s
}
// StreamAggregatedResources implements the SotW ADS stream: it forwards queued
// responses to the connected client until the server is closed or a send fails.
func (f *MockDiscovery) StreamAggregatedResources(server discovery.AggregatedDiscoveryService_StreamAggregatedResourcesServer) error {
	sent := 0
	for {
		select {
		case <-f.close:
			return nil
		case resp := <-f.responses:
			sent++
			log.Infof("sending response from mock: %v", sent)
			if err := server.Send(resp); err != nil {
				return err
			}
		}
	}
}
// DeltaAggregatedResources implements the delta ADS stream: it forwards queued delta
// responses to the connected client until the server is closed or a send fails.
func (f *MockDiscovery) DeltaAggregatedResources(server discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesServer) error {
	sent := 0
	for {
		select {
		case <-f.close:
			return nil
		case resp := <-f.deltaResponses:
			sent++
			log.Infof("sending delta response from mock: %v", sent)
			if err := server.Send(resp); err != nil {
				return err
			}
		}
	}
}
// SendResponse sends a response to a (random) client. This can block if sends are blocked.
func (f *MockDiscovery) SendResponse(dr *discovery.DiscoveryResponse) {
	f.responses <- dr
}

// SendDeltaResponse sends a response to a (random) client. This can block if sends are blocked.
func (f *MockDiscovery) SendDeltaResponse(dr *discovery.DeltaDiscoveryResponse) {
	f.deltaResponses <- dr
}

// Compile-time assertion that MockDiscovery implements the ADS server interface.
var _ discovery.AggregatedDiscoveryServiceServer = &MockDiscovery{}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
)
// EvaluateListenerFilterPredicates runs through the ListenerFilterChainMatchPredicate logic
// This is exposed for testing only, and should not be used in XDS generation code
func EvaluateListenerFilterPredicates(predicate *listener.ListenerFilterChainMatchPredicate, port int) bool {
	if predicate == nil {
		// No predicate means the filter always applies.
		return true
	}
	switch rule := predicate.Rule.(type) {
	case *listener.ListenerFilterChainMatchPredicate_NotMatch:
		return !EvaluateListenerFilterPredicates(rule.NotMatch, port)
	case *listener.ListenerFilterChainMatchPredicate_OrMatch:
		for _, sub := range rule.OrMatch.Rules {
			if EvaluateListenerFilterPredicates(sub, port) {
				return true
			}
		}
		return false
	case *listener.ListenerFilterChainMatchPredicate_DestinationPortRange:
		// Start is inclusive and End is exclusive, matching the range fields used here.
		p := int32(port)
		return p >= rule.DestinationPortRange.GetStart() && p < rule.DestinationPortRange.GetEnd()
	default:
		panic("unsupported predicate")
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xdstest
import (
"strings"
"testing"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
istio_route "istio.io/istio/pilot/pkg/networking/core/v1alpha3/route"
xdsfilters "istio.io/istio/pilot/pkg/xds/filters"
"istio.io/istio/pkg/util/sets"
)
// ValidateListeners checks every listener for validity and reports duplicate names.
func ValidateListeners(t testing.TB, ls []*listener.Listener) {
	t.Helper()
	seen := sets.New[string]()
	for _, l := range ls {
		if seen.InsertContains(l.Name) {
			t.Errorf("duplicate listener name %v", l.Name)
		}
		ValidateListener(t, l)
	}
}
// ValidateListener runs Envoy proto validation plus Istio-specific structural checks
// (TLS inspector presence, TLS termination, filter chain match uniqueness, inbound
// conventions, and duplicate listener filters) on a single listener.
func ValidateListener(t testing.TB, l *listener.Listener) {
	t.Helper()
	if err := l.Validate(); err != nil {
		t.Errorf("listener %v is invalid: %v", l.Name, err)
	}
	validateInspector(t, l)
	validateListenerTLS(t, l)
	validateFilterChainMatch(t, l)
	validateInboundListener(t, l)
	validateListenerFilters(t, l)
}
// validateListenerFilters reports any duplicate listener filter names.
func validateListenerFilters(t testing.TB, l *listener.Listener) {
	seen := sets.New[string]()
	for _, lf := range l.GetListenerFilters() {
		name := lf.GetName()
		if seen.InsertContains(name) {
			// Technically legal in Envoy but should always be a bug when done in Istio based on our usage
			t.Errorf("listener contains duplicate listener filter: %v", name)
		}
	}
}
// validateInboundListener applies structural checks that only apply to the merged
// inbound listener (port 15006 with INBOUND traffic direction); other listeners are skipped.
func validateInboundListener(t testing.TB, l *listener.Listener) {
	if l.GetAddress().GetSocketAddress().GetPortValue() != 15006 {
		// Not an inbound port
		return
	}
	if l.GetTrafficDirection() != core.TrafficDirection_INBOUND {
		// Not an inbound listener
		return
	}
	for i, fc := range l.GetFilterChains() {
		if fc.FilterChainMatch == nil {
			t.Errorf("nil filter chain %d", i)
			continue
		}
		if fc.FilterChainMatch.TransportProtocol == "" && fc.FilterChainMatch.GetDestinationPort().GetValue() != 15006 {
			// Not setting transport protocol may lead to unexpected matching behavior due to https://github.com/istio/istio/issues/26079
			// This is not *always* a bug, just a guideline - the 15006 blocker filter chain doesn't follow this rule and is excluded.
			t.Errorf("filter chain %d had no transport protocol set", i)
		}
	}
}
// validateFilterChainMatch checks two properties of a listener's filter chains:
// (1) no two filter chains have identical match rules (Envoy rejects these), and
// (2) for any destination port where at least one chain sets a transport protocol,
// every chain on that port sets one, avoiding ambiguous wildcard matching.
func validateFilterChainMatch(t testing.TB, l *listener.Listener) {
	t.Helper()
	// Check for duplicate filter chains, to avoid "multiple filter chains with the same matching rules are defined" error
	check := map[string]int{}
	for i1, l1 := range l.FilterChains {
		// We still create virtual inbound listeners before merging into single inbound
		// This hack skips these ones, as they will be processed later
		if hcm := ExtractHTTPConnectionManager(t, l1); strings.HasPrefix(hcm.GetStatPrefix(), "inbound_") && l.Name != "virtualInbound" {
			continue
		}
		// Matches are compared by their JSON dump, keyed to the first index seen.
		s := Dump(t, l1.FilterChainMatch)
		if i2, ok := check[s]; ok {
			var fcms []string
			for _, fc := range l.FilterChains {
				fcms = append(fcms, Dump(t, fc.GetFilterChainMatch()))
			}
			t.Errorf("overlapping filter chains %d and %d:\n%v\n Full listener: %v", i1, i2, strings.Join(fcms, ",\n"), Dump(t, l))
		} else {
			check[s] = i1
		}
	}
	// Due to the trie based logic of FCM, an unset field is only a wildcard if no
	// other FCM sets it. Therefore, we should ensure we explicitly set the FCM on
	// all match clauses if its set on any other match clause See
	// https://github.com/envoyproxy/envoy/issues/12572 for details
	destPorts := sets.New[uint32]()
	for _, fc := range l.FilterChains {
		if fc.GetFilterChainMatch().GetDestinationPort() != nil {
			destPorts.Insert(fc.GetFilterChainMatch().GetDestinationPort().GetValue())
		}
	}
	for p := range destPorts {
		hasTLSInspector := false
		for _, fc := range l.FilterChains {
			if p == fc.GetFilterChainMatch().GetDestinationPort().GetValue() && fc.GetFilterChainMatch().GetTransportProtocol() != "" {
				hasTLSInspector = true
			}
		}
		if hasTLSInspector {
			for _, fc := range l.FilterChains {
				if p == fc.GetFilterChainMatch().GetDestinationPort().GetValue() && fc.GetFilterChainMatch().GetTransportProtocol() == "" {
					// Note: matches [{transport=tls},{}] and [{transport=tls},{transport=buffer}]
					// are equivalent, so technically this error is overly sensitive. However, for
					// more complicated use cases its generally best to be explicit rather than
					// assuming that {} will be treated as wildcard when in reality it may not be.
					// Instead, we should explicitly double the filter chain (one for raw buffer, one
					// for TLS)
					t.Errorf("filter chain should have transport protocol set for port %v: %v", p, Dump(t, fc))
				}
			}
		}
	}
}
// validateListenerTLS ensures any filter chain that matches TLS traffic and serves HTTP
// actually terminates the TLS (i.e. carries a transport socket).
func validateListenerTLS(t testing.TB, l *listener.Listener) {
	t.Helper()
	for _, fc := range l.FilterChains {
		match := fc.FilterChainMatch
		if match == nil {
			continue
		}
		// if we are matching TLS traffic and doing HTTP traffic, we must terminate the TLS
		if match.TransportProtocol == xdsfilters.TLSTransportProtocol &&
			fc.TransportSocket == nil &&
			ExtractHTTPConnectionManager(t, fc) != nil {
			t.Errorf("listener %v is invalid: tls traffic may not be terminated: %v", l.Name, Dump(t, fc))
		}
	}
}
// validateInspector checks that a TLS inspector listener filter is present whenever any
// filter chain match requires it.
// matches logic in https://github.com/envoyproxy/envoy/blob/22683a0a24ffbb0cdeb4111eec5ec90246bec9cb/source/server/listener_impl.cc#L41
func validateInspector(t testing.TB, l *listener.Listener) {
	t.Helper()
	for _, lf := range l.ListenerFilters {
		if lf.Name == xdsfilters.TLSInspector.Name {
			// Inspector present; nothing further to check.
			return
		}
	}
	for _, fc := range l.FilterChains {
		match := fc.FilterChainMatch
		if match == nil {
			continue
		}
		if match.TransportProtocol == xdsfilters.TLSTransportProtocol {
			t.Errorf("transport protocol set, but missing tls inspector: %v", Dump(t, l))
		}
		if match.TransportProtocol == "" && len(match.ServerNames) > 0 {
			t.Errorf("server names set, but missing tls inspector: %v", Dump(t, l))
		}
		// This is a bit suspect; I suspect this could be done with just http inspector without tls inspector,
		// but this mirrors Envoy validation logic
		if match.TransportProtocol == "" && len(match.ApplicationProtocols) > 0 {
			t.Errorf("application protocol set, but missing tls inspector: %v", Dump(t, l))
		}
	}
}
// ValidateClusters checks every cluster for validity and reports duplicate names.
func ValidateClusters(t testing.TB, ls []*cluster.Cluster) {
	found := sets.New[string]()
	for _, l := range ls {
		// Use InsertContains (insert + report prior membership in one call) for
		// consistency with ValidateListeners and ValidateRouteConfigurations,
		// which use the same duplicate-detection idiom.
		if found.InsertContains(l.Name) {
			t.Errorf("duplicate cluster name %v", l.Name)
		}
		ValidateCluster(t, l)
	}
}
// ValidateCluster checks a single cluster for proto validity and TLS configuration consistency.
func ValidateCluster(t testing.TB, c *cluster.Cluster) {
	err := c.Validate()
	if err != nil {
		t.Errorf("cluster %v is invalid: %v", c.Name, err)
	}
	validateClusterTLS(t, c)
}
// validateClusterTLS ensures a cluster does not set both transport_socket and
// transport_socket_matches, which are mutually exclusive.
func validateClusterTLS(t testing.TB, c *cluster.Cluster) {
	if c.TransportSocket == nil || c.TransportSocketMatches == nil {
		return
	}
	t.Errorf("both transport_socket and transport_socket_matches set for %v", c)
}
// ValidateRoutes checks each route for validity.
func ValidateRoutes(t testing.TB, ls []*route.Route) {
	for _, r := range ls {
		ValidateRoute(t, r)
	}
}
// ValidateRoute checks a single route for proto validity.
func ValidateRoute(t testing.TB, r *route.Route) {
	err := r.Validate()
	if err != nil {
		t.Errorf("route %v is invalid: %v", r.Name, err)
	}
}
// ValidateRouteConfigurations checks every route configuration for validity and
// reports duplicate names.
func ValidateRouteConfigurations(t testing.TB, ls []*route.RouteConfiguration) {
	seen := sets.New[string]()
	for _, rc := range ls {
		if seen.InsertContains(rc.Name) {
			t.Errorf("duplicate route config name %v", rc.Name)
		}
		ValidateRouteConfiguration(t, rc)
	}
}
// ValidateRouteConfiguration checks proto validity, Istio's expected
// MaxDirectResponseBodySizeBytes value, and virtual host/domain uniqueness.
func ValidateRouteConfiguration(t testing.TB, l *route.RouteConfiguration) {
	t.Helper()
	if err := l.Validate(); err != nil {
		t.Errorf("route configuration %v is invalid: %v", l.Name, err)
	}
	if want := istio_route.DefaultMaxDirectResponseBodySizeBytes.Value; l.MaxDirectResponseBodySizeBytes.Value != want {
		t.Errorf("expected MaxDirectResponseBodySizeBytes %v, got %v",
			istio_route.DefaultMaxDirectResponseBodySizeBytes, l.MaxDirectResponseBodySizeBytes)
	}
	validateRouteConfigurationDomains(t, l)
}
// validateRouteConfigurationDomains reports duplicate virtual host names and duplicate
// domains across all virtual hosts of the route configuration.
func validateRouteConfigurationDomains(t testing.TB, l *route.RouteConfiguration) {
	t.Helper()
	seenHosts := sets.New[string]()
	seenDomains := sets.New[string]()
	for _, vh := range l.VirtualHosts {
		if seenHosts.InsertContains(vh.Name) {
			t.Errorf("duplicate virtual host found %s", vh.Name)
		}
		for _, d := range vh.Domains {
			if seenDomains.InsertContains(d) {
				t.Errorf("duplicate virtual host domain found %s", d)
			}
		}
	}
}
// ValidateClusterLoadAssignments checks each load assignment for proto validity.
func ValidateClusterLoadAssignments(t testing.TB, ls []*endpoint.ClusterLoadAssignment) {
	for _, cla := range ls {
		ValidateClusterLoadAssignment(t, cla)
	}
}
// ValidateClusterLoadAssignment checks a single load assignment for proto validity.
func ValidateClusterLoadAssignment(t testing.TB, l *endpoint.ClusterLoadAssignment) {
	err := l.Validate()
	if err != nil {
		t.Errorf("cluster load assignment %v is invalid: %v", l.ClusterName, err)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adsc
import (
"context"
"crypto/tls"
"crypto/x509"
"encoding/json"
"fmt"
"math"
"net"
"os"
"sort"
"strings"
"sync"
"time"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/envoyproxy/go-control-plane/pkg/conversion"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/protobuf/proto"
anypb "google.golang.org/protobuf/types/known/anypb"
pstruct "google.golang.org/protobuf/types/known/structpb"
mcp "istio.io/api/mcp/v1alpha1"
"istio.io/api/mesh/v1alpha1"
mem "istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pilot/pkg/util/network"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/backoff"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
const (
	// defaultClientMaxReceiveMessageSize is the maximum message size the gRPC client will accept.
	defaultClientMaxReceiveMessageSize = math.MaxInt32
	defaultInitialConnWindowSize       = 1024 * 1024 // default gRPC ConnWindowSize
	defaultInitialWindowSize           = 1024 * 1024 // default gRPC InitialWindowSize
)
// Config describes the client side of an xDS connection: the simulated node
// identity, transport-security settings, and the reconnect policy.
type Config struct {
	// Address of the xDS server
	Address string
	// Namespace defaults to 'default'
	Namespace string
	// Workload defaults to 'test'
	Workload string
	// Revision for this control plane instance. We will only read configs that match this revision.
	Revision string
	// Meta includes additional metadata for the node
	Meta *pstruct.Struct
	// Locality is included in the node identity sent to the server.
	Locality *core.Locality
	// NodeType defaults to sidecar. "ingress" and "router" are also supported.
	NodeType model.NodeType
	// IP is currently the primary key used to locate inbound configs. It is sent by client,
	// must match a known endpoint IP. Tests can use a ServiceEntry to register fake IPs.
	IP string
	// CertDir is the directory where mTLS certs are configured.
	// If CertDir and Secret are empty, an insecure connection will be used.
	// TODO: implement SecretManager for cert dir
	CertDir string
	// Secrets is the interface used for getting keys and rootCA.
	SecretManager security.SecretManager
	// For getting the certificate, using same code as SDS server.
	// Either the JWTPath or the certs must be present.
	JWTPath string
	// XDSSAN is the expected SAN of the XDS server. If not set, the ProxyConfig.DiscoveryAddress is used.
	XDSSAN string
	// XDSRootCAFile explicitly set the root CA to be used for the XDS connection.
	// Mirrors Envoy file.
	XDSRootCAFile string
	// RootCert contains the XDS root certificate. Used mainly for tests, apps will normally use
	// XDSRootCAFile
	RootCert []byte
	// InsecureSkipVerify skips client verification the server's certificate chain and host name.
	InsecureSkipVerify bool
	// BackoffPolicy determines the reconnect policy. Based on MCP client.
	BackoffPolicy backoff.BackOff
	// GrpcOpts are additional gRPC dial options, appended after the defaults.
	GrpcOpts []grpc.DialOption
}
// ADSConfig for the ADS connection. It extends Config with the set of
// resources to request at startup and an optional per-response callback.
type ADSConfig struct {
	Config
	// InitialDiscoveryRequests is a list of resources to watch at first, represented as URLs (for new XDS resource naming)
	// or type URLs.
	InitialDiscoveryRequests []*discovery.DiscoveryRequest
	// ResponseHandler will be called on each DiscoveryResponse.
	// TODO: mirror Generator, allow adding handler per type
	ResponseHandler ResponseHandler
}
// defaultGrpcDialOptions returns the dial options applied to every ADSC
// connection: initial window sizes and the maximum receive-message size.
func defaultGrpcDialOptions() []grpc.DialOption {
	// TODO(SpecialYang) maybe need to make it configurable.
	opts := make([]grpc.DialOption, 0, 3)
	opts = append(opts,
		grpc.WithInitialWindowSize(int32(defaultInitialWindowSize)),
		grpc.WithInitialConnWindowSize(int32(defaultInitialConnWindowSize)),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaultClientMaxReceiveMessageSize)))
	return opts
}
// ADSC implements a basic client for ADS, for use in stress tests and tools
// or libraries that need to connect to Istio pilot or other ADS servers.
type ADSC struct {
	// Stream is the GRPC connection stream, allowing direct GRPC send operations.
	// Set after Dial is called.
	stream discovery.AggregatedDiscoveryService_StreamAggregatedResourcesClient
	// xds client used to create a stream
	client discovery.AggregatedDiscoveryServiceClient
	// conn is the underlying gRPC connection the stream is created on.
	conn *grpc.ClientConn
	// Indicates if the ADSC client is closed
	closed bool
	// NodeID is the node identity sent to Pilot.
	nodeID string
	// watchTime records when the CDS watch was started; used to compute InitialLoad.
	watchTime time.Time
	// InitialLoad tracks the time to receive the initial configuration.
	InitialLoad time.Duration
	// httpListeners contains received listeners with a http_connection_manager filter.
	httpListeners map[string]*listener.Listener
	// tcpListeners contains all listeners of type TCP (not-HTTP)
	tcpListeners map[string]*listener.Listener
	// All received clusters of type eds, keyed by name
	edsClusters map[string]*cluster.Cluster
	// All received clusters of no-eds type, keyed by name
	clusters map[string]*cluster.Cluster
	// All received routes, keyed by route name
	routes map[string]*route.RouteConfiguration
	// All received endpoints, keyed by cluster name
	eds map[string]*endpoint.ClusterLoadAssignment
	// Metadata has the node metadata to send to pilot.
	// If nil, the defaults will be used.
	Metadata *pstruct.Struct
	// Updates includes the type of the last update received from the server.
	Updates chan string
	// errChan receives stream errors so waiters (e.g. WaitVersion) can observe them.
	errChan chan error
	// XDSUpdates receives each raw DiscoveryResponse as it is processed.
	XDSUpdates chan *discovery.DiscoveryResponse
	// VersionInfo holds the last received version, keyed by type URL.
	VersionInfo map[string]string
	// Last received message, by type
	Received map[string]*discovery.DiscoveryResponse
	// mutex guards the received-resource maps and related mutable state.
	mutex sync.RWMutex
	// Mesh is the last received mesh configuration.
	Mesh *v1alpha1.MeshConfig
	// Retrieved configurations can be stored using the common istio model interface.
	Store model.ConfigStore
	// Retrieved endpoints can be stored in the memory registry. This is used for CDS and EDS responses.
	Registry *memory.ServiceDiscovery
	// LocalCacheDir is set to a base name used to save fetched resources.
	// If set, each update will be saved.
	// TODO: also load at startup - so we can support warm up in init-container, and survive
	// restarts.
	LocalCacheDir string
	// cfg holds the configuration this client was created with.
	cfg *ADSConfig
	// sendNodeMeta is set to true if the connection is new - and we need to send node meta.,
	sendNodeMeta bool
	// sync records, per MCP type URL, when the first response was received.
	sync map[string]time.Time
	// Locality is the node locality sent to the server.
	Locality *core.Locality
}
// ResponseHandler is a client-supplied callback invoked for every
// DiscoveryResponse received on the stream, before the built-in
// type-specific handling runs.
type ResponseHandler interface {
	// HandleResponse receives the connection and the raw response.
	HandleResponse(con *ADSC, response *discovery.DiscoveryResponse)
}
// jsonMarshalProtoWithName wraps a proto.Message with name so it can be marshaled with the standard encoding/json library.
// It serializes as a single-key JSON object: {"<Name>": <proto-as-JSON>}.
type jsonMarshalProtoWithName struct {
	// Name is the resource name used as the JSON object key.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Message is the wrapped proto, serialized via protomarshal.
	Message proto.Message
}
// MarshalJSON serializes the wrapped proto as {"<Name>": <proto-as-JSON>}.
// The proto body is rendered with protomarshal; the name is escaped through
// encoding/json so names containing quotes or backslashes still yield valid
// JSON (the previous string concatenation did not escape the name).
func (p jsonMarshalProtoWithName) MarshalJSON() ([]byte, error) {
	strSer, serr := protomarshal.ToJSONWithIndent(p.Message, " ")
	if serr != nil {
		adscLog.Warnf("Error for marshaling [%s]: %v", p.Name, serr)
		return []byte(""), serr
	}
	nameJSON, err := json.Marshal(p.Name)
	if err != nil {
		return []byte(""), err
	}
	serialItem := []byte("{" + string(nameJSON) + ":" + strSer + "}")
	return serialItem, nil
}
// adscLog is the logging scope used for all ADSC connection and debug messages.
var adscLog = log.RegisterScope("adsc", "adsc debugging")
// NewWithBackoffPolicy creates an ADSC exactly like New, then installs the
// supplied backoff policy used when the stream needs to reconnect.
func NewWithBackoffPolicy(discoveryAddr string, opts *ADSConfig, backoffPolicy backoff.BackOff) (*ADSC, error) {
	client, err := New(discoveryAddr, opts)
	if err != nil {
		return nil, err
	}
	client.cfg.BackoffPolicy = backoffPolicy
	return client, nil
}
// New creates a new ADSC, maintaining a connection to an XDS server.
// Will:
// - get certificate using the Secret provider, if CertRequired
// - connect to the XDS server specified in ProxyConfig
// - send initial request for watched resources
// - wait for response from XDS server
// - on success, start a background thread to maintain the connection, with exp. backoff.
func New(discoveryAddr string, opts *ADSConfig) (*ADSC, error) {
	if opts == nil {
		opts = &ADSConfig{}
	}
	// Normalize the embedded Config and pin the target address.
	opts.Config = setDefaultConfig(&opts.Config)
	opts.Address = discoveryAddr
	adsc := &ADSC{
		Updates:     make(chan string, 100),
		XDSUpdates:  make(chan *discovery.DiscoveryResponse, 100),
		VersionInfo: map[string]string{},
		Received:    map[string]*discovery.DiscoveryResponse{},
		cfg:         opts,
		sync:        map[string]time.Time{},
		errChan:     make(chan error, 10),
		Metadata:    opts.Meta,
		Locality:    opts.Locality,
	}
	adsc.nodeID = nodeID(&adsc.cfg.Config)
	if err := adsc.Dial(); err != nil {
		return nil, err
	}
	return adsc, nil
}
// setDefaultConfig fills zero-valued fields of config with the defaults used
// by test clients (namespace "default", sidecar node type, workload "test-1",
// first private host IP, exponential backoff) and returns the result.
// The fields are updated in place on the passed struct as well.
func setDefaultConfig(config *Config) Config {
	if config == nil {
		config = &Config{}
	}
	if config.Namespace == "" {
		config.Namespace = "default"
	}
	if config.NodeType == "" {
		config.NodeType = model.SidecarProxy
	}
	if config.IP == "" {
		// Fall back to the host's first private IP when one is available.
		if ips, ok := network.GetPrivateIPsIfAvailable(); ok && len(ips) > 0 {
			config.IP = ips[0]
		}
	}
	if config.Workload == "" {
		config.Workload = "test-1"
	}
	if config.BackoffPolicy == nil {
		config.BackoffPolicy = backoff.NewExponentialBackOff(backoff.DefaultOption())
	}
	return *config
}
// Dial connects to a ADS server, with optional MTLS authentication if a cert dir is specified.
// The connection is stored only on success; a failed dial leaves a.conn untouched.
func (a *ADSC) Dial() error {
	c, err := dialWithConfig(&a.cfg.Config)
	if err != nil {
		return err
	}
	a.conn = c
	return nil
}
// dialWithConfig opens the gRPC connection described by config. mTLS is used
// when a cert dir or secret manager is configured; when neither is set and the
// caller supplied no extra dial options, transport security is disabled.
func dialWithConfig(config *Config) (*grpc.ClientConn, error) {
	defaults := defaultGrpcDialOptions()
	opts := make([]grpc.DialOption, 0, len(defaults)+len(config.GrpcOpts)+1)
	opts = append(opts, defaults...)
	opts = append(opts, config.GrpcOpts...)
	// If we need MTLS - CertDir or Secrets provider is set.
	if len(config.CertDir) > 0 || config.SecretManager != nil {
		tlsCfg, err := tlsConfig(config)
		if err != nil {
			return nil, err
		}
		opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg)))
	}
	if len(opts) == len(defaults) {
		// Only disable transport security if the user didn't supply custom dial options
		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
	}
	return grpc.Dial(config.Address, opts...)
}
// tlsConfig builds the client TLS configuration for the XDS connection.
// The root CA is resolved in order of precedence: the in-memory RootCert,
// XDSRootCAFile, the SecretManager, then <CertDir>/root-cert.pem. The expected
// server name is the host portion of config.Address unless XDSSAN overrides it.
func tlsConfig(config *Config) (*tls.Config, error) {
	var clientCerts []tls.Certificate
	var serverCABytes []byte
	var err error
	getClientCertificate := getClientCertFn(config)
	// Load the root CAs
	if config.RootCert != nil {
		serverCABytes = config.RootCert
	} else if config.XDSRootCAFile != "" {
		serverCABytes, err = os.ReadFile(config.XDSRootCAFile)
		if err != nil {
			return nil, err
		}
	} else if config.SecretManager != nil {
		// This is a bit crazy - we could just use the file
		rootCA, err := config.SecretManager.GenerateSecret(security.RootCertReqResourceName)
		if err != nil {
			return nil, err
		}
		serverCABytes = rootCA.RootCert
	} else if config.CertDir != "" {
		serverCABytes, err = os.ReadFile(config.CertDir + "/root-cert.pem")
		if err != nil {
			return nil, err
		}
	}
	serverCAs := x509.NewCertPool()
	if ok := serverCAs.AppendCertsFromPEM(serverCABytes); !ok {
		// AppendCertsFromPEM reports failure only through its return value; err
		// may still be nil here (e.g. the RootCert path), so return an explicit
		// error instead of a (nil, nil) pair.
		return nil, fmt.Errorf("failed to parse root CA certificate from PEM")
	}
	shost, _, _ := net.SplitHostPort(config.Address)
	if config.XDSSAN != "" {
		shost = config.XDSSAN
	}
	// nolint: gosec
	// it's insecure only when a user explicitly enable insecure mode.
	return &tls.Config{
		GetClientCertificate: getClientCertificate,
		Certificates:         clientCerts,
		RootCAs:              serverCAs,
		ServerName:           shost,
		InsecureSkipVerify:   config.InsecureSkipVerify,
	}, nil
}
// Close tears down the gRPC connection and marks the client closed so the
// reconnect logic will not schedule further runs.
func (a *ADSC) Close() {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	_ = a.conn.Close()
	a.closed = true
}
// Run will create a new stream using the existing grpc client connection and send the initial xds requests.
// And then it will run a go routine receiving and handling xds response.
// Note: it is non blocking
func (a *ADSC) Run() error {
	a.client = discovery.NewAggregatedDiscoveryServiceClient(a.conn)
	var err error
	a.stream, err = a.client.StreamAggregatedResources(context.Background())
	if err != nil {
		return err
	}
	// A fresh stream must carry the node metadata on its first request.
	a.sendNodeMeta = true
	a.InitialLoad = 0
	// Send the initial requests; the CDS request starts the initial-load timer.
	for _, req := range a.cfg.InitialDiscoveryRequests {
		if req.TypeUrl == v3.ClusterType {
			a.watchTime = time.Now()
		}
		_ = a.Send(req)
	}
	go a.handleRecv()
	return nil
}
// HasSynced returns true if MCP configs have synced
func (a *ADSC) HasSynced() bool {
	if a.cfg == nil || len(a.cfg.InitialDiscoveryRequests) == 0 {
		return true
	}
	a.mutex.RLock()
	defer a.mutex.RUnlock()
	for _, req := range a.cfg.InitialDiscoveryRequests {
		// Only MCP type URLs participate in the sync check.
		if _, isMCP := convertTypeURLToMCPGVK(req.TypeUrl); !isMCP {
			continue
		}
		// A type counts as synced once a response for it has been recorded.
		if _, synced := a.sync[req.TypeUrl]; !synced {
			return false
		}
	}
	return true
}
// reconnect will create a new stream
func (a *ADSC) reconnect() {
	a.mutex.RLock()
	closed := a.closed
	a.mutex.RUnlock()
	if closed {
		// Client was explicitly closed; stop retrying.
		return
	}
	if err := a.Run(); err != nil {
		// TODO: fix reconnect
		time.AfterFunc(a.cfg.BackoffPolicy.NextBackOff(), a.reconnect)
	}
}
// handleRecv is the stream receive loop. It runs until the stream errors:
// on error it either schedules a reconnect (when a backoff policy is set) or
// shuts the client down, signaling waiters via the Updates/XDSUpdates/errChan
// channels. Each received response is dispatched to the type-specific handler
// (LDS/CDS/EDS/RDS/MCP), recorded, and acked.
func (a *ADSC) handleRecv() {
	// We connected, so reset the backoff
	if a.cfg.BackoffPolicy != nil {
		a.cfg.BackoffPolicy.Reset()
	}
	for {
		var err error
		msg, err := a.stream.Recv()
		if err != nil {
			adscLog.Infof("Connection closed for node %v with err: %v", a.nodeID, err)
			// Non-blocking send: drop the error if nobody is draining errChan.
			select {
			case a.errChan <- err:
			default:
			}
			// if 'reconnect' enabled - schedule a new Run
			if a.cfg.BackoffPolicy != nil {
				time.AfterFunc(a.cfg.BackoffPolicy.NextBackOff(), a.reconnect)
			} else {
				// No reconnect: close the connection and wake all waiters with
				// the "closed" sentinels ("" on Updates, nil on XDSUpdates).
				a.Close()
				a.WaitClear()
				a.Updates <- ""
				a.XDSUpdates <- nil
				close(a.errChan)
			}
			return
		}
		// Group-value-kind - used for high level api generator.
		resourceGvk, isMCP := convertTypeURLToMCPGVK(msg.TypeUrl)
		adscLog.WithLabels("type", msg.TypeUrl, "count", len(msg.Resources), "nonce", msg.Nonce).Info("Received")
		// User-provided handler sees every response before built-in processing.
		if a.cfg.ResponseHandler != nil {
			a.cfg.ResponseHandler.HandleResponse(a, msg)
		}
		// Mesh config is handled out-of-band: it is stored (and optionally
		// cached to disk) but never recorded in Received or acked.
		if msg.TypeUrl == gvk.MeshConfig.String() &&
			len(msg.Resources) > 0 {
			rsc := msg.Resources[0]
			m := &v1alpha1.MeshConfig{}
			err = proto.Unmarshal(rsc.Value, m)
			if err != nil {
				adscLog.Warnf("Failed to unmarshal mesh config: %v", err)
			}
			a.Mesh = m
			if a.LocalCacheDir != "" {
				strResponse, err := protomarshal.ToJSONWithIndent(m, " ")
				if err != nil {
					continue
				}
				err = os.WriteFile(a.LocalCacheDir+"_mesh.json", []byte(strResponse), 0o644)
				if err != nil {
					continue
				}
			}
			continue
		}
		// Process the resources.
		a.VersionInfo[msg.TypeUrl] = msg.VersionInfo
		switch msg.TypeUrl {
		case v3.ListenerType:
			listeners := make([]*listener.Listener, 0, len(msg.Resources))
			for _, rsc := range msg.Resources {
				valBytes := rsc.Value
				ll := &listener.Listener{}
				_ = proto.Unmarshal(valBytes, ll)
				listeners = append(listeners, ll)
			}
			a.handleLDS(listeners)
		case v3.ClusterType:
			clusters := make([]*cluster.Cluster, 0, len(msg.Resources))
			for _, rsc := range msg.Resources {
				valBytes := rsc.Value
				cl := &cluster.Cluster{}
				_ = proto.Unmarshal(valBytes, cl)
				clusters = append(clusters, cl)
			}
			a.handleCDS(clusters)
		case v3.EndpointType:
			eds := make([]*endpoint.ClusterLoadAssignment, 0, len(msg.Resources))
			for _, rsc := range msg.Resources {
				valBytes := rsc.Value
				el := &endpoint.ClusterLoadAssignment{}
				_ = proto.Unmarshal(valBytes, el)
				eds = append(eds, el)
			}
			a.handleEDS(eds)
		case v3.RouteType:
			routes := make([]*route.RouteConfiguration, 0, len(msg.Resources))
			for _, rsc := range msg.Resources {
				valBytes := rsc.Value
				rl := &route.RouteConfiguration{}
				_ = proto.Unmarshal(valBytes, rl)
				routes = append(routes, rl)
			}
			a.handleRDS(routes)
		default:
			if isMCP {
				a.handleMCP(resourceGvk, msg.Resources)
			}
		}
		// If we got no resource - still save to the store with empty name/namespace, to notify sync
		// This scheme also allows us to chunk large responses !
		// TODO: add hook to inject nacks
		a.mutex.Lock()
		if isMCP {
			if _, exist := a.sync[resourceGvk.String()]; !exist {
				a.sync[resourceGvk.String()] = time.Now()
			}
		}
		a.Received[msg.TypeUrl] = msg
		a.ack(msg)
		a.mutex.Unlock()
		// Non-blocking publish of the raw response to XDSUpdates.
		select {
		case a.XDSUpdates <- msg:
		default:
		}
	}
}
// mcpToPilot converts an MCP resource into an Istio config.Config. It returns
// (nil, nil) when the resource belongs to a different revision, and an error
// when the metadata name is not "namespace/name" or the body cannot be
// unmarshalled. A nil resource or nil metadata yields an empty config.
func (a *ADSC) mcpToPilot(m *mcp.Resource) (*config.Config, error) {
	if m == nil || m.Metadata == nil {
		return &config.Config{}, nil
	}
	c := &config.Config{
		Meta: config.Meta{
			ResourceVersion: m.Metadata.Version,
			Labels:          m.Metadata.Labels,
			Annotations:     m.Metadata.Annotations,
		},
	}
	if !config.ObjectInRevision(c, a.cfg.Revision) { // In case upstream does not support rev in node meta.
		return nil, nil
	}
	if c.Meta.Annotations == nil {
		c.Meta.Annotations = map[string]string{}
	}
	// The metadata name must be exactly "namespace/name".
	namespace, name, found := strings.Cut(m.Metadata.Name, "/")
	if !found || strings.Contains(name, "/") {
		return nil, fmt.Errorf("invalid name %s", m.Metadata.Name)
	}
	c.Namespace = namespace
	c.Name = name
	c.CreationTimestamp = m.Metadata.CreateTime.AsTime()
	pb, err := m.Body.UnmarshalNew()
	if err != nil {
		return nil, err
	}
	c.Spec = pb
	return c, nil
}
// nolint: staticcheck
// handleLDS classifies received listeners into HTTP and TCP maps (by terminal
// filter), requests the route configurations referenced by the HTTP listeners,
// and publishes a ListenerType update. Listeners are assumed to have at least
// one filter chain with at least one filter — TODO confirm upstream guarantees
// this for non-API listeners.
func (a *ADSC) handleLDS(ll []*listener.Listener) {
	lh := map[string]*listener.Listener{}
	lt := map[string]*listener.Listener{}
	routes := []string{}
	ldsSize := 0
	for _, l := range ll {
		ldsSize += proto.Size(l)
		// The last filter is the actual destination for inbound listener
		if l.ApiListener != nil {
			// This is an API Listener
			// TODO: extract VIP and RDS or cluster
			continue
		}
		fc := l.FilterChains[len(l.FilterChains)-1]
		// Find the terminal filter
		filter := fc.Filters[len(fc.Filters)-1]
		// The actual destination will be the next to the last if the last filter is a passthrough filter
		if fc.GetName() == util.PassthroughFilterChain {
			fc = l.FilterChains[len(l.FilterChains)-2]
			filter = fc.Filters[len(fc.Filters)-1]
		}
		switch filter.Name {
		case wellknown.TCPProxy:
			lt[l.Name] = l
			config, _ := conversion.MessageToStruct(filter.GetTypedConfig())
			c := config.Fields["cluster"].GetStringValue()
			adscLog.Debugf("TCP: %s -> %s", l.Name, c)
		case wellknown.HTTPConnectionManager:
			lh[l.Name] = l
			// Getting from config is too painful..
			port := l.Address.GetSocketAddress().GetPortValue()
			// Port 15002 is the http proxy; its route is named "http_proxy"
			// rather than by port number.
			if port == 15002 {
				routes = append(routes, "http_proxy")
			} else {
				routes = append(routes, fmt.Sprintf("%d", port))
			}
		case wellknown.MongoProxy:
			// ignore for now
		case wellknown.RedisProxy:
			// ignore for now
		case wellknown.MySQLProxy:
			// ignore for now
		default:
			adscLog.Infof(protomarshal.ToJSONWithIndent(l, " "))
		}
	}
	adscLog.Infof("LDS: http=%d tcp=%d size=%d", len(lh), len(lt), ldsSize)
	if adscLog.DebugEnabled() {
		b, _ := json.MarshalIndent(ll, " ", " ")
		adscLog.Debugf(string(b))
	}
	a.mutex.Lock()
	defer a.mutex.Unlock()
	// Request the referenced route configurations before swapping in the
	// new listener maps; both happen under the same lock.
	if len(routes) > 0 {
		a.sendRsc(v3.RouteType, routes)
	}
	a.httpListeners = lh
	a.tcpListeners = lt
	select {
	case a.Updates <- v3.ListenerType:
	default:
	}
}
// Save will save the json configs to files, using the base directory.
// Each resource map is written as a JSON array of {name: resource} objects,
// sorted by name so the persisted output is deterministic. The previous
// implementation repeated the same stanza six times, and the TCP-listener
// stanza only warned on a marshal error while still writing the file; now
// all six maps share one helper with uniform error handling.
func (a *ADSC) Save(base string) error {
	a.mutex.Lock()
	defer a.mutex.Unlock()
	if err := saveProtoMap(base+"_lds_tcp.json", a.tcpListeners); err != nil {
		return err
	}
	if err := saveProtoMap(base+"_lds_http.json", a.httpListeners); err != nil {
		return err
	}
	if err := saveProtoMap(base+"_rds.json", a.routes); err != nil {
		return err
	}
	if err := saveProtoMap(base+"_ecds.json", a.edsClusters); err != nil {
		return err
	}
	if err := saveProtoMap(base+"_cds.json", a.clusters); err != nil {
		return err
	}
	return saveProtoMap(base+"_eds.json", a.eds)
}

// saveProtoMap writes a name->proto map to path as an indented JSON array,
// ordered by resource name to guarantee a deterministic persistence order.
func saveProtoMap[T proto.Message](path string, m map[string]T) error {
	names := make([]string, 0, len(m))
	for name := range m {
		names = append(names, name)
	}
	sort.Strings(names)
	arr := make([]jsonMarshalProtoWithName, 0, len(names))
	for _, name := range names {
		arr = append(arr, jsonMarshalProtoWithName{Name: name, Message: m[name]})
	}
	b, err := json.MarshalIndent(arr, "", " ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, b, 0o644)
}
// handleCDS splits received clusters into EDS and non-EDS groups, requests
// endpoints for the EDS clusters, and publishes a ClusterType update.
func (a *ADSC) handleCDS(ll []*cluster.Cluster) {
	edsNames := make([]string, 0, len(ll))
	totalSize := 0
	edsByName := map[string]*cluster.Cluster{}
	otherByName := map[string]*cluster.Cluster{}
	for _, c := range ll {
		totalSize += proto.Size(c)
		// Clusters with an explicit non-EDS discovery type are stored separately;
		// everything else (EDS, or no explicit type) is treated as EDS.
		if dt, ok := c.ClusterDiscoveryType.(*cluster.Cluster_Type); ok && dt.Type != cluster.Cluster_EDS {
			otherByName[c.Name] = c
			continue
		}
		edsNames = append(edsNames, c.Name)
		edsByName[c.Name] = c
	}
	adscLog.Infof("CDS: %d size=%d", len(edsNames), totalSize)
	if len(edsNames) > 0 {
		a.sendRsc(v3.EndpointType, edsNames)
	}
	if adscLog.DebugEnabled() {
		b, _ := json.MarshalIndent(ll, " ", " ")
		adscLog.Debugf(string(b))
	}
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.edsClusters = edsByName
	a.clusters = otherByName
	select {
	case a.Updates <- v3.ClusterType:
	default:
	}
}
// node builds the xDS core.Node for this client from its configuration.
func (a *ADSC) node() *core.Node {
	return buildNode(&a.cfg.Config)
}
// nodeID renders the node identity string in the standard proxy format:
// <type>~<ip>~<workload>.<namespace>~<namespace>.svc.<domain>.
func nodeID(config *Config) string {
	return fmt.Sprintf("%s~%s~%s.%s~%s.svc.%s",
		config.NodeType, config.IP, config.Workload,
		config.Namespace, config.Namespace, constants.DefaultClusterLocalDomain)
}
// buildNode constructs the core.Node sent to the server. When no metadata is
// supplied, or the supplied metadata lacks ISTIO_VERSION, a sentinel version
// ("65536.65536.65536") is injected. A caller-supplied Meta struct is mutated
// in place to add the missing field.
func buildNode(config *Config) *core.Node {
	sentinelVersion := &pstruct.Value{Kind: &pstruct.Value_StringValue{StringValue: "65536.65536.65536"}}
	meta := config.Meta
	if meta == nil {
		meta = &pstruct.Struct{
			Fields: map[string]*pstruct.Value{
				"ISTIO_VERSION": sentinelVersion,
			},
		}
	} else if meta.Fields["ISTIO_VERSION"] == nil {
		meta.Fields["ISTIO_VERSION"] = sentinelVersion
	}
	return &core.Node{
		Id:       nodeID(config),
		Locality: config.Locality,
		Metadata: meta,
	}
}
// Send performs a raw send of a discovery request on the stream. The node
// identity is attached only on the first send after a (re)connect; every
// request gets a fresh response nonce.
func (a *ADSC) Send(req *discovery.DiscoveryRequest) error {
	if a.sendNodeMeta {
		a.sendNodeMeta = false
		req.Node = a.node()
	}
	req.ResponseNonce = time.Now().String()
	if adscLog.DebugEnabled() {
		asJSON, _ := protomarshal.ToJSONWithIndent(req, " ")
		adscLog.Debugf("Sending Discovery Request to istiod: %s", asJSON)
	}
	return a.stream.Send(req)
}
// handleEDS records received ClusterLoadAssignments keyed by cluster name and
// publishes an EndpointType update. On the first load it also requests
// listeners (mirroring Envoy, which loads listeners after endpoints).
func (a *ADSC) handleEDS(eds []*endpoint.ClusterLoadAssignment) {
	byCluster := map[string]*endpoint.ClusterLoadAssignment{}
	totalSize, endpointCount := 0, 0
	for _, cla := range eds {
		totalSize += proto.Size(cla)
		byCluster[cla.ClusterName] = cla
		endpointCount += len(cla.Endpoints)
	}
	adscLog.Infof("eds: %d size=%d ep=%d", len(eds), totalSize, endpointCount)
	if adscLog.DebugEnabled() {
		b, _ := json.MarshalIndent(eds, " ", " ")
		adscLog.Debugf(string(b))
	}
	if a.InitialLoad == 0 {
		// first load - Envoy loads listeners after endpoints
		_ = a.stream.Send(&discovery.DiscoveryRequest{
			Node:    a.node(),
			TypeUrl: v3.ListenerType,
		})
	}
	a.mutex.Lock()
	defer a.mutex.Unlock()
	a.eds = byCluster
	select {
	case a.Updates <- v3.EndpointType:
	default:
	}
}
// handleRDS stores received route configurations keyed by name, records the
// initial-load latency on the first response, and publishes a RouteType update.
func (a *ADSC) handleRDS(configurations []*route.RouteConfiguration) {
	vhostCount, routeCount, totalSize := 0, 0, 0
	byName := map[string]*route.RouteConfiguration{}
	for _, rc := range configurations {
		for _, vh := range rc.VirtualHosts {
			vhostCount++
			for _, rt := range vh.Routes {
				routeCount++
				// Example: match:<prefix:"/" > route:<cluster:"outbound|9154||load-se-154.local" ...
				adscLog.Debugf("Handle route %v, path %v, cluster %v", vh.Name, rt.Match.PathSpecifier, rt.GetRoute().GetCluster())
			}
		}
		byName[rc.Name] = rc
		totalSize += proto.Size(rc)
	}
	if a.InitialLoad == 0 {
		// RDS is the last type received on initial sync; record the latency.
		a.InitialLoad = time.Since(a.watchTime)
		adscLog.Infof("RDS: %d size=%d vhosts=%d routes=%d time=%d", len(configurations), totalSize, vhostCount, routeCount, a.InitialLoad)
	} else {
		adscLog.Infof("RDS: %d size=%d vhosts=%d routes=%d", len(configurations), totalSize, vhostCount, routeCount)
	}
	if adscLog.DebugEnabled() {
		b, _ := json.MarshalIndent(configurations, " ", " ")
		adscLog.Debugf(string(b))
	}
	a.mutex.Lock()
	a.routes = byName
	a.mutex.Unlock()
	select {
	case a.Updates <- v3.RouteType:
	default:
	}
}
// WaitClear will clear the waiting events, so next call to Wait will get
// the next push type.
func (a *ADSC) WaitClear() {
	for {
		select {
		case <-a.Updates:
			// drained one pending event; keep going
		default:
			return
		}
	}
}
// WaitSingle waits for a single resource, and fails if the rejected type is
// returned. We avoid rejecting all other types to avoid race conditions. For
// example, a test asserting an incremental update of EDS may fail if a previous
// push's RDS response comes in later. Instead, we can reject events coming
// before (ie CDS). The only real alternative is to wait which introduces its own
// issues.
func (a *ADSC) WaitSingle(to time.Duration, want string, reject string) error {
	timeout := time.NewTimer(to)
	for {
		select {
		case got := <-a.Updates:
			switch {
			case got == "":
				// "" is the sentinel published when the connection closes.
				return fmt.Errorf("closed")
			case got == want:
				return nil
			case got == reject:
				return fmt.Errorf("wanted update for %v got %v", want, got)
			}
			// Any other type is ignored; keep waiting.
		case <-timeout.C:
			return fmt.Errorf("timeout, still waiting for update for %v", want)
		}
	}
}
// Wait for an updates for all the specified types
// If updates is empty, this will wait for any update
func (a *ADSC) Wait(to time.Duration, updates ...string) ([]string, error) {
	timeout := time.NewTimer(to)
	pending := sets.New[string](updates...)
	received := make([]string, 0, len(updates))
	for {
		select {
		case update := <-a.Updates:
			if update == "" {
				// "" is the sentinel published when the connection closes.
				return received, fmt.Errorf("closed")
			}
			pending.Delete(update)
			received = append(received, update)
			if pending.Len() == 0 {
				return received, nil
			}
		case <-timeout.C:
			return received, fmt.Errorf("timeout, still waiting for updates: %v", pending)
		}
	}
}
// WaitVersion waits for a new or updated for a typeURL.
func (a *ADSC) WaitVersion(to time.Duration, typeURL, lastVersion string) (*discovery.DiscoveryResponse, error) {
	timeout := time.NewTimer(to)
	a.mutex.Lock()
	prev := a.Received[typeURL]
	a.mutex.Unlock()
	// Short-circuit when we already hold a response and either no version was
	// requested or the held version differs from lastVersion.
	if prev != nil && (lastVersion == "" || lastVersion != prev.VersionInfo) {
		return prev, nil
	}
	for {
		select {
		case resp := <-a.XDSUpdates:
			if resp == nil {
				// nil is the sentinel published when the connection closes.
				return nil, fmt.Errorf("closed")
			}
			if resp.TypeUrl == typeURL {
				return resp, nil
			}
		case <-timeout.C:
			return nil, fmt.Errorf("timeout, still waiting for updates: %v", typeURL)
		case err, ok := <-a.errChan:
			if !ok {
				return nil, fmt.Errorf("connection closed")
			}
			return nil, err
		}
	}
}
// EndpointsJSON returns the endpoints, formatted as JSON, for debugging.
func (a *ADSC) EndpointsJSON() string {
	// Read-only access: take the read lock, consistent with the other read
	// accessors (GetEndpoints, GetRoutes, ...), instead of the exclusive lock.
	a.mutex.RLock()
	defer a.mutex.RUnlock()
	out, _ := json.MarshalIndent(a.eds, " ", " ")
	return string(out)
}
// ConfigInitialRequests returns the discovery requests needed to watch the
// mesh config plus every Pilot config collection.
func ConfigInitialRequests() []*discovery.DiscoveryRequest {
	schemas := collections.Pilot.All()
	out := make([]*discovery.DiscoveryRequest, 0, len(schemas)+1)
	out = append(out, &discovery.DiscoveryRequest{TypeUrl: gvk.MeshConfig.String()})
	for _, sch := range schemas {
		out = append(out, &discovery.DiscoveryRequest{TypeUrl: sch.GroupVersionKind().String()})
	}
	return out
}
// sendRsc issues a discovery request for the named resources of the given
// type, echoing the version and nonce from the last response of that type
// (both empty when nothing has been received yet).
func (a *ADSC) sendRsc(typeurl string, rsc []string) {
	var version, nonce string
	if last := a.Received[typeurl]; last != nil {
		version, nonce = last.VersionInfo, last.Nonce
	}
	_ = a.stream.Send(&discovery.DiscoveryRequest{
		ResponseNonce: nonce,
		VersionInfo:   version,
		Node:          a.node(),
		TypeUrl:       typeurl,
		ResourceNames: rsc,
	})
}
// ack acknowledges a response by echoing its nonce and version back to the
// server. Debug-type responses are never acked. EDS and RDS acks re-list the
// currently tracked resource names.
func (a *ADSC) ack(msg *discovery.DiscoveryResponse) {
	// If the response is for istio.io/debug or istio.io/debug/*,
	// skip to send ACK.
	if strings.HasPrefix(msg.TypeUrl, v3.DebugType) {
		return
	}
	var resources []string
	switch msg.TypeUrl {
	case v3.EndpointType:
		for name := range a.edsClusters {
			resources = append(resources, name)
		}
	case v3.RouteType:
		for name := range a.routes {
			resources = append(resources, name)
		}
	}
	_ = a.stream.Send(&discovery.DiscoveryRequest{
		ResponseNonce: msg.Nonce,
		TypeUrl:       msg.TypeUrl,
		Node:          a.node(),
		VersionInfo:   msg.VersionInfo,
		ResourceNames: resources,
	})
}
// GetHTTPListeners returns all the http listeners.
func (a *ADSC) GetHTTPListeners() map[string]*listener.Listener {
	// Use the read lock for this read-only accessor, consistent with
	// GetTCPListeners, GetClusters, GetRoutes and GetEndpoints (this one
	// previously took the exclusive write lock).
	a.mutex.RLock()
	defer a.mutex.RUnlock()
	return a.httpListeners
}
// GetTCPListeners returns all the tcp listeners.
func (a *ADSC) GetTCPListeners() map[string]*listener.Listener {
	a.mutex.RLock()
	listeners := a.tcpListeners
	a.mutex.RUnlock()
	return listeners
}
// GetEdsClusters returns all the eds type clusters.
func (a *ADSC) GetEdsClusters() map[string]*cluster.Cluster {
	a.mutex.RLock()
	clusters := a.edsClusters
	a.mutex.RUnlock()
	return clusters
}
// GetClusters returns all the non-eds type clusters.
func (a *ADSC) GetClusters() map[string]*cluster.Cluster {
	a.mutex.RLock()
	clusters := a.clusters
	a.mutex.RUnlock()
	return clusters
}
// GetRoutes returns all the routes.
func (a *ADSC) GetRoutes() map[string]*route.RouteConfiguration {
	a.mutex.RLock()
	routes := a.routes
	a.mutex.RUnlock()
	return routes
}
// GetEndpoints returns all the endpoints, keyed by cluster name.
func (a *ADSC) GetEndpoints() map[string]*endpoint.ClusterLoadAssignment {
	a.mutex.RLock()
	defer a.mutex.RUnlock()
	return a.eds
}
// handleMCP reconciles a batch of MCP resources of one kind into the config
// Store: new resources are created, changed ones updated, and resources that
// were present before but absent from this batch are deleted. When
// LocalCacheDir is set, each resource is also mirrored to (or removed from)
// a JSON file on disk. No-op when no Store is configured.
func (a *ADSC) handleMCP(groupVersionKind config.GroupVersionKind, resources []*anypb.Any) {
	// Generic - fill up the store
	if a.Store == nil {
		return
	}
	// Snapshot of what the store currently holds, used below to detect deletions.
	existingConfigs := a.Store.List(groupVersionKind, "")
	received := make(map[string]*config.Config)
	for _, rsc := range resources {
		m := &mcp.Resource{}
		err := rsc.UnmarshalTo(m)
		if err != nil {
			adscLog.Warnf("Error unmarshalling received MCP config %v", err)
			continue
		}
		newCfg, err := a.mcpToPilot(m)
		if err != nil {
			adscLog.Warnf("Invalid data: %v (%v)", err, string(rsc.Value))
			continue
		}
		// A nil config (no error) means the resource belongs to another revision.
		if newCfg == nil {
			continue
		}
		received[newCfg.Namespace+"/"+newCfg.Name] = newCfg
		newCfg.GroupVersionKind = groupVersionKind
		oldCfg := a.Store.Get(newCfg.GroupVersionKind, newCfg.Name, newCfg.Namespace)
		if oldCfg == nil {
			if _, err = a.Store.Create(*newCfg); err != nil {
				adscLog.Warnf("Error adding a new resource to the store %v", err)
				continue
			}
		} else if oldCfg.ResourceVersion != newCfg.ResourceVersion || newCfg.ResourceVersion == "" {
			// update the store only when resource version differs or unset.
			// The incoming version is preserved in an annotation while the
			// store-assigned version is carried over for the Update call.
			newCfg.Annotations[mem.ResourceVersion] = newCfg.ResourceVersion
			newCfg.ResourceVersion = oldCfg.ResourceVersion
			if _, err = a.Store.Update(*newCfg); err != nil {
				adscLog.Warnf("Error updating an existing resource in the store %v", err)
				continue
			}
		}
		if a.LocalCacheDir != "" {
			strResponse, err := json.MarshalIndent(newCfg, " ", " ")
			if err != nil {
				adscLog.Warnf("Error marshaling received MCP config %v", err)
				continue
			}
			err = os.WriteFile(a.LocalCacheDir+"_res."+
				newCfg.GroupVersionKind.Kind+"."+newCfg.Namespace+"."+newCfg.Name+".json", strResponse, 0o644)
			if err != nil {
				adscLog.Warnf("Error writing received MCP config to local file %v", err)
			}
		}
	}
	// remove deleted resources from cache
	for _, config := range existingConfigs {
		if _, ok := received[config.Namespace+"/"+config.Name]; !ok {
			if err := a.Store.Delete(config.GroupVersionKind, config.Name, config.Namespace, nil); err != nil {
				adscLog.Warnf("Error deleting an outdated resource from the store %v", err)
				continue
			}
			if a.LocalCacheDir != "" {
				err := os.Remove(a.LocalCacheDir + "_res." +
					config.GroupVersionKind.Kind + "." + config.Namespace + "." + config.Name + ".json")
				if err != nil {
					adscLog.Warnf("Error deleting received MCP config to local file %v", err)
				}
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adsc
import (
"context"
"errors"
"fmt"
"math"
"strings"
"sync"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
discovery "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
"github.com/envoyproxy/go-control-plane/pkg/resource/v3"
"go.uber.org/atomic"
"google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"k8s.io/utils/set"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/backoff"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
var deltaLog = log.RegisterScope("deltaadsc", "delta adsc debugging")
// resourceKey identifies a single xDS resource by name and type URL.
type resourceKey struct {
	// Name of the resource. Empty for a wildcard watch (see initWatch).
	Name string
	// TypeURL of the resource.
	TypeURL string
}

// shortName returns a compact "ShortType/Name" form for logging.
func (k resourceKey) shortName() string {
	return v3.GetShortType(k.TypeURL) + "/" + k.Name
}

// keySet is a set of resource keys.
type keySet = sets.Set[resourceKey]
// resourceNode represents a resource state in the dynamic tree structure of the service mesh.
// It tracks the relationships of a resource with its parents and children within the mesh.
//
// Example: Consider a scenario where we have a direct wildcard CDS watch.
// Upon receiving a response, suppose some CDS resources named A, B, etc., are added. The resulting tree structure would be:
//
// CDS/*:
// CDS/A:
// CDS/B:
//
// In this case, CDS/A and CDS/B are nodes under the wildcard CDS watch.
//
// Further, if we register a dependency on an EDS resource named C for CDS added resources,
// the tree expands to:
//
// CDS/*:
// CDS/A:
// EDS/C:
// CDS/B:
// EDS/C:
//
// Here, CDS/A and CDS/B become parents of EDS/C, and EDS/C is a child of both CDS/A and CDS/B.
//
// If a response later indicates that the CDS resource A is removed, all relationships originating from A are also removed.
// The updated tree would then be:
//
// CDS/*:
// CDS/B:
// EDS/C:
//
// This change reflects the removal of CDS/A and its associated child link to EDS/C.
type resourceNode struct {
	// Parents of the resource. If nil, this is explicitly watched
	// (registered via initWatch rather than discovered as a dependency).
	Parents keySet
	// Children of the resource, i.e. dependencies registered by handlers
	// while processing this resource.
	Children keySet
}
// HandlerContext is passed to resource handlers, letting them register
// dependencies on other resources and reject the current response.
type HandlerContext interface {
	// RegisterDependency subscribes to the named resources of the given type,
	// linked as children of the resource currently being handled.
	RegisterDependency(typeURL string, resourceName ...string)
	// Reject marks the current response as NACKed with the given reason.
	Reject(reason error)
}

// Compile-time check that handlerContext implements HandlerContext.
var _ HandlerContext = &handlerContext{}

// HandlerContext provides an event for a single delta response, allowing handlers to react to it.
// Operations done in the handler may be batched together with other handler's.
type handlerContext struct {
	// sub accumulates the dependencies registered during this response.
	sub keySet
	// nack holds the rejection reason, if any handler called Reject.
	nack error
}
// RegisterDependency records, for the resource currently being handled, a
// subscription to the named resources of the given type. The set is lazily
// allocated on first use.
func (h *handlerContext) RegisterDependency(typeURL string, resourceName ...string) {
	if h.sub == nil {
		h.sub = make(keySet)
	}
	for _, name := range resourceName {
		h.sub.Insert(resourceKey{Name: name, TypeURL: typeURL})
	}
}
// Reject records reason as a NACK for the response currently being processed;
// handleDeltaResponse reports it back to the server via the ack request.
func (h *handlerContext) Reject(reason error) {
	h.nack = reason
}
// DeltaADSConfig for delta ADS connection.
type DeltaADSConfig struct {
	Config
}

// Resource is a decoded xDS resource as delivered to handlers.
type Resource struct {
	// Name of the resource.
	Name string
	// Version of the resource; empty for delete events (see trigger).
	Version string
	// Entity is the decoded proto payload; nil for delete events.
	Entity proto.Message
}

// HandlerFunc reacts to a single resource event within a delta response.
type HandlerFunc func(ctx HandlerContext, res *Resource, event Event)
// Client is a stateful ADS (Aggregated Discovery Service) client designed to handle delta updates from an xDS server.
// Central to this client is a dynamic 'tree' of resources, representing the relationships and states of resources in the service mesh.
// The client's operation unfolds in the following steps:
//
// 1. Sending Initial Requests: The client initiates requests for resources it needs, as specified by the Watch function.
// This step sets the stage for receiving relevant DeltaDiscoveryResponse from the server.
//
// 2. Processing DeltaDiscoveryResponses: Upon receiving a delta response, the client performs several key actions:
// - Event Handling: Triggers specific handlers for each resource, as register using Register function during client initialization.
// - Tree Update: Modifies its 'tree' to reflect changes in resources, such as adding new resources,
// updating relationships between parents and children, and removing or unlinking resources.
//
// 3. State Synchronization: Post-processing the delta response, the client updates its internal state. This involves:
// - Acknowledgements and Errors: Communicating acknowledgements or errors back to the server based on the
// processing outcome. In cases of error or rejection, a Nack can be sent using HandlerContext.Reject.
// - Dependency Updates: Triggering requests for dependent resources. These dependencies are established via
// HandlerContext.RegisterDependency.
//
// An example of a handler registration is as follows:
//
// clusterHandler := Register(func(ctx HandlerContext, res *cluster.Cluster, event Event) {
// if event == EventDelete {
// return
// }
// ctx.RegisterDependency(v3.SecretType, ExtractClusterSecretResources(t, res)...)
// ctx.RegisterDependency(v3.EndpointType, ExtractEdsClusterNames([]*cluster.Cluster{res})...)
// })
//
// It means that when a cluster is added or updated, the client will trigger requests for the
// secrets and endpoints that the cluster depends on.
//
// An example of register handlers:
//
// handlers := []Option{
// clusterHandler,
// Watch[*cluster.Cluster]("*"),
// listenerHandler,
// Watch[*listener.Listener]("*"),
// endpointsHandler,
// routesHandler,
// secretsHandler,
// }
//
// client := NewDelta("localhost:8080", handlers...)
//
// It means that the client will watch all clusters and listeners, and trigger resource events for
// clusters, listeners, endpoints, routes and secrets that the clusters and listeners depend on.
type Client struct {
	// cfg holds the connection configuration, including the backoff policy.
	cfg *DeltaADSConfig
	// handlers maps a type URL to the handler registered for that type.
	handlers map[string]HandlerFunc
	// tree is a map where each key is a `resourceKey` (comprising the resource name and typeURL)
	// and each added resource is a `resourceNode`. This tree structure represents the dynamic state
	// and relationships of resources.
	tree map[resourceKey]resourceNode
	// xdsClient is the current delta ADS stream.
	xdsClient discovery.AggregatedDiscoveryService_DeltaAggregatedResourcesClient
	// conn is the underlying gRPC connection.
	conn *grpc.ClientConn
	// initialWatches is the list of resources we are watching on startup
	initialWatches []resourceKey
	// sendNodeMeta is set to true if the connection is new - and we need to send node meta
	sendNodeMeta atomic.Bool
	// mutex guards lastReceived and closed.
	mutex sync.RWMutex
	// lastReceived message, by type
	lastReceived map[string]*discovery.DeltaDiscoveryResponse
	// deltaXDSUpdates carries each handled response to WaitResp callers;
	// a nil value signals shutdown.
	deltaXDSUpdates chan *discovery.DeltaDiscoveryResponse
	// errChan is used to signal errors from the delta stream
	errChan chan error
	// closed is set to true when the client is closed
	closed bool
}
// trigger invokes the handler registered for typeURL with the given resource
// and event. For EventAdd the payload is unmarshalled into a freshly created
// proto of the type named by typeURL; for EventDelete only the resource name
// is propagated (Version and Entity stay zero). Unknown types are ignored
// with a warning.
func (c *Client) trigger(ctx *handlerContext, typeURL string, r *discovery.Resource, event Event) error {
	var res *Resource
	if r == nil {
		return fmt.Errorf("triggered by event %v, but resource is nil", event)
	}
	if event == EventAdd {
		if r.Resource == nil {
			return fmt.Errorf("triggered by EventAdd, but the added resource object is nil")
		}
		entity := newProto(typeURL)
		if entity == nil {
			// Bug fix: report the offending type URL; the original formatted
			// the (nil) entity here, producing a useless message.
			return fmt.Errorf("new resource entity by typeURL: %s error", typeURL)
		}
		if err := r.Resource.UnmarshalTo(entity); err != nil {
			return err
		}
		res = &Resource{
			Name:    r.Name,
			Version: r.Version,
			Entity:  entity,
		}
	} else {
		// EventDelete
		res = &Resource{
			Name: r.Name,
		}
	}
	handler, f := c.handlers[typeURL]
	if !f {
		deltaLog.Warnf("ignoring unknown type %v", typeURL)
		return nil
	}
	handler(ctx, res, event)
	return nil
}
// newProto creates a new, empty proto message instance for the given type URL
// by looking the message type up in the global proto registry. It returns nil
// if the type is not registered.
// (The previous comment referred to a different name, "getProtoMessageType".)
func newProto(tt string) proto.Message {
	name := protoreflect.FullName(strings.TrimPrefix(tt, resource.APITypePrefix))
	t, err := protoregistry.GlobalTypes.FindMessageByName(name)
	if err != nil || t == nil {
		return nil
	}
	return t.New().Interface()
}
// Run dials the server, opens the delta stream, starts the receive loop in a
// goroutine, and sends the initial watch requests. It does not block; stream
// errors are surfaced via errChan / reconnect (see handleRecv).
func (c *Client) Run(ctx context.Context) error {
	if err := c.Dial(); err != nil {
		return fmt.Errorf("dial context: %v", err)
	}
	xds := discovery.NewAggregatedDiscoveryServiceClient(c.conn)
	xdsClient, err := xds.DeltaAggregatedResources(ctx, grpc.MaxCallRecvMsgSize(math.MaxInt32))
	if err != nil {
		return fmt.Errorf("delta stream: %v", err)
	}
	// Fresh stream: the next request must carry full node metadata (see send).
	c.sendNodeMeta.Store(true)
	c.xdsClient = xdsClient
	go c.handleRecv()
	for _, w := range c.initialWatches {
		c.request(w)
	}
	return nil
}
// Dial establishes the gRPC connection configured in c.cfg and stores it on
// the client. It does not open the delta stream; see Run.
func (c *Client) Dial() error {
	conn, err := dialWithConfig(&c.cfg.Config)
	if err != nil {
		return err
	}
	c.conn = conn
	return nil
}
// reconnect will create a new stream, rescheduling itself with the configured
// backoff if the attempt fails. It is a no-op once the client is closed.
// NOTE(review): the failure path dereferences c.cfg.BackoffPolicy without a
// nil check; reconnect is only scheduled when a policy is set (see
// handleRecv), so this relies on that invariant — verify before reusing.
func (c *Client) reconnect() {
	c.mutex.RLock()
	if c.closed {
		c.mutex.RUnlock()
		return
	}
	c.mutex.RUnlock()
	err := c.Run(context.Background())
	if err != nil {
		time.AfterFunc(c.cfg.BackoffPolicy.NextBackOff(), c.reconnect)
	} else if c.cfg.BackoffPolicy != nil {
		// We connected, so reset the backoff
		c.cfg.BackoffPolicy.Reset()
	}
}
// Option configures a Client during construction (handlers, watches, ...).
type Option func(c *Client)

// NewDelta constructs a delta ADS client for the given discovery address and
// applies the provided options.
func NewDelta(discoveryAddr string, config *DeltaADSConfig, opts ...Option) *Client {
	if config == nil {
		config = &DeltaADSConfig{}
	}
	config.Address = discoveryAddr
	config.Config = setDefaultConfig(&config.Config)
	client := &Client{
		cfg:             config,
		handlers:        make(map[string]HandlerFunc),
		tree:            make(map[resourceKey]resourceNode),
		errChan:         make(chan error, 10),
		deltaXDSUpdates: make(chan *discovery.DeltaDiscoveryResponse, 100),
		lastReceived:    make(map[string]*discovery.DeltaDiscoveryResponse),
		mutex:           sync.RWMutex{},
	}
	for _, apply := range opts {
		apply(client)
	}
	return client
}
// NewDeltaWithBackoffPolicy constructs a delta ADS client that reconnects
// using the supplied backoff policy.
func NewDeltaWithBackoffPolicy(discoveryAddr string, config *DeltaADSConfig, backoffPolicy backoff.BackOff, opts ...Option) *Client {
	if config == nil {
		config = &DeltaADSConfig{}
	}
	client := NewDelta(discoveryAddr, config, opts...)
	client.cfg.BackoffPolicy = backoffPolicy
	return client
}
// typeName returns the canonical xDS type URL for the proto message type T.
func typeName[T proto.Message]() string {
	// T is a pointer message type; its zero value (nil) is sufficient for
	// ProtoReflect to resolve the descriptor, matching the original's
	// new(T)/dereference approach.
	var msg T
	return resource.APITypePrefix + string(msg.ProtoReflect().Descriptor().FullName())
}
// Register registers a handler for a type which is reflected by the proto message.
// The handler receives the decoded entity (or the zero value of T on delete
// events, where no entity is available).
func Register[T proto.Message](f func(ctx HandlerContext, resourceName string, resourceVersion string, resourceEntity T, event Event)) Option {
	return func(c *Client) {
		c.handlers[typeName[T]()] = func(ctx HandlerContext, res *Resource, event Event) {
			var entity T
			if res.Entity != nil {
				entity = res.Entity.(T)
			}
			f(ctx, res.Name, res.Version, entity, event)
		}
	}
}
// Watch registers an initial watch for a type based on the type reflected by
// the proto message. Pass "*" (or "") as resourceName for a wildcard watch.
func Watch[T proto.Message](resourceName string) Option {
	return initWatch(typeName[T](), resourceName)
}
// initWatch returns an Option registering an initial (direct) watch for the
// given type URL and resource name. "*" is normalized to "", the wildcard
// form used internally.
func initWatch(typeURL string, resourceName string) Option {
	return func(c *Client) {
		if resourceName == "*" {
			// Normalize to allow both forms
			resourceName = ""
		}
		key := resourceKey{
			Name:    resourceName,
			TypeURL: typeURL,
		}
		if existing, f := c.tree[key]; f {
			// We are watching directly now, so erase any parents (nil Parents
			// marks an explicitly watched node). resourceNode is stored by
			// value, so the modified copy must be written back to the map —
			// the original code mutated only the local copy, leaving the
			// stored node unchanged. Children is reset to a fresh (usable)
			// set rather than nil so later inserts do not write to a nil map.
			existing.Parents = nil
			existing.Children = make(keySet)
			c.tree[key] = existing
		} else {
			c.tree[key] = resourceNode{
				Parents:  make(keySet),
				Children: make(keySet),
			}
		}
		c.initialWatches = append(c.initialWatches, key)
	}
}
// handleRecv is the receive loop: it reads delta responses until the stream
// errors, dispatching each to handleDeltaResponse and recording it as the
// last received message for its type. On stream error it schedules a
// reconnect when a backoff policy is configured, otherwise closes the client.
func (c *Client) handleRecv() {
	for {
		// Bug fix: c.nodeID is a method; the original passed the method value
		// itself (logging a function address) instead of calling it.
		deltaLog.Infof("Start Recv for node %v", c.nodeID())
		msg, err := c.xdsClient.Recv()
		if err != nil {
			deltaLog.Infof("Connection closed for node %v with err: %v", c.nodeID(), err)
			// Non-blocking send: drop the error if nobody drains errChan.
			select {
			case c.errChan <- err:
			default:
			}
			// if 'reconnect' enabled - schedule a new Run
			if c.cfg.BackoffPolicy != nil {
				time.AfterFunc(c.cfg.BackoffPolicy.NextBackOff(), c.reconnect)
			} else {
				c.Close()
			}
			return
		}
		deltaLog.Infof("Received response: %s", msg.TypeUrl)
		if err := c.handleDeltaResponse(msg); err != nil {
			deltaLog.Infof("Handle response %s failed: %v", msg.TypeUrl, err)
			c.Close()
			return
		}
		c.mutex.Lock()
		c.lastReceived[msg.TypeUrl] = msg
		c.mutex.Unlock()
		// NOTE(review): blocking send — stalls when the buffer fills and no
		// WaitResp caller is draining; confirm intended.
		c.deltaXDSUpdates <- msg
	}
}
// handleDeltaResponse processes one DeltaDiscoveryResponse: it triggers
// handlers for added and removed resources, maintains the resource tree,
// acks (or nacks) the response, and issues follow-up requests for newly
// registered dependencies.
func (c *Client) handleDeltaResponse(d *discovery.DeltaDiscoveryResponse) error {
	var rejects []error
	// Dependency additions/removals accumulated across all resources in this
	// response, keyed by type URL.
	allAdds := map[string]set.Set[string]{}
	allRemoves := map[string]set.Set[string]{}
	ctx := &handlerContext{}
	if isDebugType(d.TypeUrl) {
		// No need to ack and type check for debug types
		return nil
	}
	for _, r := range d.Resources {
		// NOTE(review): assumes r.Resource is non-nil; a response entry with a
		// nil payload would panic here before trigger's nil check — verify.
		if d.TypeUrl != r.Resource.TypeUrl {
			deltaLog.Errorf("Invalid response: mismatch of type url: %v vs %v", d.TypeUrl, r.Resource.TypeUrl)
			continue
		}
		err := c.trigger(ctx, d.TypeUrl, r, EventAdd)
		if err != nil {
			return err
		}
		parentKey := resourceKey{
			Name:    r.Name,
			TypeURL: r.Resource.TypeUrl,
		}
		c.establishResource(parentKey)
		if ctx.nack != nil {
			rejects = append(rejects, ctx.nack)
			// On NACK, do not apply resource changes
			continue
		}
		// Diff the previously known children against the dependencies the
		// handlers have registered.
		// NOTE(review): ctx (and ctx.sub) is shared across all resources in
		// this response, so sub accumulates over the loop — confirm intended.
		remove, add := c.tree[parentKey].Children.Diff(ctx.sub)
		for _, key := range add {
			if _, f := allAdds[key.TypeURL]; !f {
				allAdds[key.TypeURL] = set.New[string]()
			}
			allAdds[key.TypeURL].Insert(key.Name)
			c.relate(parentKey, key)
		}
		for _, key := range remove {
			if _, f := allRemoves[key.TypeURL]; !f {
				allRemoves[key.TypeURL] = set.New[string]()
			}
			allRemoves[key.TypeURL].Insert(key.Name)
			c.unrelate(parentKey, key)
		}
	}
	// Server-side removals: fire delete events and drop the nodes (plus all
	// relationships originating from them).
	for _, r := range d.RemovedResources {
		key := resourceKey{
			Name:    r,
			TypeURL: d.TypeUrl,
		}
		removed := &discovery.Resource{
			Name: r,
		}
		err := c.trigger(ctx, d.TypeUrl, removed, EventDelete)
		if err != nil {
			return err
		}
		if _, f := allRemoves[key.TypeURL]; !f {
			allRemoves[key.TypeURL] = set.New[string]()
		}
		allRemoves[key.TypeURL].Insert(key.Name)
		c.drop(key)
	}
	// Ack (or nack, if any handler rejected) the response itself.
	c.send(resourceKey{TypeURL: d.TypeUrl}, d.Nonce, joinError(rejects))
	// Issue subscribe/unsubscribe requests for the accumulated dependency
	// changes, merging adds and removes of the same type into one request.
	for t, sub := range allAdds {
		unsub, f := allRemoves[t]
		if f {
			delete(allRemoves, t)
		}
		c.update(t, sub, unsub, d)
	}
	return nil
}
func joinError(rejects []error) error {
var e []string
for _, r := range rejects {
if r == nil {
continue
}
e = append(e, r.Error())
}
if len(e) == 0 {
return nil
}
return errors.New(strings.Join(e, "; "))
}
// establishResource sets up the relationship for a resource we received:
// it ensures a tree node exists for key and, when a wildcard watch exists
// for its type, links the resource as a child of that wildcard node.
func (c *Client) establishResource(key resourceKey) {
	// Check if we have a watch for this resource
	parentNode, f := c.tree[key]
	if !f {
		parentNode = resourceNode{
			Parents:  make(keySet),
			Children: make(keySet),
		}
		c.tree[key] = parentNode
	}
	// Check if we have a Watch for all "*" resources, and if so, this specific resource is a child
	// of that watch.
	wildcardKey := resourceKey{TypeURL: key.TypeURL}
	wildNode, wildFound := c.tree[wildcardKey]
	if wildFound {
		// Parents/Children are sets (maps), so inserting through the copied
		// struct mutates the shared sets stored in the tree.
		wildNode.Children.Insert(key)
		parentNode.Parents.Insert(wildcardKey)
	}
	if !f && !wildFound {
		// We are receiving an unwanted resource, silently ignore it.
		deltaLog.Debugf("Received unsubscribed resource: %v, %v", key, c.tree)
	}
}
// relate links child as a dependency of parent in the tree, creating the
// child node if it is not yet watched. The parent must already exist
// (Fatalf otherwise).
func (c *Client) relate(parent, child resourceKey) {
	parentNode, f := c.tree[parent]
	if !f {
		deltaLog.Fatalf("Failed to relate resource: unknown parent: %v, %v", parent, c.tree)
	}
	childNode, f := c.tree[child]
	if !f {
		// Not yet watching child, create a node
		c.tree[child] = resourceNode{
			Parents:  make(keySet),
			Children: make(keySet),
		}
		childNode = c.tree[child]
	}
	// We are already watching, just update
	// (the Parents/Children sets are shared with the stored nodes).
	childNode.Parents.Insert(parent)
	parentNode.Children.Insert(child)
}
// drop removes a resource from the tree by unrelating it from every parent;
// the final unrelate deletes the node once its parent set becomes empty.
// NOTE(review): a node with an empty/nil Parents set (i.e. explicitly
// watched) would skip the loop and hit the Fatalf below — presumably drop is
// only called for server-removed (wildcard-discovered) resources; verify.
func (c *Client) drop(parent resourceKey) {
	parentNode, f := c.tree[parent]
	if !f {
		deltaLog.Fatalf("Failed to drop resource: unknown parent: %v, %v", parent, c.tree)
	}
	for p := range parentNode.Parents {
		c.unrelate(p, parent)
	}
	// Sanity check: unrelate must have removed the node from the tree.
	if _, f := c.tree[parent]; f {
		deltaLog.Fatalf("Failed to drop resource: unrelate should have handled this: %v", c.dumpTree())
	}
}
// unrelate removes the parent/child link between the two keys. When the
// child loses its last parent, its node is deleted from the tree entirely.
// Both nodes must exist (Fatalf otherwise).
func (c *Client) unrelate(parent, child resourceKey) {
	parentNode, f := c.tree[parent]
	if !f {
		deltaLog.Fatalf("Failed to unrelate resource: unknown parent: %v, %v", parent, c.tree)
	}
	parentNode.Children.Delete(child)
	childNode, f := c.tree[child]
	if !f {
		deltaLog.Fatalf("Failed to unrelate resource: unknown child: %v, %v", parent, c.tree)
	}
	// We are already watching, just update
	childNode.Parents.Delete(parent)
	if len(childNode.Parents) == 0 {
		// Node fully removed
		deltaLog.Infof("Removed resource: %v", child.shortName())
		delete(c.tree, child)
	}
}
// Event represents a registry update event
type Event int

const (
	// EventAdd is sent when an object is added
	EventAdd Event = iota
	// EventDelete is sent when an object is deleted
	// Captures the object at the last known state
	EventDelete
)

// String returns a human-readable name for the event ("add", "delete", or
// "unknown" for unrecognized values).
func (event Event) String() string {
	switch event {
	case EventAdd:
		return "add"
	case EventDelete:
		return "delete"
	default:
		return "unknown"
	}
}
// dumpTree renders the resource tree as indented text for debugging: roots
// (nodes without parents) are listed in sorted order, each followed by its
// children via dumpNode.
func (c *Client) dumpTree() string {
	sb := strings.Builder{}
	roots := make(keySet)
	for key := range c.tree {
		if len(c.tree[key].Parents) == 0 {
			roots.Insert(key)
		}
	}
	// Note: slices here is istio's wrapper, whose SortFunc returns the sorted
	// slice (unlike the stdlib's in-place variant).
	keys := slices.SortFunc(roots.UnsortedList(), func(a, b resourceKey) int {
		return strings.Compare(a.shortName(), b.shortName())
	})
	for _, key := range keys {
		c.dumpNode(&sb, key, "")
	}
	return sb.String()
}
// dumpNode writes key and, recursively, its sorted children to sb, indenting
// two spaces per level. Recursion is capped once the indent exceeds 10
// characters to guard against cycles / runaway depth.
func (c *Client) dumpNode(sb *strings.Builder, key resourceKey, indent string) {
	sb.WriteString(indent + key.shortName() + ":\n")
	if len(indent) > 10 {
		return
	}
	node := c.tree[key]
	keys := slices.SortFunc(node.Children.UnsortedList(), func(a, b resourceKey) int {
		return strings.Compare(a.shortName(), b.shortName())
	})
	for _, child := range keys {
		id := indent + "  "
		// Not sure what this is -- two different parents?
		//if _, f := child.Parents[node]; !f {
		//	id = indent + "**"
		//}
		c.dumpNode(sb, child, id)
	}
}
// Close tears down the gRPC connection, marks the client closed, drains any
// pending events, and pushes nil on deltaXDSUpdates so WaitResp callers
// observe shutdown.
// NOTE(review): the error from conn.Close is discarded, and the final
// channel send happens while holding the mutex — confirm both are intended.
func (c *Client) Close() {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.conn.Close()
	c.closed = true
	c.WaitClear()
	// Signal the channel to close
	c.deltaXDSUpdates <- nil
}
// request sends a subscription request for w, acking the nonce of the last
// response received for its type (empty on a brand-new subscription).
func (c *Client) request(w resourceKey) {
	c.mutex.Lock()
	last := c.lastReceived[w.TypeURL]
	c.mutex.Unlock()
	var nonce string
	if last != nil {
		nonce = last.Nonce
	}
	c.send(w, nonce, nil)
}
// send issues a delta request for w, acking nonce; a non-nil err turns the
// request into a NACK carrying err as the error detail.
func (c *Client) send(w resourceKey, nonce string, err error) {
	req := &discovery.DeltaDiscoveryRequest{
		Node: &core.Node{
			Id: c.nodeID(),
		},
		TypeUrl:       w.TypeURL,
		ResponseNonce: nonce,
	}
	// The first request on a fresh stream carries full node metadata; later
	// requests send only the ID (see Run, which sets the flag).
	if c.sendNodeMeta.Load() {
		req.Node = c.node()
		c.sendNodeMeta.Store(false)
	}
	// Only subscribe on success — a NACK must not change subscriptions.
	if w.Name != "" && err == nil {
		req.ResourceNamesSubscribe = []string{w.Name}
	}
	if err != nil {
		req.ErrorDetail = &status.Status{Message: err.Error()}
	}
	err = c.xdsClient.Send(req)
	if err != nil {
		// NOTE(review): blocking send into the (buffered, size 10) errChan;
		// a full buffer would stall here — confirm acceptable.
		c.errChan <- err
	}
}
// nodeID returns the xDS node ID derived from the client configuration.
func (c *Client) nodeID() string {
	return nodeID(&c.cfg.Config)
}

// node builds the full xDS Node (ID plus metadata) from the client configuration.
func (c *Client) node() *core.Node {
	return buildNode(&c.cfg.Config)
}
// update sends a delta request for type t, acking d's nonce, subscribing to
// the names in sub and unsubscribing from those in unsub (either may be nil).
func (c *Client) update(t string, sub, unsub set.Set[string], d *discovery.DeltaDiscoveryResponse) {
	req := &discovery.DeltaDiscoveryRequest{
		Node:          &core.Node{Id: c.nodeID()},
		TypeUrl:       t,
		ResponseNonce: d.Nonce,
	}
	if sub != nil {
		req.ResourceNamesSubscribe = sub.UnsortedList()
	}
	if unsub != nil {
		req.ResourceNamesUnsubscribe = unsub.UnsortedList()
	}
	if sendErr := c.xdsClient.Send(req); sendErr != nil {
		c.errChan <- sendErr
	}
}
// WaitClear will clear the waiting events, so next call to Wait will get
// the next push type. It drains both the updates and error channels without
// blocking and returns as soon as both are empty.
func (c *Client) WaitClear() {
	for {
		select {
		case <-c.deltaXDSUpdates:
		case <-c.errChan:
		default:
			return
		}
	}
}
// WaitResp waits up to `to` for the latest delta response for typeURL.
// If a response of that type has already been received, it is returned
// immediately. It returns an error on timeout, stream error, or client
// shutdown (signaled by a nil push from Close).
func (c *Client) WaitResp(to time.Duration, typeURL string) (*discovery.DeltaDiscoveryResponse, error) {
	timer := time.NewTimer(to)
	// Stop the timer if we return before it fires, so it does not linger.
	// (The original never stopped it; the inner `t :=` also shadowed it.)
	defer timer.Stop()
	c.mutex.Lock()
	ex := c.lastReceived[typeURL]
	c.mutex.Unlock()
	if ex != nil {
		return ex, nil
	}
	for {
		select {
		case msg := <-c.deltaXDSUpdates:
			if msg == nil {
				// nil is pushed by Close to signal shutdown.
				return nil, fmt.Errorf("closed")
			}
			if msg.TypeUrl == typeURL {
				return msg, nil
			}
		case <-timer.C:
			return nil, fmt.Errorf("timeout, still waiting for updates: %v", typeURL)
		case err, ok := <-c.errChan:
			if ok {
				return nil, err
			}
			return nil, fmt.Errorf("connection closed")
		}
	}
}
// isDebugType reports whether typeURL is a debug-only xDS type; such
// responses are neither acked nor type-checked (see handleDeltaResponse).
func isDebugType(typeURL string) bool {
	return strings.HasPrefix(typeURL, v3.DebugType)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package adsc
import (
"crypto/tls"
"strings"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/security"
)
// getClientCertFn returns a callback suitable for tls.Config.GetClientCertificate.
// It prefers the SecretManager (workload certs generated at handshake time)
// and falls back to static files under CertDir (cert-chain.pem / key.pem);
// it returns nil when neither source is configured.
func getClientCertFn(config *Config) func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
	if config.SecretManager != nil {
		return func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
			// Fetch the workload key/cert on each handshake, so rotated
			// certificates are picked up.
			key, err := config.SecretManager.GenerateSecret(security.WorkloadKeyCertResourceName)
			if err != nil {
				return nil, err
			}
			clientCert, err := tls.X509KeyPair(key.CertificateChain, key.PrivateKey)
			if err != nil {
				return nil, err
			}
			return &clientCert, nil
		}
	}
	if config.CertDir != "" {
		return func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
			// Load the cert files on each handshake as well.
			certName := config.CertDir + "/cert-chain.pem"
			clientCert, err := tls.LoadX509KeyPair(certName, config.CertDir+"/key.pem")
			if err != nil {
				return nil, err
			}
			return &clientCert, nil
		}
	}
	return nil
}
// convertTypeURLToMCPGVK parses a "group/version/kind" type URL into a
// GroupVersionKind, returning it only when the GVK is a known Pilot (MCP)
// collection; otherwise it returns a zero GVK and false.
func convertTypeURLToMCPGVK(typeURL string) (config.GroupVersionKind, bool) {
	segments := strings.SplitN(typeURL, "/", 3)
	if len(segments) != 3 {
		return config.GroupVersionKind{}, false
	}
	gvk := config.GroupVersionKind{
		Group:   segments[0],
		Version: segments[1],
		Kind:    segments[2],
	}
	if _, found := collections.Pilot.FindByGroupVersionKind(gvk); !found {
		return config.GroupVersionKind{}, false
	}
	return gvk, true
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package appsignals
import (
"os"
"os/signal"
"sync"
"syscall"
"github.com/fsnotify/fsnotify"
"istio.io/istio/pkg/log"
)
// handlers is the package-level registry of signal listeners and the OS
// signal channel feeding them; the embedded mutex guards both fields.
var handlers struct {
	sync.Mutex
	listeners []chan<- Signal
	signals   chan os.Signal
}

// Signal pairs an os.Signal with the source that triggered it.
type Signal struct {
	// Source of the event trigger as we simulate signal generation from a variety of triggers
	Source string
	Signal os.Signal
}
// Watch registers c to be notified when an event is triggered. On first use
// it also installs an OS handler so a notification is always triggered for
// SIGUSR1. (Fixed "a an" typo from the original comment.)
func Watch(c chan<- Signal) {
	handlers.Lock()
	defer handlers.Unlock()
	if handlers.listeners == nil {
		// Watch for SIGUSR1 by default
		handlers.signals = make(chan os.Signal, 1)
		signal.Notify(handlers.signals, syscall.SIGUSR1)
		go func() {
			// Only SIGUSR1 is registered above, so every delivery is
			// reported as SIGUSR1 regardless of the received value.
			for range handlers.signals {
				Notify("os", syscall.SIGUSR1)
			}
		}()
		handlers.listeners = make([]chan<- Signal, 0, 10)
	}
	handlers.listeners = append(handlers.listeners, c)
}
// Notify directly triggers a notification, dispatching Signal{trigger,
// signal} to every registered listener. Sends are non-blocking: a listener
// whose channel is full is skipped with a warning.
func Notify(trigger string, signal os.Signal) {
	handlers.Lock()
	defer handlers.Unlock()
	for _, v := range handlers.listeners {
		log.Debugf("watcher.Notify: Dispatching to listener '%v' (trigger: %q, signal: %v)", v, trigger, signal)
		select {
		case v <- Signal{trigger, signal}:
		default:
			log.Warnf("watcher.Notify: Signal channel is full (trigger: %q, signal: %v)", trigger, signal)
		}
	}
}
// FileTrigger notifies registered listeners with `signal` whenever the file
// at path is mutated, until a value arrives on shutdown, the watcher's event
// channel closes, or the watcher reports an error.
func FileTrigger(path string, signal os.Signal, shutdown chan os.Signal) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	if err = watcher.Add(path); err != nil {
		// Bug fix: release the watcher's OS handle on this failure path; the
		// original returned without closing, leaking the descriptor
		// (the deferred Close below only runs inside the goroutine).
		_ = watcher.Close()
		return err
	}
	go func() {
		defer watcher.Close()
		for {
			select {
			case _, ok := <-watcher.Events:
				if ok {
					log.Warnf("File watch triggered: %v", path)
					Notify(path, signal)
				} else {
					// Events channel closed: watcher was shut down.
					return
				}
			case err := <-watcher.Errors:
				log.Warnf("Error watching file trigger: %v %v", path, err)
				return
			case signal := <-shutdown:
				log.Infof("Shutting down file watcher: %v %v", path, signal)
				return
			}
		}
	}()
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package backoff is a wrapper of `github.com/cenkalti/backoff/v4`.
// It is to prevent misuse of `github.com/cenkalti/backoff/v4`,
// thus application could fall into dead loop.
package backoff
import (
"context"
"fmt"
"time"
"github.com/cenkalti/backoff/v4"
)
// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
	// NextBackOff returns the duration to wait before retrying the next operation.
	NextBackOff() time.Duration
	// Reset to initial state.
	Reset()
	// RetryWithContext tries the operation until it does not return error,
	// or when the context expires, whichever happens first.
	RetryWithContext(ctx context.Context, operation func() error) error
}

// Option configures the intervals of an ExponentialBackOff.
type Option struct {
	// InitialInterval is the first backoff duration.
	InitialInterval time.Duration
	// MaxInterval caps the backoff duration (see NextBackOff).
	MaxInterval time.Duration
}
// ExponentialBackOff is a wrapper of backoff.ExponentialBackOff to override its NextBackOff().
type ExponentialBackOff struct {
	// exponentialBackOff is the wrapped cenkalti/backoff policy.
	exponentialBackOff *backoff.ExponentialBackOff
}

// Default values for ExponentialBackOff.
const (
	defaultInitialInterval = 500 * time.Millisecond
	defaultMaxInterval     = 60 * time.Second
)

// DefaultOption returns the default interval settings
// (500ms initial, 60s maximum).
func DefaultOption() Option {
	return Option{
		InitialInterval: defaultInitialInterval,
		MaxInterval:     defaultMaxInterval,
	}
}
// NewExponentialBackOff creates an istio wrapped ExponentialBackOff.
// By default, it never stops: once the underlying policy would stop,
// NextBackOff keeps returning MaxInterval (see NextBackOff).
func NewExponentialBackOff(o Option) BackOff {
	b := ExponentialBackOff{}
	b.exponentialBackOff = backoff.NewExponentialBackOff()
	b.exponentialBackOff.InitialInterval = o.InitialInterval
	b.exponentialBackOff.MaxInterval = o.MaxInterval
	b.Reset()
	return b
}
// NextBackOff returns the next wait duration. Where the wrapped policy would
// return its Stop sentinel, MaxInterval is returned instead so the policy
// never terminates.
func (b ExponentialBackOff) NextBackOff() time.Duration {
	duration := b.exponentialBackOff.NextBackOff()
	// always return maxInterval after it reaches MaxElapsedTime
	if duration == b.exponentialBackOff.Stop {
		return b.exponentialBackOff.MaxInterval
	}
	return duration
}

// Reset restores the wrapped policy to its initial interval.
func (b ExponentialBackOff) Reset() {
	b.exponentialBackOff.Reset()
}
// RetryWithContext tries the operation until it does not return error,
// or when the context expires, whichever happens first.
// operation is guaranteed to be run at least once.
// RetryWithContext sleeps the goroutine for the duration returned by BackOff
// after a failed operation returns.
func (b ExponentialBackOff) RetryWithContext(ctx context.Context, operation func() error) error {
	b.Reset()
	for {
		err := operation()
		if err == nil {
			return nil
		}
		next := b.NextBackOff()
		select {
		case <-ctx.Done():
			// Bug fix: report the actual context error (Canceled or
			// DeadlineExceeded). The original always reported
			// DeadlineExceeded, which was wrong for a canceled context.
			// %w additionally lets callers detect it with errors.Is.
			return fmt.Errorf("%w with last error: %v", ctx.Err(), err)
		case <-time.After(next):
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"encoding/json"
"errors"
"fmt"
"os"
"path"
"sort"
"strconv"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"google.golang.org/protobuf/types/known/structpb"
"google.golang.org/protobuf/types/known/wrapperspb"
"istio.io/api/annotation"
meshAPI "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/util/network"
"istio.io/istio/pkg/bootstrap/option"
"istio.io/istio/pkg/bootstrap/platform"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/kube/labels"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/version"
)
const (
	// IstioMetaPrefix is used to pass env vars as node metadata.
	IstioMetaPrefix = "ISTIO_META_"
	// IstioMetaJSONPrefix is used to pass annotations and similar environment info.
	IstioMetaJSONPrefix = "ISTIO_METAJSON_"
	// lightstepAccessTokenBase is the base file name for the Lightstep access token.
	lightstepAccessTokenBase = "lightstep_access_token.txt"
	// required stats are used by readiness checks.
	requiredEnvoyStatsMatcherInclusionPrefixes = "cluster_manager,listener_manager,server,cluster.xds-grpc,wasm"
	rbacEnvoyStatsMatcherInclusionSuffix       = "rbac.allowed,rbac.denied,shadow_allowed,shadow_denied"
	requiredEnvoyStatsMatcherInclusionSuffixes = rbacEnvoyStatsMatcherInclusionSuffix + ",downstream_cx_active" // Needed for draining.
	// required for metrics based on stat_prefix in virtual service.
	requiredEnvoyStatsMatcherInclusionRegexes = `vhost\.*\.route\.*`
	// Prefixes of V2 metrics.
	// "reporter" prefix is for istio standard metrics.
	// "component" suffix is for istio_build metric.
	v2Prefixes = "reporter=,"
	v2Suffix   = ",component,istio"
)

// envoyWellKnownCompressorLibrary is the set of compressor library names
// accepted for Envoy stats/compression configuration.
var envoyWellKnownCompressorLibrary = sets.String{
	"gzip":   {},
	"zstd":   {},
	"brotli": {},
}

// Config for creating a bootstrap file.
type Config struct {
	*model.Node
}
// toTemplateParams creates a new template configuration for the given configuration.
func (cfg Config) toTemplateParams() (map[string]any, error) {
	opts := make([]option.Instance, 0)
	// Only the host portion of the discovery address is passed to the template.
	discHost := strings.Split(cfg.Metadata.ProxyConfig.DiscoveryAddress, ":")[0]
	xdsType := "GRPC"
	if features.DeltaXds {
		xdsType = "DELTA_GRPC"
	}
	// Waypoint overrides: waypoint proxies always use delta XDS and metadata discovery,
	// regardless of the feature flag or metadata setting.
	metadataDiscovery := cfg.Metadata.MetadataDiscovery
	if strings.HasPrefix(cfg.ID, "waypoint~") {
		xdsType = "DELTA_GRPC"
		metadataDiscovery = true
	}
	opts = append(opts,
		option.NodeID(cfg.ID),
		option.NodeType(cfg.ID),
		option.PilotSubjectAltName(cfg.Metadata.PilotSubjectAltName),
		option.OutlierLogPath(cfg.Metadata.OutlierLogPath),
		option.DiscoveryHost(discHost),
		option.Metadata(cfg.Metadata),
		option.XdsType(xdsType),
		option.MetadataDiscovery(bool(metadataDiscovery)))
	// Add GCPProjectNumber to access in bootstrap template.
	md := cfg.Metadata.PlatformMetadata
	if projectNumber, found := md[platform.GCPProjectNumber]; found {
		opts = append(opts, option.GCPProjectNumber(projectNumber))
	}
	// Enable STS (secure token service) only when a valid, positive port was configured.
	if cfg.Metadata.StsPort != "" {
		stsPort, err := strconv.Atoi(cfg.Metadata.StsPort)
		if err == nil && stsPort > 0 {
			opts = append(opts,
				option.STSEnabled(true),
				option.STSPort(stsPort))
			md := cfg.Metadata.PlatformMetadata
			if projectID, found := md[platform.GCPProject]; found {
				opts = append(opts, option.GCPProjectID(projectID))
			}
		}
	}
	// Support passing extra info from node environment as metadata
	opts = append(opts, getNodeMetadataOptions(cfg.Node)...)
	// Check if nodeIP carries IPv4 or IPv6 and set up proxy accordingly
	if network.AllIPv4(cfg.Metadata.InstanceIPs) {
		// IPv4 only
		opts = append(opts,
			option.Localhost(option.LocalhostIPv4),
			option.Wildcard(option.WildcardIPv4),
			option.DNSLookupFamily(option.DNSLookupFamilyIPv4))
	} else if network.AllIPv6(cfg.Metadata.InstanceIPs) {
		// IPv6 only
		opts = append(opts,
			option.Localhost(option.LocalhostIPv6),
			option.Wildcard(option.WildcardIPv6),
			option.DNSLookupFamily(option.DNSLookupFamilyIPv6))
	} else {
		// Dual Stack
		if features.EnableDualStack {
			// If dual-stack, it may be [IPv4, IPv6] or [IPv6, IPv4]
			// So let the first ip family policy to decide its DNSLookupFamilyIP policy
			ipFamily, err := network.CheckIPFamilyTypeForFirstIPs(cfg.Metadata.InstanceIPs)
			if err != nil {
				return nil, err
			}
			if ipFamily == network.IPv6 {
				opts = append(opts,
					option.Localhost(option.LocalhostIPv6),
					option.Wildcard(option.WildcardIPv6),
					option.AdditionalWildCard(option.WildcardIPv4),
					option.DNSLookupFamily(option.DNSLookupFamilyIPS))
			} else {
				opts = append(opts,
					option.Localhost(option.LocalhostIPv4),
					option.Wildcard(option.WildcardIPv4),
					option.AdditionalWildCard(option.WildcardIPv6),
					option.DNSLookupFamily(option.DNSLookupFamilyIPS))
			}
			opts = append(opts, option.DualStack(true))
		} else {
			// keep the original logic if Dual Stack is disabled
			opts = append(opts,
				option.Localhost(option.LocalhostIPv4),
				option.Wildcard(option.WildcardIPv4),
				option.DNSLookupFamily(option.DNSLookupFamilyIPv4))
		}
	}
	proxyOpts, err := getProxyConfigOptions(cfg.Metadata)
	if err != nil {
		return nil, err
	}
	opts = append(opts, proxyOpts...)
	// Append LRS related options.
	opts = append(opts, option.LoadStatsConfigJSONStr(cfg.Node))
	// TODO: allow reading a file with additional metadata (for example if created with
	// 'envref'. This will allow Istio to generate the right config even if the pod info
	// is not available (in particular in some multi-cluster cases)
	return option.NewTemplateParams(opts...)
}
// substituteValues expands the placeholder varName inside each pattern with every
// entry of values, e.g. "http.{pod_ip}_" with values [10.3.3.3, 10.4.4.4] becomes
// [http.10.3.3.3_, http.10.4.4.4_]. Patterns without the placeholder pass through unchanged.
func substituteValues(patterns []string, varName string, values []string) []string {
	out := make([]string, 0, len(patterns))
	for _, p := range patterns {
		if strings.Contains(p, varName) {
			for _, v := range values {
				out = append(out, strings.ReplaceAll(p, varName, v))
			}
			continue
		}
		out = append(out, p)
	}
	return out
}
// getStatsOptions computes the Envoy stats matcher/tag bootstrap options from the
// proxy metadata, merging pod annotations, ProxyConfig stats settings, and the
// inclusion lists istio always requires (readiness checks, RBAC shadow metrics, etc.).
func getStatsOptions(meta *model.BootstrapNodeMetadata) []option.Instance {
	nodeIPs := meta.InstanceIPs
	config := meta.ProxyConfig
	tagAnno := meta.Annotations[annotation.SidecarExtraStatTags.Name]
	prefixAnno := meta.Annotations[annotation.SidecarStatsInclusionPrefixes.Name]
	regexAnno := meta.Annotations[annotation.SidecarStatsInclusionRegexps.Name] // renamed from RegexAnno: locals are lowerCamelCase
	suffixAnno := meta.Annotations[annotation.SidecarStatsInclusionSuffixes.Name]
	// parseOption merges an annotation value (preferred) or a ProxyConfig list with
	// the always-required entries, then substitutes {pod_ip} placeholders.
	parseOption := func(metaOption string, required string, proxyConfigOption []string) []string {
		var inclusionOption []string
		if len(metaOption) > 0 {
			inclusionOption = strings.Split(metaOption, ",")
		} else if proxyConfigOption != nil {
			// In case user relies on mixed usage of annotation and proxy config,
			// only consider proxy config if annotation is not set instead of merging.
			inclusionOption = proxyConfigOption
		}
		if len(required) > 0 {
			inclusionOption = append(inclusionOption, strings.Split(required, ",")...)
		}
		// At the sidecar we can limit downstream metrics collection to the inbound listener.
		// Inbound downstream metrics are named as: http.{pod_ip}_{port}.downstream_rq_*
		// Other outbound downstream metrics are numerous and not very interesting for a sidecar.
		// specifying http.{pod_ip}_ as a prefix will capture these downstream metrics.
		return substituteValues(inclusionOption, "{pod_ip}", nodeIPs)
	}
	// Collect extra stat tags from ProxyConfig and the annotation, skipping empties
	// and dropping duplicates.
	extraStatTags := make([]string, 0, len(config.ExtraStatTags))
	for _, tag := range config.ExtraStatTags {
		if tag != "" {
			extraStatTags = append(extraStatTags, tag)
		}
	}
	for _, tag := range strings.Split(tagAnno, ",") {
		if tag != "" {
			extraStatTags = append(extraStatTags, tag)
		}
	}
	extraStatTags = removeDuplicates(extraStatTags)
	var proxyConfigPrefixes, proxyConfigSuffixes, proxyConfigRegexps []string
	if config.ProxyStatsMatcher != nil {
		proxyConfigPrefixes = config.ProxyStatsMatcher.InclusionPrefixes
		proxyConfigSuffixes = config.ProxyStatsMatcher.InclusionSuffixes
		proxyConfigRegexps = config.ProxyStatsMatcher.InclusionRegexps
	}
	// The connection-drain suffix is only required when the proxy exits on zero
	// active connections.
	inclusionSuffixes := rbacEnvoyStatsMatcherInclusionSuffix
	if meta.ExitOnZeroActiveConnections {
		inclusionSuffixes = requiredEnvoyStatsMatcherInclusionSuffixes
	}
	// Optional per-prefix histogram bucket overrides from the annotation, sorted by
	// prefix for deterministic bootstrap output.
	var buckets []option.HistogramBucket
	if bucketsAnno, ok := meta.Annotations[annotation.SidecarStatsHistogramBuckets.Name]; ok {
		js := map[string][]float64{}
		err := json.Unmarshal([]byte(bucketsAnno), &js)
		if err == nil {
			for prefix, value := range js {
				buckets = append(buckets, option.HistogramBucket{Match: option.HistogramMatch{Prefix: prefix}, Buckets: value})
			}
			sort.Slice(buckets, func(i, j int) bool {
				return buckets[i].Match.Prefix < buckets[j].Match.Prefix
			})
		} else {
			// Fix: the format string previously had a single verb for two arguments.
			log.Warnf("Failed to unmarshal histogram buckets %q: %v", bucketsAnno, err)
		}
	}
	var compression string
	// TODO: move annotation to api repo
	if statsCompression, ok := meta.Annotations["sidecar.istio.io/statsCompression"]; ok && envoyWellKnownCompressorLibrary.Contains(statsCompression) {
		compression = statsCompression
	}
	return []option.Instance{
		option.EnvoyStatsMatcherInclusionPrefix(parseOption(prefixAnno,
			requiredEnvoyStatsMatcherInclusionPrefixes, proxyConfigPrefixes)),
		option.EnvoyStatsMatcherInclusionSuffix(parseOption(suffixAnno,
			inclusionSuffixes, proxyConfigSuffixes)),
		option.EnvoyStatsMatcherInclusionRegexp(parseOption(regexAnno, requiredEnvoyStatsMatcherInclusionRegexes, proxyConfigRegexps)),
		option.EnvoyExtraStatTags(extraStatTags),
		option.EnvoyHistogramBuckets(buckets),
		option.EnvoyStatsCompression(compression),
	}
}
// lightstepAccessTokenFile returns the path of the Lightstep access-token file
// inside the given config directory.
func lightstepAccessTokenFile(configDir string) string {
	return path.Join(configDir, lightstepAccessTokenBase)
}
// getNodeMetadataOptions assembles the bootstrap options derived from the node itself:
// locality, stats configuration, typed/raw metadata, runtime flags, and admin ports.
func getNodeMetadataOptions(node *model.Node) []option.Instance {
	// Add locality options.
	opts := getLocalityOptions(node.Locality)
	opts = append(opts, getStatsOptions(node.Metadata)...)
	return append(opts,
		option.NodeMetadata(node.Metadata, node.RawMetadata),
		option.RuntimeFlags(extractRuntimeFlags(node.Metadata.ProxyConfig)),
		option.EnvoyStatusPort(node.Metadata.EnvoyStatusPort),
		option.EnvoyPrometheusPort(node.Metadata.EnvoyPrometheusPort))
}
// StripFragment controls how Envoy treats the fragment (#...) part of request paths;
// see the inverted-condition note in extractRuntimeFlags for how it is applied.
var StripFragment = env.Register("HTTP_STRIP_FRAGMENT_FROM_PATH_UNSAFE_IF_DISABLED", true, "").Get()
// extractRuntimeFlags builds the Envoy runtime flag map: istio defaults overlaid with
// user-supplied ProxyConfig.RuntimeValues. An empty string value unsets a default
// flag, and the strings "true"/"false" are converted to booleans.
func extractRuntimeFlags(cfg *model.NodeMetaProxyConfig) map[string]any {
	// Setup defaults
	flags := map[string]any{
		"overload.global_downstream_max_connections": "2147483647",
		"re2.max_program_size.error_level":           "32768",
		"envoy.deprecated_features:envoy.config.listener.v3.Listener.hidden_envoy_deprecated_use_original_dst": true,
		"envoy.reloadable_features.http_reject_path_with_fragment":                                             false,
		"envoy.restart_features.use_eds_cache_for_ads":                                                         true,
	}
	if !StripFragment {
		// Note: the condition here is basically backwards. This was a mistake in the initial commit and cannot be reverted
		flags["envoy.reloadable_features.http_strip_fragment_from_path_unsafe_if_disabled"] = "false"
	}
	for key, val := range cfg.RuntimeValues {
		switch val {
		case "":
			// Envoy runtime doesn't see "" as a special value, so we use it to mean 'unset default flag'
			delete(flags, key)
		case "true":
			// Envoy used to allow everything as string but stopped in https://github.com/envoyproxy/envoy/issues/27434
			// However, our API always takes in strings; convert to bools for backwards compat.
			flags[key] = true
		case "false":
			flags[key] = false
		default:
			flags[key] = val
		}
	}
	return flags
}
// getLocalityOptions maps the node locality into region/zone/subzone bootstrap options.
// NOTE(review): l is dereferenced unconditionally; callers appear to always pass a
// non-nil locality — confirm before reusing this elsewhere.
func getLocalityOptions(l *core.Locality) []option.Instance {
	return []option.Instance{option.Region(l.Region), option.Zone(l.Zone), option.SubZone(l.SubZone)}
}
// getServiceCluster derives the Envoy service-cluster name from ProxyConfig.ClusterName,
// supporting both the explicit ServiceCluster form and the TracingServiceName schemes.
func getServiceCluster(metadata *model.BootstrapNodeMetadata) string {
	switch name := metadata.ProxyConfig.ClusterName.(type) {
	case *meshAPI.ProxyConfig_ServiceCluster:
		return serviceClusterOrDefault(name.ServiceCluster, metadata)
	case *meshAPI.ProxyConfig_TracingServiceName_:
		// Fall back to the generic proxy name when no workload name was recorded.
		workloadName := metadata.WorkloadName
		if workloadName == "" {
			workloadName = "istio-proxy"
		}
		switch name.TracingServiceName {
		case meshAPI.ProxyConfig_APP_LABEL_AND_NAMESPACE:
			return serviceClusterOrDefault("istio-proxy", metadata)
		case meshAPI.ProxyConfig_CANONICAL_NAME_ONLY:
			canonical, _ := labels.CanonicalService(metadata.Labels, workloadName)
			return serviceClusterOrDefault(canonical, metadata)
		case meshAPI.ProxyConfig_CANONICAL_NAME_AND_NAMESPACE:
			canonical, _ := labels.CanonicalService(metadata.Labels, workloadName)
			if metadata.Namespace != "" {
				return canonical + "." + metadata.Namespace
			}
			return serviceClusterOrDefault(canonical, metadata)
		default:
			return serviceClusterOrDefault("istio-proxy", metadata)
		}
	default:
		return serviceClusterOrDefault("istio-proxy", metadata)
	}
}
// serviceClusterOrDefault returns name when it is a meaningful, non-default cluster
// name; otherwise it falls back to the "app" label, then the workload name, then a
// plain "istio-proxy", each qualified with the namespace when one is known.
func serviceClusterOrDefault(name string, metadata *model.BootstrapNodeMetadata) string {
	if name != "" && name != "istio-proxy" {
		return name
	}
	if app, ok := metadata.Labels["app"]; ok {
		return app + "." + metadata.Namespace
	}
	if metadata.WorkloadName != "" {
		return metadata.WorkloadName + "." + metadata.Namespace
	}
	if metadata.Namespace == "" {
		return "istio-proxy"
	}
	return "istio-proxy." + metadata.Namespace
}
// getProxyConfigOptions converts ProxyConfig settings (discovery address, tracing,
// Envoy metrics service, and access-log service) into bootstrap template options.
// It may write the Lightstep access-token file as a side effect.
func getProxyConfigOptions(metadata *model.BootstrapNodeMetadata) ([]option.Instance, error) {
	config := metadata.ProxyConfig
	// Add a few misc options.
	opts := make([]option.Instance, 0)
	opts = append(opts, option.ProxyConfig(config),
		option.Cluster(getServiceCluster(metadata)),
		option.PilotGRPCAddress(config.DiscoveryAddress),
		option.DiscoveryAddress(config.DiscoveryAddress),
		option.StatsdAddress(config.StatsdUdpAddress),
		option.XDSRootCert(metadata.XDSRootCert))
	// Add tracing options.
	if config.Tracing != nil {
		isH2 := false
		switch tracer := config.Tracing.Tracer.(type) {
		case *meshAPI.Tracing_Zipkin_:
			opts = append(opts, option.ZipkinAddress(tracer.Zipkin.Address))
		case *meshAPI.Tracing_Lightstep_:
			isH2 = true
			// Write the token file.
			lightstepAccessTokenPath := lightstepAccessTokenFile(config.ConfigPath)
			//nolint: staticcheck // Lightstep deprecated
			err := os.WriteFile(lightstepAccessTokenPath, []byte(tracer.Lightstep.AccessToken), 0o666)
			if err != nil {
				return nil, err
			}
			opts = append(opts, option.LightstepAddress(tracer.Lightstep.Address),
				option.LightstepToken(lightstepAccessTokenPath))
		case *meshAPI.Tracing_Datadog_:
			opts = append(opts, option.DataDogAddress(tracer.Datadog.Address))
		case *meshAPI.Tracing_Stackdriver_:
			projectID, projFound := metadata.PlatformMetadata[platform.GCPProject]
			if !projFound {
				return nil, errors.New("unable to process Stackdriver tracer: missing GCP Project")
			}
			opts = append(opts, option.StackDriverEnabled(true),
				option.StackDriverProjectID(projectID),
				option.StackDriverDebug(tracer.Stackdriver.Debug),
				option.StackDriverMaxAnnotations(getInt64ValueOrDefault(tracer.Stackdriver.MaxNumberOfAnnotations, 200)),
				option.StackDriverMaxAttributes(getInt64ValueOrDefault(tracer.Stackdriver.MaxNumberOfAttributes, 200)),
				option.StackDriverMaxEvents(getInt64ValueOrDefault(tracer.Stackdriver.MaxNumberOfMessageEvents, 200)))
		case *meshAPI.Tracing_OpenCensusAgent_:
			c := tracer.OpenCensusAgent.Context
			opts = append(opts, option.OpenCensusAgentAddress(tracer.OpenCensusAgent.Address),
				option.OpenCensusAgentContexts(c))
		}
		opts = append(opts, option.TracingTLS(config.Tracing.TlsSettings, metadata, isH2))
	}
	// Add options for Envoy metrics.
	if config.EnvoyMetricsService != nil && config.EnvoyMetricsService.Address != "" {
		opts = append(opts, option.EnvoyMetricsServiceAddress(config.EnvoyMetricsService.Address),
			option.EnvoyMetricsServiceTLS(config.EnvoyMetricsService.TlsSettings, metadata),
			option.EnvoyMetricsServiceTCPKeepalive(config.EnvoyMetricsService.TcpKeepalive))
	} else if config.EnvoyMetricsServiceAddress != "" { // nolint: staticcheck
		// Fix: this deprecated-field branch runs when EnvoyMetricsService is nil (or has
		// no address), so it must read the legacy EnvoyMetricsServiceAddress field —
		// dereferencing config.EnvoyMetricsService here could panic.
		opts = append(opts, option.EnvoyMetricsServiceAddress(config.EnvoyMetricsServiceAddress)) // nolint: staticcheck
	}
	// Add options for Envoy access log.
	if config.EnvoyAccessLogService != nil && config.EnvoyAccessLogService.Address != "" {
		opts = append(opts, option.EnvoyAccessLogServiceAddress(config.EnvoyAccessLogService.Address),
			option.EnvoyAccessLogServiceTLS(config.EnvoyAccessLogService.TlsSettings, metadata),
			option.EnvoyAccessLogServiceTCPKeepalive(config.EnvoyAccessLogService.TcpKeepalive))
	}
	return opts, nil
}
// getInt64ValueOrDefault unwraps a proto Int64Value, returning defaultVal when unset.
func getInt64ValueOrDefault(src *wrapperspb.Int64Value, defaultVal int64) int64 {
	if src == nil {
		return defaultVal
	}
	return src.Value
}
// setMetaFunc stores one extracted key/value pair into the metadata map m.
type setMetaFunc func(m map[string]any, key string, val string)
// extractMetadata scans envs for entries carrying the given prefix and feeds each
// parsed key/value pair to set, accumulating results in meta. Entries without an
// "=" after the prefix are skipped.
func extractMetadata(envs []string, prefix string, set setMetaFunc, meta map[string]any) {
	for _, entry := range envs {
		if !shouldExtract(entry, prefix) {
			continue
		}
		trimmed := entry[len(prefix):]
		if !isEnvVar(trimmed) {
			continue
		}
		key, val := parseEnvVar(trimmed)
		set(meta, key, val)
	}
}
// shouldExtract reports whether the environment entry carries the metadata prefix.
func shouldExtract(entry, prefix string) bool {
	return strings.HasPrefix(entry, prefix)
}
// isEnvVar reports whether str looks like a KEY=VALUE environment entry.
func isEnvVar(str string) bool {
	return strings.IndexByte(str, '=') >= 0
}
// parseEnvVar splits a KEY=VALUE entry at the first "=", returning (entry, "")
// when no separator is present.
func parseEnvVar(varStr string) (string, string) {
	key, val, _ := strings.Cut(varStr, "=")
	return key, val
}
// jsonStringToMap decodes a JSON object of string->string values; malformed input is
// logged and whatever was decoded (possibly nil) is returned.
func jsonStringToMap(jsonStr string) map[string]string {
	var decoded map[string]string
	if err := json.Unmarshal([]byte(jsonStr), &decoded); err != nil {
		log.Warnf("Env variable with value %q failed json unmarshal: %v", jsonStr, err)
	}
	return decoded
}
// extractAttributesMetadata populates meta fields from well-known environment
// variables (JSON-encoded labels, pod name/namespace, service account) and attaches
// platform metadata when available.
func extractAttributesMetadata(envVars []string, plat platform.Environment, meta *model.BootstrapNodeMetadata) {
	for _, entry := range envVars {
		key, val := parseEnvVar(entry)
		switch key {
		case "ISTIO_METAJSON_LABELS":
			if lbls := jsonStringToMap(val); len(lbls) > 0 {
				meta.Labels = lbls
				meta.StaticLabels = lbls
			}
		case "POD_NAME":
			meta.InstanceName = val
		case "POD_NAMESPACE":
			meta.Namespace = val
		case "SERVICE_ACCOUNT":
			meta.ServiceAccount = val
		}
	}
	if plat != nil && len(plat.Metadata()) > 0 {
		meta.PlatformMetadata = plat.Metadata()
	}
}
// MetadataOptions for constructing node metadata.
type MetadataOptions struct {
	// Envs holds raw "KEY=VALUE" environment entries, scanned for ISTIO_META_* /
	// ISTIO_METAJSON_* metadata and well-known pod variables.
	Envs        []string
	Platform    platform.Environment
	InstanceIPs []string
	// StsPort, when non-zero, is recorded in node metadata for telemetry filters.
	StsPort             int
	ID                  string
	ProxyConfig         *meshAPI.ProxyConfig
	PilotSubjectAltName []string
	// CredentialSocketExists, when true, sets the credential metadata flag in raw metadata.
	CredentialSocketExists bool
	XDSRootCert            string
	OutlierLogPath         string
	// annotationFilePath optionally overrides the downward-API annotations file path.
	annotationFilePath          string
	EnvoyStatusPort             int
	EnvoyPrometheusPort         int
	ExitOnZeroActiveConnections bool
	MetadataDiscovery           bool
}
const (
	// DefaultDeploymentUniqueLabelKey is the default key of the selector that is added
	// to existing ReplicaSets (and the label key that is added to its pods) to prevent the
	// existing ReplicaSets from selecting new pods (and old pods being selected by the new ReplicaSet).
	DefaultDeploymentUniqueLabelKey string = "pod-template-hash"
)
// GetNodeMetaData function uses an environment variable contract
// ISTIO_METAJSON_* env variables contain json_string in the value.
// The name of variable is ignored.
// ISTIO_META_* env variables are passed through
func GetNodeMetaData(options MetadataOptions) (*model.Node, error) {
	meta := &model.BootstrapNodeMetadata{}
	untypedMeta := map[string]any{}
	// ProxyConfig.ProxyMetadata entries with the ISTIO_META_ prefix are collected
	// first; env vars processed below can overwrite them.
	for k, v := range options.ProxyConfig.GetProxyMetadata() {
		if strings.HasPrefix(k, IstioMetaPrefix) {
			untypedMeta[strings.TrimPrefix(k, IstioMetaPrefix)] = v
		}
	}
	// ISTIO_META_* values are stored verbatim under the trimmed key.
	extractMetadata(options.Envs, IstioMetaPrefix, func(m map[string]any, key string, val string) {
		m[key] = val
	}, untypedMeta)
	// ISTIO_METAJSON_* values are JSON objects merged into the map; the variable name
	// after the prefix is ignored.
	extractMetadata(options.Envs, IstioMetaJSONPrefix, func(m map[string]any, key string, val string) {
		err := json.Unmarshal([]byte(val), &m)
		if err != nil {
			log.Warnf("Env variable %s [%s] failed json unmarshal: %v", key, val, err)
		}
	}, untypedMeta)
	// Round-trip through JSON to populate the typed metadata struct from the untyped map.
	j, err := json.Marshal(untypedMeta)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(j, meta); err != nil {
		return nil, err
	}
	meta = SetIstioVersion(meta)
	// Support multiple network interfaces, removing duplicates.
	meta.InstanceIPs = removeDuplicates(options.InstanceIPs)
	// Add STS port into node metadata if it is not 0. This is read by envoy telemetry filters
	if options.StsPort != 0 {
		meta.StsPort = strconv.Itoa(options.StsPort)
	}
	meta.EnvoyStatusPort = options.EnvoyStatusPort
	meta.EnvoyPrometheusPort = options.EnvoyPrometheusPort
	meta.ExitOnZeroActiveConnections = model.StringBool(options.ExitOnZeroActiveConnections)
	meta.MetadataDiscovery = model.StringBool(options.MetadataDiscovery)
	meta.ProxyConfig = (*model.NodeMetaProxyConfig)(options.ProxyConfig)
	// Well-known env vars (POD_NAME, POD_NAMESPACE, ...) and platform metadata.
	extractAttributesMetadata(options.Envs, options.Platform, meta)
	// Add all instance labels with lower precedence than pod labels
	extractInstanceLabels(options.Platform, meta)
	// Add all pod labels found from filesystem
	// These are typically volume mounted by the downward API
	lbls, err := readPodLabels()
	if err == nil {
		meta.Labels = map[string]string{}
		for k, v := range meta.StaticLabels {
			meta.Labels[k] = v
		}
		for k, v := range lbls {
			// ignore `pod-template-hash` label
			if k == DefaultDeploymentUniqueLabelKey {
				continue
			}
			meta.Labels[k] = v
		}
	} else {
		// A NotExist error is only logged at debug level (presumably no downward-API
		// volume is mounted); anything else warrants a warning.
		if os.IsNotExist(err) {
			log.Debugf("failed to read pod labels: %v", err)
		} else {
			log.Warnf("failed to read pod labels: %v", err)
		}
	}
	// Add all pod annotations found from filesystem
	// These are typically volume mounted by the downward API
	annos, err := ReadPodAnnotations(options.annotationFilePath)
	if err == nil {
		if meta.Annotations == nil {
			meta.Annotations = map[string]string{}
		}
		for k, v := range annos {
			meta.Annotations[k] = v
		}
	} else {
		if os.IsNotExist(err) {
			log.Debugf("failed to read pod annotations: %v", err)
		} else {
			log.Warnf("failed to read pod annotations: %v", err)
		}
	}
	var l *core.Locality
	if meta.Labels[model.LocalityLabel] == "" && options.Platform != nil {
		// The locality string was not set, try to get locality from platform
		l = options.Platform.Locality()
	} else {
		// replace "." with "/"
		localityString := model.GetLocalityLabel(meta.Labels[model.LocalityLabel])
		if localityString != "" {
			// override the label with the sanitized value
			meta.Labels[model.LocalityLabel] = localityString
		}
		l = util.ConvertLocality(localityString)
	}
	meta.PilotSubjectAltName = options.PilotSubjectAltName
	meta.XDSRootCert = options.XDSRootCert
	meta.OutlierLogPath = options.OutlierLogPath
	// Surface the credential socket in raw metadata so templates/consumers can see it.
	if options.CredentialSocketExists {
		untypedMeta[security.CredentialMetaDataName] = "true"
	}
	return &model.Node{
		ID:          options.ID,
		Metadata:    meta,
		RawMetadata: untypedMeta,
		Locality:    l,
	}, nil
}
// SetIstioVersion fills in meta.IstioVersion from the build information when the
// metadata did not already carry one, and returns the same metadata for chaining.
func SetIstioVersion(meta *model.BootstrapNodeMetadata) *model.BootstrapNodeMetadata {
	if meta.IstioVersion == "" {
		meta.IstioVersion = version.Info.Version
	}
	return meta
}
// ConvertNodeToXDSNode creates an Envoy node descriptor from Istio node descriptor.
// Typed metadata is marshalled into a proto Struct first; untyped RawMetadata entries
// are then merged in only for keys the typed pass did not produce.
func ConvertNodeToXDSNode(node *model.Node) *core.Node {
	// First pass translates typed metadata
	meta := &structpb.Struct{}
	js, err := json.Marshal(node.Metadata)
	if err != nil {
		log.Warnf("Failed to marshal node metadata to JSON %#v: %v", node.Metadata, err)
	}
	if err = protomarshal.Unmarshal(js, meta); err != nil {
		log.Warnf("Failed to unmarshal node metadata from JSON %#v: %v", node.Metadata, err)
	}
	// Second pass translates untyped metadata for "unknown" fields
	for key, raw := range node.RawMetadata {
		if _, exists := meta.Fields[key]; exists {
			continue
		}
		encoded, err := json.Marshal(raw)
		if err != nil {
			log.Warnf("Failed to marshal field metadata to JSON %#v: %v", key, err)
		}
		val := &structpb.Value{}
		if err = protomarshal.Unmarshal(encoded, val); err != nil {
			log.Warnf("Failed to unmarshal field metadata from JSON %#v: %v", key, err)
		}
		meta.Fields[key] = val
	}
	return &core.Node{
		Id:       node.ID,
		Cluster:  getServiceCluster(node.Metadata),
		Locality: node.Locality,
		Metadata: meta,
	}
}
// ConvertXDSNodeToNode parses Istio node descriptor from an Envoy node descriptor, using only typed metadata.
func ConvertXDSNodeToNode(node *core.Node) *model.Node {
	meta := &model.BootstrapNodeMetadata{}
	b, err := protomarshal.MarshalProtoNames(node.Metadata)
	if err != nil {
		log.Warnf("Failed to marshal node metadata to JSON %q: %v", node.Metadata, err)
	}
	if err = json.Unmarshal(b, meta); err != nil {
		log.Warnf("Failed to unmarshal node metadata from JSON %q: %v", node.Metadata, err)
	}
	// Fall back to the Envoy cluster name when the metadata carried no ProxyConfig.
	if meta.ProxyConfig == nil {
		meta.ProxyConfig = &model.NodeMetaProxyConfig{
			ClusterName: &meshAPI.ProxyConfig_ServiceCluster{ServiceCluster: node.Cluster},
		}
	}
	return &model.Node{
		ID:       node.Id,
		Locality: node.Locality,
		Metadata: meta,
	}
}
// extractInstanceLabels copies platform instance labels into meta.StaticLabels,
// but only when not running on Kubernetes (Kubernetes pod labels come from the
// downward API instead).
func extractInstanceLabels(plat platform.Environment, meta *model.BootstrapNodeMetadata) {
	if plat == nil || meta == nil || plat.IsKubernetes() {
		return
	}
	if meta.StaticLabels == nil {
		meta.StaticLabels = map[string]string{}
	}
	for key, val := range plat.Labels() {
		meta.StaticLabels[key] = val
	}
}
// readPodLabels loads the downward-API labels file mounted into the pod.
func readPodLabels() (map[string]string, error) {
	raw, err := os.ReadFile(constants.PodInfoLabelsPath)
	if err != nil {
		return nil, err
	}
	return ParseDownwardAPI(string(raw))
}
// ReadPodAnnotations loads downward-API annotations from path, defaulting to the
// standard pod-info annotations mount when path is empty.
func ReadPodAnnotations(path string) (map[string]string, error) {
	if path == "" {
		path = constants.PodInfoAnnotationsPath
	}
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return ParseDownwardAPI(string(raw))
}
// ParseDownwardAPI parses fields which are stored as format `%s=%q` back to a map
func ParseDownwardAPI(i string) (map[string]string, error) {
	res := map[string]string{}
	for _, line := range strings.Split(i, "\n") {
		key, quoted, found := strings.Cut(line, "=")
		if !found {
			continue
		}
		// Values are written quoted (%q); unquote to recover the original text.
		val, err := strconv.Unquote(quoted)
		if err != nil {
			return nil, fmt.Errorf("failed to unquote %v: %v", quoted, err)
		}
		res[key] = val
	}
	return res, nil
}
// removeDuplicates returns values with duplicates dropped, preserving first-seen order.
func removeDuplicates(values []string) []string {
	seen := make(map[string]struct{}, len(values))
	out := make([]string, 0, len(values))
	for _, v := range values {
		if _, dup := seen[v]; dup {
			continue
		}
		seen[v] = struct{}{}
		out = append(out, v)
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bootstrap
import (
"encoding/json"
"fmt"
"io"
"os"
"path"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/log"
)
const (
	// EnvoyFileTemplate is a template for the root config JSON
	// (the %s is filled with the output extension, "json" or "yaml").
	EnvoyFileTemplate = "envoy-rev.%s"
	// DefaultCfgDir is the default path of the Envoy bootstrap template file.
	DefaultCfgDir = "./var/lib/istio/envoy/envoy_bootstrap_tmpl.json"
)
// TODO(nmittler): Move this to application code. This shouldn't be declared in a library.
// overrideVar lets the ISTIO_BOOTSTRAP env var override the configured bootstrap template path.
var overrideVar = env.Register("ISTIO_BOOTSTRAP", "", "")
// Instance of a configured Envoy bootstrap writer.
type Instance interface {
	// WriteTo writes the content of the Envoy bootstrap to the given writer.
	WriteTo(templateFile string, w io.Writer) error
	// CreateFile generates an Envoy bootstrap file and returns its path.
	CreateFile() (string, error)
}
// New creates a new Instance of an Envoy bootstrap writer.
func New(cfg Config) Instance {
	return &instance{Config: cfg}
}
// instance is the default Instance implementation, rendering bootstrap output from
// the embedded Config.
type instance struct {
	Config
}
// WriteTo renders the bootstrap template at templateFile with this instance's
// parameters and writes the result to w.
func (i *instance) WriteTo(templateFile string, w io.Writer) error {
	// Get the input bootstrap template.
	tmpl, err := newTemplate(templateFile)
	if err != nil {
		return err
	}
	// Create the parameters for the template.
	params, err := i.toTemplateParams()
	if err != nil {
		return err
	}
	// Execute the template.
	return tmpl.Execute(w, params)
}
// toJSON renders i as JSON, returning "{}" for nil input or on marshal failure
// (failures are logged, never propagated).
func toJSON(i any) string {
	if i == nil {
		return "{}"
	}
	out, err := json.Marshal(i)
	if err != nil {
		log.Warnf("Unable to marshal %v: %v", i, err)
		return "{}"
	}
	return string(out)
}
// GetEffectiveTemplatePath gets the template file that should be used for bootstrap:
// the ISTIO_BOOTSTRAP env override wins, then the custom config file, then the
// ProxyConfig bootstrap template path, then the built-in default.
func GetEffectiveTemplatePath(pc *model.NodeMetaProxyConfig) string {
	if override := overrideVar.Get(); len(override) > 0 {
		return override
	}
	switch {
	case pc.CustomConfigFile != "":
		return pc.CustomConfigFile
	case pc.ProxyBootstrapTemplatePath != "":
		return pc.ProxyBootstrapTemplatePath
	default:
		return DefaultCfgDir
	}
}
// CreateFile renders the Envoy bootstrap into ProxyConfig.ConfigPath and returns
// the path of the generated file.
func (i *instance) CreateFile() (string, error) {
	// Create the output directory (and parents) if needed.
	if err := os.MkdirAll(i.Metadata.ProxyConfig.ConfigPath, 0o700); err != nil {
		return "", err
	}
	templateFile := GetEffectiveTemplatePath(i.Metadata.ProxyConfig)
	outputFilePath := configFile(i.Metadata.ProxyConfig.ConfigPath, templateFile)
	outputFile, err := os.Create(outputFilePath)
	if err != nil {
		return "", err
	}
	defer func() { _ = outputFile.Close() }()
	// Write the content of the file.
	if err := i.WriteTo(templateFile, outputFile); err != nil {
		return "", err
	}
	// Fix: previously returned the stale (always-nil at this point) outer `err`
	// variable; return nil explicitly for clarity.
	return outputFilePath, nil
}
// configFile returns the output bootstrap file path inside config, choosing a .json
// or .yaml extension based on the template name (Envoy infers the format from it).
func configFile(config string, templateFile string) string {
	ext := "json"
	// Envoy will interpret the file extension to determine the type. We should detect yaml inputs
	if strings.HasSuffix(templateFile, ".yaml.tmpl") || strings.HasSuffix(templateFile, ".yaml") {
		ext = "yaml"
	}
	return path.Join(config, fmt.Sprintf(EnvoyFileTemplate, ext))
}
// newTemplate loads and parses the bootstrap template file, registering the local
// toJSON helper and the sprig function library.
func newTemplate(templateFilePath string) (*template.Template, error) {
	raw, err := os.ReadFile(templateFilePath)
	if err != nil {
		return nil, err
	}
	return template.New("bootstrap").
		Funcs(template.FuncMap{"toJSON": toJSON}).
		Funcs(sprig.GenericFuncMap()).
		Parse(string(raw))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package option
import (
"encoding/json"
"fmt"
"net"
"os"
"strings"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
auth "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
"github.com/envoyproxy/go-control-plane/pkg/conversion"
"google.golang.org/protobuf/types/known/durationpb"
pstruct "google.golang.org/protobuf/types/known/structpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
meshAPI "istio.io/api/mesh/v1alpha1"
networkingAPI "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/util"
authn_model "istio.io/istio/pilot/pkg/security/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/wellknown"
)
// TransportSocket wraps UpstreamTLSContext for JSON rendering into the bootstrap
// (field names mirror Envoy's transport_socket: name + typed_config).
type TransportSocket struct {
	Name        string          `json:"name,omitempty"`
	TypedConfig *pstruct.Struct `json:"typed_config,omitempty"`
}
// keepaliveConverter builds a convertFunc that renders TCP keepalive settings as the
// JSON form of an Envoy UpstreamConnectionOptions message; zero/absent fields are omitted.
func keepaliveConverter(value *networkingAPI.ConnectionPoolSettings_TCPSettings_TcpKeepalive) convertFunc {
	return func(*instance) (any, error) {
		keepalive := &core.TcpKeepalive{}
		if value.Probes > 0 {
			keepalive.KeepaliveProbes = &wrappers.UInt32Value{Value: value.Probes}
		}
		if value.Time != nil && value.Time.Seconds > 0 {
			keepalive.KeepaliveTime = &wrappers.UInt32Value{Value: uint32(value.Time.Seconds)}
		}
		if value.Interval != nil && value.Interval.Seconds > 0 {
			keepalive.KeepaliveInterval = &wrappers.UInt32Value{Value: uint32(value.Interval.Seconds)}
		}
		return convertToJSON(&cluster.UpstreamConnectionOptions{TcpKeepalive: keepalive}), nil
	}
}
// transportSocketConverter builds a convertFunc producing the JSON transport socket
// for the given client TLS settings, or "" when TLS is disabled. ALPN protocols are
// stripped unless the upstream speaks HTTP/2.
func transportSocketConverter(tls *networkingAPI.ClientTLSSettings, sniName string, metadata *model.BootstrapNodeMetadata, isH2 bool) convertFunc {
	return func(*instance) (any, error) {
		tlsContext := tlsContextConvert(tls, sniName, metadata)
		if tlsContext == nil {
			return "", nil
		}
		if !isH2 {
			tlsContext.CommonTlsContext.AlpnProtocols = nil
		}
		// This double conversion is to encode the typed config and get it out as struct
		// so that convertToJSON properly encodes the structure. Since this is just for
		// bootstrap generation this is better than having our custom structs.
		// The conversion error is discarded (best-effort for bootstrap generation).
		tlsContextStruct, _ := conversion.MessageToStruct(protoconv.MessageToAny(tlsContext))
		return convertToJSON(&TransportSocket{
			Name:        wellknown.TransportSocketTLS,
			TypedConfig: tlsContextStruct,
		}), nil
	}
}
// TODO(ramaraochavali): Unify this code with cluster upstream TLS settings logic.
// tlsContextConvert builds the Envoy UpstreamTlsContext for the given client TLS mode,
// or nil when TLS is disabled. Certificate paths from node metadata take precedence
// over those in the TLS settings (via model.GetOrDefault).
func tlsContextConvert(tls *networkingAPI.ClientTLSSettings, sniName string, metadata *model.BootstrapNodeMetadata) *auth.UpstreamTlsContext {
	tlsContext := &auth.UpstreamTlsContext{
		CommonTlsContext: &auth.CommonTlsContext{},
	}
	switch tls.Mode {
	case networkingAPI.ClientTLSSettings_SIMPLE:
		// Server verification only: configure the root CA through SDS.
		res := security.SdsCertificateConfig{
			CaCertificatePath: model.GetOrDefault(metadata.TLSClientRootCert, tls.CaCertificates),
		}
		tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext:         &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
				ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(res.GetRootResourceName()),
			},
		}
		tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
		tlsContext.Sni = tls.Sni
	case networkingAPI.ClientTLSSettings_MUTUAL:
		// Mutual TLS with user-provided certificates: client cert/key plus root CA via SDS.
		res := security.SdsCertificateConfig{
			CertificatePath:   model.GetOrDefault(metadata.TLSClientCertChain, tls.ClientCertificate),
			PrivateKeyPath:    model.GetOrDefault(metadata.TLSClientKey, tls.PrivateKey),
			CaCertificatePath: model.GetOrDefault(metadata.TLSClientRootCert, tls.CaCertificates),
		}
		if len(res.GetResourceName()) > 0 {
			tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
				authn_model.ConstructSdsSecretConfig(res.GetResourceName()))
		}
		tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext:         &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
				ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(res.GetRootResourceName()),
			},
		}
		tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNH2Only
		tlsContext.Sni = tls.Sni
	case networkingAPI.ClientTLSSettings_ISTIO_MUTUAL:
		// Istio mutual TLS: use the default istio SDS resources for cert and root.
		tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs = append(tlsContext.CommonTlsContext.TlsCertificateSdsSecretConfigs,
			authn_model.ConstructSdsSecretConfig(authn_model.SDSDefaultResourceName))
		tlsContext.CommonTlsContext.ValidationContextType = &auth.CommonTlsContext_CombinedValidationContext{
			CombinedValidationContext: &auth.CommonTlsContext_CombinedCertificateValidationContext{
				DefaultValidationContext:         &auth.CertificateValidationContext{MatchSubjectAltNames: util.StringToExactMatch(tls.SubjectAltNames)},
				ValidationContextSdsSecretConfig: authn_model.ConstructSdsSecretConfig(authn_model.SDSRootResourceName),
			},
		}
		tlsContext.CommonTlsContext.AlpnProtocols = util.ALPNInMeshH2
		tlsContext.Sni = tls.Sni
		// For ISTIO_MUTUAL if custom SNI is not provided, use the default SNI name.
		if len(tls.Sni) == 0 {
			tlsContext.Sni = sniName
		}
	default:
		// No TLS.
		return nil
	}
	return tlsContext
}
// nodeMetadataConverter returns a convertFunc that serializes the combined
// typed and raw node metadata into a JSON string for template substitution.
func nodeMetadataConverter(metadata *model.BootstrapNodeMetadata, rawMeta map[string]any) convertFunc {
	return func(*instance) (any, error) {
		out, err := marshalMetadata(metadata, rawMeta)
		if err != nil {
			return "", err
		}
		return out, nil
	}
}
// sanConverter returns a convertFunc that renders the subject alternative
// names as a JSON array of exact-match matchers, e.g. [{"exact":"a"},...].
func sanConverter(sans []string) convertFunc {
	return func(*instance) (any, error) {
		matchers := make([]string, 0, len(sans))
		for _, san := range sans {
			matchers = append(matchers, fmt.Sprintf(`{"exact":"%s"}`, san))
		}
		return fmt.Sprintf("[%s]", strings.Join(matchers, ",")), nil
	}
}
// addressConverter returns a convertFunc that renders a host:port string as
// an Envoy socket-address JSON fragment.
func addressConverter(addr string) convertFunc {
	return func(o *instance) (any, error) {
		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			return nil, fmt.Errorf("unable to parse %s address %q: %v", o.name, addr, err)
		}
		// Replace a literal "$(HOST_IP)" host with the HOST_IP env var.
		// This supports tracer settings (Datadog, Zipkin) that use "$(HOST_IP)"
		// as the address: the value used to be interpolated via the pod's
		// HOST_IP env var in proxy container params, but tracer config now
		// arrives via the mesh config volume where k8s env var interpretation
		// does not happen, so we reproduce that interpretation here.
		if host == "$(HOST_IP)" {
			if hostIP := os.Getenv("HOST_IP"); hostIP != "" {
				host = hostIP
			}
		}
		return fmt.Sprintf(`{"address": "%s", "port_value": %s}`, host, port), nil
	}
}
// jsonConverter returns a convertFunc that marshals the given value to its
// JSON string form.
func jsonConverter(d any) convertFunc {
	return func(*instance) (any, error) {
		out, err := json.Marshal(d)
		return string(out), err
	}
}
// durationConverter returns a convertFunc that renders a protobuf duration in
// Go's duration string form (e.g. "5s").
func durationConverter(value *durationpb.Duration) convertFunc {
	return func(*instance) (any, error) {
		d := value.AsDuration()
		return d.String(), nil
	}
}
// openCensusAgentContextConverter returns a converter that returns the list of
// distributed trace contexts to propagate with envoy. An empty input selects
// every supported context.
func openCensusAgentContextConverter(contexts []meshAPI.Tracing_OpenCensusAgent_TraceContext) convertFunc {
	allContexts := `["TRACE_CONTEXT","GRPC_TRACE_BIN","CLOUD_TRACE_CONTEXT","B3"]`
	return func(*instance) (any, error) {
		if len(contexts) == 0 {
			return allContexts, nil
		}
		// Map each known context to its Envoy name; UNSPECIFIED (and any
		// unknown value) is silently dropped, matching the API contract.
		names := map[meshAPI.Tracing_OpenCensusAgent_TraceContext]string{
			meshAPI.Tracing_OpenCensusAgent_W3C_TRACE_CONTEXT:   "TRACE_CONTEXT",
			meshAPI.Tracing_OpenCensusAgent_GRPC_BIN:            "GRPC_TRACE_BIN",
			meshAPI.Tracing_OpenCensusAgent_CLOUD_TRACE_CONTEXT: "CLOUD_TRACE_CONTEXT",
			meshAPI.Tracing_OpenCensusAgent_B3:                  "B3",
		}
		var envoyContexts []string
		for _, c := range contexts {
			if name, ok := names[c]; ok {
				envoyContexts = append(envoyContexts, name)
			}
		}
		return convertToJSON(envoyContexts), nil
	}
}
// convertToJSON marshals v to its JSON string form, returning "" for a nil
// value or on a marshaling error (which is logged).
func convertToJSON(v any) string {
	if v == nil {
		return ""
	}
	b, err := json.Marshal(v)
	if err == nil {
		return string(b)
	}
	log.Error(err.Error())
	return ""
}
// marshalMetadata combines typed metadata and untyped metadata and marshals to json
// This allows passing arbitrary metadata to Envoy, while still supporting typed metadata for known types.
// Raw keys never override keys produced by the typed metadata, so modifications
// made to the typed metadata are preserved.
func marshalMetadata(metadata *model.BootstrapNodeMetadata, rawMeta map[string]any) (string, error) {
	b, err := json.Marshal(metadata)
	if err != nil {
		return "", err
	}
	var output map[string]any
	if err := json.Unmarshal(b, &output); err != nil {
		return "", err
	}
	if output == nil {
		// A nil metadata pointer marshals to JSON "null", which unmarshals to
		// a nil map; allocate so the raw-metadata merge below cannot panic.
		output = make(map[string]any, len(rawMeta))
	}
	// Add all untyped metadata
	for k, v := range rawMeta {
		// Do not override fields, as we may have made modifications to the type metadata
		// This means we will only add "unknown" fields here
		if _, f := output[k]; !f {
			output[k] = v
		}
	}
	res, err := json.Marshal(output)
	if err != nil {
		return "", err
	}
	return string(res), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package option
import (
"reflect"
"google.golang.org/protobuf/types/known/durationpb"
networkingAPI "istio.io/api/networking/v1alpha3"
)
// NewTemplateParams creates a new golang template parameter map from the given list of options.
// It fails on the first option that cannot be applied.
func NewTemplateParams(is ...Instance) (map[string]any, error) {
	params := map[string]any{}
	for _, opt := range is {
		if err := opt.apply(params); err != nil {
			return nil, err
		}
	}
	return params, nil
}
// Name unique name for an option.
type Name string

// String returns the name as a plain string.
func (n Name) String() string {
	return string(n)
}

// Instance of a bootstrap option.
type Instance interface {
	Name() Name
	// apply this option to the given template parameter map.
	apply(map[string]any) error
}

// Compile-time check that *instance satisfies Instance.
var _ Instance = &instance{}

type (
	// convertFunc converts an option's raw value into the value that is
	// stored in the template parameter map.
	convertFunc func(*instance) (any, error)
	// applyFunc writes an option into the template parameter map.
	applyFunc func(map[string]any, *instance) error
)
// instance is the sole implementation of Instance: a named option with a
// conversion step (convertFn) and an application step (applyFn).
type instance struct {
	name      Name
	convertFn convertFunc
	applyFn   applyFunc
}

// Name returns the option's unique name.
func (i *instance) Name() Name {
	return i.name
}

// withConvert returns a shallow copy of the option with its conversion
// function replaced; the receiver is left unmodified.
func (i *instance) withConvert(fn convertFunc) *instance {
	out := *i
	out.convertFn = fn
	return &out
}

// apply writes this option into the given template parameter map.
func (i *instance) apply(params map[string]any) error {
	return i.applyFn(params, i)
}
// newOption creates a basic option that stores value under name in the
// template parameter map, after passing it through the option's (possibly
// replaced) conversion function.
func newOption(name Name, value any) *instance {
	return &instance{
		name: name,
		convertFn: func(*instance) (any, error) {
			return value, nil
		},
		applyFn: func(params map[string]any, o *instance) error {
			converted, err := o.convertFn(o)
			if err != nil {
				return err
			}
			params[o.name.String()] = converted
			return nil
		},
	}
}
// skipOption creates a placeholder option that will not be applied to the output template map.
// Its conversion yields nil and its apply step is a no-op.
func skipOption(name Name) *instance {
	return &instance{
		name: name,
		convertFn: func(*instance) (any, error) {
			return nil, nil
		},
		applyFn: func(map[string]any, *instance) error {
			// Don't apply the option.
			return nil
		},
	}
}
// newStringArrayOptionOrSkipIfEmpty creates an option for a string slice,
// skipping it entirely when the slice is empty.
func newStringArrayOptionOrSkipIfEmpty(name Name, value []string) *instance {
	if len(value) > 0 {
		return newOption(name, value)
	}
	return skipOption(name)
}
// newOptionOrSkipIfZero creates an option that is only applied when value is
// non-zero for its type (per reflect.Value.IsZero).
func newOptionOrSkipIfZero(name Name, value any) *instance {
	v := reflect.ValueOf(value)
	// reflect.ValueOf(nil) yields the invalid zero Value, and calling IsZero
	// on it panics; treat an untyped nil as zero and skip the option.
	if !v.IsValid() || v.IsZero() {
		return skipOption(name)
	}
	return newOption(name, value)
}
// newDurationOption creates an option rendered via durationConverter (Go
// duration string form); the option is skipped when value is nil.
func newDurationOption(name Name, value *durationpb.Duration) *instance {
	return newOptionOrSkipIfZero(name, value).withConvert(durationConverter(value))
}

// newTCPKeepaliveOption creates an option rendered via keepaliveConverter;
// the option is skipped when value is nil.
func newTCPKeepaliveOption(name Name, value *networkingAPI.ConnectionPoolSettings_TCPSettings_TcpKeepalive) *instance {
	return newOptionOrSkipIfZero(name, value).withConvert(keepaliveConverter(value))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package option
import (
"strings"
"google.golang.org/protobuf/types/known/durationpb"
meshAPI "istio.io/api/mesh/v1alpha1"
networkingAPI "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/model"
)
type (
	// LocalhostValue is the loopback address literal for the chosen IP family.
	LocalhostValue string
	// WildcardValue is the any-address literal for the chosen IP family.
	WildcardValue string
	// DNSLookupFamilyValue is the Envoy DNS lookup family setting.
	DNSLookupFamilyValue string
)

const (
	LocalhostIPv4       LocalhostValue       = "127.0.0.1"
	LocalhostIPv6       LocalhostValue       = "::1"
	WildcardIPv4        WildcardValue        = "0.0.0.0"
	WildcardIPv6        WildcardValue        = "::"
	DNSLookupFamilyIPv4 DNSLookupFamilyValue = "V4_ONLY"
	DNSLookupFamilyIPv6 DNSLookupFamilyValue = "V6_ONLY"
	DNSLookupFamilyIPS  DNSLookupFamilyValue = "ALL"
)
// ProxyConfig creates the "config" option holding the node proxy config.
func ProxyConfig(value *model.NodeMetaProxyConfig) Instance {
	return newOption("config", value)
}

// PilotSubjectAltName creates the "pilot_SAN" option, rendered as a JSON
// array of exact-match matchers.
func PilotSubjectAltName(value []string) Instance {
	return newOption("pilot_SAN", value).withConvert(sanConverter(value))
}

// ConnectTimeout creates the "connect_timeout" option; skipped when nil.
func ConnectTimeout(value *durationpb.Duration) Instance {
	return newDurationOption("connect_timeout", value)
}

// Cluster creates the "cluster" option.
func Cluster(value string) Instance {
	return newOption("cluster", value)
}

// NodeID creates the "nodeID" option.
func NodeID(value string) Instance {
	return newOption("nodeID", value)
}

// NodeType creates the "nodeType" option from the first "~"-separated
// segment of the node ID.
func NodeType(value string) Instance {
	ntype := strings.Split(value, "~")[0]
	return newOption("nodeType", ntype)
}

// XdsType creates the "xds_type" option.
func XdsType(value string) Instance {
	return newOption("xds_type", value)
}

// Region creates the "region" locality option; skipped when empty.
func Region(value string) Instance {
	return newOptionOrSkipIfZero("region", value)
}

// Zone creates the "zone" locality option; skipped when empty.
func Zone(value string) Instance {
	return newOptionOrSkipIfZero("zone", value)
}

// SubZone creates the "sub_zone" locality option; skipped when empty.
func SubZone(value string) Instance {
	return newOptionOrSkipIfZero("sub_zone", value)
}

// NodeMetadata creates the "meta_json_str" option, combining typed and raw
// node metadata into one JSON string; skipped when meta is nil.
func NodeMetadata(meta *model.BootstrapNodeMetadata, rawMeta map[string]any) Instance {
	return newOptionOrSkipIfZero("meta_json_str", meta).withConvert(nodeMetadataConverter(meta, rawMeta))
}
// RuntimeFlags creates the "runtime_flags" option as JSON; skipped when empty.
func RuntimeFlags(flags map[string]any) Instance {
	return newOptionOrSkipIfZero("runtime_flags", flags).withConvert(jsonConverter(flags))
}

// DiscoveryAddress creates the "discovery_address" option.
func DiscoveryAddress(value string) Instance {
	return newOption("discovery_address", value)
}

// XDSRootCert creates the "xds_root_cert" option.
func XDSRootCert(value string) Instance {
	return newOption("xds_root_cert", value)
}

// Localhost creates the "localhost" option.
func Localhost(value LocalhostValue) Instance {
	return newOption("localhost", value)
}

// Wildcard creates the "wildcard" option.
func Wildcard(value WildcardValue) Instance {
	return newOption("wildcard", value)
}

// AdditionalWildCard creates the "additional_wildcard" option.
func AdditionalWildCard(value WildcardValue) Instance {
	return newOption("additional_wildcard", value)
}

// DualStack creates the "dual_stack" option.
func DualStack(value bool) Instance {
	return newOption("dual_stack", value)
}

// DNSLookupFamily creates the "dns_lookup_family" option.
func DNSLookupFamily(value DNSLookupFamilyValue) Instance {
	return newOption("dns_lookup_family", value)
}

// OutlierLogPath creates the "outlier_log_path" option; skipped when empty.
func OutlierLogPath(value string) Instance {
	return newOptionOrSkipIfZero("outlier_log_path", value)
}

// LightstepAddress creates the "lightstep" option as a socket-address JSON
// fragment; skipped when empty.
func LightstepAddress(value string) Instance {
	return newOptionOrSkipIfZero("lightstep", value).withConvert(addressConverter(value))
}

// LightstepToken creates the "lightstepToken" option.
func LightstepToken(value string) Instance {
	return newOption("lightstepToken", value)
}

// OpenCensusAgentAddress creates the "openCensusAgent" option; skipped when empty.
func OpenCensusAgentAddress(value string) Instance {
	return newOptionOrSkipIfZero("openCensusAgent", value)
}

// OpenCensusAgentContexts creates the "openCensusAgentContexts" option listing
// the trace contexts to propagate; an empty list selects all of them.
func OpenCensusAgentContexts(value []meshAPI.Tracing_OpenCensusAgent_TraceContext) Instance {
	return newOption("openCensusAgentContexts", value).
		withConvert(openCensusAgentContextConverter(value))
}
// StackDriverEnabled creates the "stackdriver" option.
func StackDriverEnabled(value bool) Instance {
	return newOption("stackdriver", value)
}

// StackDriverProjectID creates the "stackdriverProjectID" option.
func StackDriverProjectID(value string) Instance {
	return newOption("stackdriverProjectID", value)
}

// StackDriverDebug creates the "stackdriverDebug" option.
func StackDriverDebug(value bool) Instance {
	return newOption("stackdriverDebug", value)
}

// StackDriverMaxAnnotations creates the "stackdriverMaxAnnotations" option.
func StackDriverMaxAnnotations(value int64) Instance {
	return newOption("stackdriverMaxAnnotations", value)
}

// StackDriverMaxAttributes creates the "stackdriverMaxAttributes" option.
func StackDriverMaxAttributes(value int64) Instance {
	return newOption("stackdriverMaxAttributes", value)
}

// StackDriverMaxEvents creates the "stackdriverMaxEvents" option.
func StackDriverMaxEvents(value int64) Instance {
	return newOption("stackdriverMaxEvents", value)
}

// PilotGRPCAddress creates the "pilot_grpc_address" option as a
// socket-address JSON fragment; skipped when empty.
func PilotGRPCAddress(value string) Instance {
	return newOptionOrSkipIfZero("pilot_grpc_address", value).withConvert(addressConverter(value))
}

// ZipkinAddress creates the "zipkin" option as a socket-address JSON
// fragment; skipped when empty.
func ZipkinAddress(value string) Instance {
	return newOptionOrSkipIfZero("zipkin", value).withConvert(addressConverter(value))
}

// DataDogAddress creates the "datadog" option as a socket-address JSON
// fragment; skipped when empty.
func DataDogAddress(value string) Instance {
	return newOptionOrSkipIfZero("datadog", value).withConvert(addressConverter(value))
}

// StatsdAddress creates the "statsd" option as a socket-address JSON
// fragment; skipped when empty.
func StatsdAddress(value string) Instance {
	return newOptionOrSkipIfZero("statsd", value).withConvert(addressConverter(value))
}
// TracingTLS creates the "tracing_tls" option holding the tracer's transport
// socket configuration; skipped when value is nil.
func TracingTLS(value *networkingAPI.ClientTLSSettings, metadata *model.BootstrapNodeMetadata, isH2 bool) Instance {
	return newOptionOrSkipIfZero("tracing_tls", value).
		withConvert(transportSocketConverter(value, "tracer", metadata, isH2))
}

// EnvoyMetricsServiceAddress creates the "envoy_metrics_service_address"
// option as a socket-address JSON fragment; skipped when empty.
func EnvoyMetricsServiceAddress(value string) Instance {
	return newOptionOrSkipIfZero("envoy_metrics_service_address", value).withConvert(addressConverter(value))
}

// EnvoyMetricsServiceTLS creates the "envoy_metrics_service_tls" option;
// skipped when value is nil.
func EnvoyMetricsServiceTLS(value *networkingAPI.ClientTLSSettings, metadata *model.BootstrapNodeMetadata) Instance {
	return newOptionOrSkipIfZero("envoy_metrics_service_tls", value).
		withConvert(transportSocketConverter(value, "envoy_metrics_service", metadata, true))
}

// EnvoyMetricsServiceTCPKeepalive creates the
// "envoy_metrics_service_tcp_keepalive" option; skipped when value is nil.
func EnvoyMetricsServiceTCPKeepalive(value *networkingAPI.ConnectionPoolSettings_TCPSettings_TcpKeepalive) Instance {
	return newTCPKeepaliveOption("envoy_metrics_service_tcp_keepalive", value)
}

// EnvoyAccessLogServiceAddress creates the "envoy_accesslog_service_address"
// option as a socket-address JSON fragment; skipped when empty.
func EnvoyAccessLogServiceAddress(value string) Instance {
	return newOptionOrSkipIfZero("envoy_accesslog_service_address", value).withConvert(addressConverter(value))
}

// EnvoyAccessLogServiceTLS creates the "envoy_accesslog_service_tls" option;
// skipped when value is nil.
func EnvoyAccessLogServiceTLS(value *networkingAPI.ClientTLSSettings, metadata *model.BootstrapNodeMetadata) Instance {
	return newOptionOrSkipIfZero("envoy_accesslog_service_tls", value).
		withConvert(transportSocketConverter(value, "envoy_accesslog_service", metadata, true))
}

// EnvoyAccessLogServiceTCPKeepalive creates the
// "envoy_accesslog_service_tcp_keepalive" option; skipped when value is nil.
func EnvoyAccessLogServiceTCPKeepalive(value *networkingAPI.ConnectionPoolSettings_TCPSettings_TcpKeepalive) Instance {
	return newTCPKeepaliveOption("envoy_accesslog_service_tcp_keepalive", value)
}

// EnvoyExtraStatTags creates the "extraStatTags" option; skipped when empty.
func EnvoyExtraStatTags(value []string) Instance {
	return newStringArrayOptionOrSkipIfEmpty("extraStatTags", value)
}

// EnvoyStatsMatcherInclusionPrefix creates the "inclusionPrefix" option;
// skipped when empty.
func EnvoyStatsMatcherInclusionPrefix(value []string) Instance {
	return newStringArrayOptionOrSkipIfEmpty("inclusionPrefix", value)
}

// EnvoyStatsMatcherInclusionSuffix creates the "inclusionSuffix" option;
// skipped when empty.
func EnvoyStatsMatcherInclusionSuffix(value []string) Instance {
	return newStringArrayOptionOrSkipIfEmpty("inclusionSuffix", value)
}

// EnvoyStatsMatcherInclusionRegexp creates the "inclusionRegexps" option;
// skipped when empty.
func EnvoyStatsMatcherInclusionRegexp(value []string) Instance {
	return newStringArrayOptionOrSkipIfEmpty("inclusionRegexps", value)
}
// EnvoyStatusPort creates the "envoy_status_port" option.
func EnvoyStatusPort(value int) Instance {
	return newOption("envoy_status_port", value)
}

// EnvoyPrometheusPort creates the "envoy_prometheus_port" option.
func EnvoyPrometheusPort(value int) Instance {
	return newOption("envoy_prometheus_port", value)
}

// STSPort creates the "sts_port" option.
func STSPort(value int) Instance {
	return newOption("sts_port", value)
}

// GCPProjectID creates the "gcp_project_id" option.
func GCPProjectID(value string) Instance {
	return newOption("gcp_project_id", value)
}

// GCPProjectNumber creates the "gcp_project_number" option.
func GCPProjectNumber(value string) Instance {
	return newOption("gcp_project_number", value)
}

// Metadata creates the "metadata" option holding the typed node metadata.
func Metadata(meta *model.BootstrapNodeMetadata) Instance {
	return newOption("metadata", meta)
}

// STSEnabled creates the "sts" option.
func STSEnabled(value bool) Instance {
	return newOption("sts", value)
}

// DiscoveryHost creates the "discovery_host" option.
func DiscoveryHost(value string) Instance {
	return newOption("discovery_host", value)
}

// MetadataDiscovery creates the "metadata_discovery" option.
func MetadataDiscovery(value bool) Instance {
	return newOption("metadata_discovery", value)
}
// LoadStatsConfigJSONStr creates the "load_stats_config_json_str" option for
// configuring the Load Reporting Service, taken verbatim from the node's raw
// LOAD_STATS_CONFIG_JSON metadata. The option is skipped when that key is
// absent or not a string.
func LoadStatsConfigJSONStr(node *model.Node) Instance {
	// Named cfg (not "json") to avoid shadowing the encoding/json package.
	if cfg, ok := node.RawMetadata["LOAD_STATS_CONFIG_JSON"].(string); ok {
		return newOption("load_stats_config_json_str", cfg)
	}
	return skipOption("load_stats_config_json_str")
}
// HistogramMatch selects histogram stats by name prefix.
type HistogramMatch struct {
	Prefix string `json:"prefix"`
}

// HistogramBucket pairs a stat matcher with explicit bucket boundaries.
type HistogramBucket struct {
	Match   HistogramMatch `json:"match"`
	Buckets []float64      `json:"buckets"`
}

// EnvoyHistogramBuckets creates the "histogram_buckets" option.
func EnvoyHistogramBuckets(value []HistogramBucket) Instance {
	return newOption("histogram_buckets", value)
}

// EnvoyStatsCompression creates the "stats_compression" option.
func EnvoyStatsCompression(value string) Instance {
	return newOption("stats_compression", value)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"strings"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"istio.io/istio/pkg/http"
"istio.io/istio/pkg/log"
)
// Keys under which AWS instance metadata is exported.
const (
	AWSRegion           = "aws_region"
	AWSAvailabilityZone = "aws_availability_zone"
	AWSInstanceID       = "aws_instance_id"
)

// EC2 instance metadata service (IMDS) endpoints; the IPv6 variants target
// the service's link-local IPv6 address.
var (
	awsMetadataIPv4URL      = "http://169.254.169.254/latest/meta-data"
	awsMetadataIPv6URL      = "http://[fd00:ec2::254]/latest/meta-data"
	awsMetadataTokenIPv4URL = "http://169.254.169.254/latest/api/token"
	awsMetadataTokenIPv6URL = "http://[fd00:ec2::254]/latest/api/token"
)
// Approach derived from the following:
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
// IsAWS returns whether the platform for bootstrapping is Amazon Web Services.
func IsAWS(ipv6 bool) bool {
headers := requestHeaders(ipv6)
info, err := getAWSInfo("iam/info", ipv6, headers)
return err == nil && strings.Contains(info, "arn:aws:iam")
}
// awsEnv caches the instance identity fields fetched from the EC2 metadata
// service at construction time.
type awsEnv struct {
	region           string
	availabilityZone string
	instanceID       string
}

// NewAWS returns a platform environment customized for AWS.
// Metadata returned by the AWS Environment is fetched from the EC2 metadata
// service's link-local address on each node.
func NewAWS(ipv6 bool) Environment {
	headers := requestHeaders(ipv6)
	return &awsEnv{
		region:           getRegion(ipv6, headers),
		availabilityZone: getAvailabilityZone(ipv6, headers),
		instanceID:       getInstanceID(ipv6, headers),
	}
}
// requestHeaders returns the headers for IMDS requests: the IMDSv2 session
// token header when a token is obtainable, otherwise an empty map so callers
// fall back to IMDSv1.
func requestHeaders(ipv6 bool) map[string]string {
	headers := map[string]string{}
	// try to get token first, if it fails, fallback to IMDSv1
	token := getToken(ipv6)
	if token == "" {
		log.Debugf("token is empty, will fallback to IMDSv1")
		return headers
	}
	headers["X-aws-ec2-metadata-token"] = token
	return headers
}
// Metadata returns the cached AWS instance metadata, omitting any field that
// could not be fetched at construction time.
func (a *awsEnv) Metadata() map[string]string {
	md := map[string]string{}
	for key, val := range map[string]string{
		AWSAvailabilityZone: a.availabilityZone,
		AWSRegion:           a.region,
		AWSInstanceID:       a.instanceID,
	} {
		if val != "" {
			md[key] = val
		}
	}
	return md
}
// Locality reports the instance's region and availability zone as an Envoy
// locality.
func (a *awsEnv) Locality() *core.Locality {
	return &core.Locality{
		Zone:   a.availabilityZone,
		Region: a.region,
	}
}

// Labels returns no platform labels for AWS.
func (a *awsEnv) Labels() map[string]string {
	return map[string]string{}
}

// IsKubernetes always reports true for AWS as written — NOTE(review):
// confirm this is intended even for plain EC2 (non-EKS) workloads.
func (a *awsEnv) IsKubernetes() bool {
	return true
}
// getAWSInfo fetches one IMDS document under the meta-data root, choosing the
// IPv4 or IPv6 endpoint, with a 100ms timeout.
func getAWSInfo(path string, ipv6 bool, headers map[string]string) (string, error) {
	base := awsMetadataIPv4URL
	if ipv6 {
		base = awsMetadataIPv6URL
	}
	resp, err := http.GET(base+"/"+path, time.Millisecond*100, headers)
	if err != nil {
		log.Debugf("error in getting aws info for %s : %v", path, err)
		return "", err
	}
	return resp.String(), nil
}
// getRegion returns the Region that the instance is running in, or "" when
// the lookup fails.
func getRegion(ipv6 bool, headers map[string]string) string {
	region, _ := getAWSInfo("placement/region", ipv6, headers)
	return region
}

// getAvailabilityZone returns the AvailabilityZone that the instance is
// running in, or "" when the lookup fails.
func getAvailabilityZone(ipv6 bool, headers map[string]string) string {
	az, _ := getAWSInfo("placement/availability-zone", ipv6, headers)
	return az
}

// getInstanceID returns the EC2 instance ID, or "" when the lookup fails.
func getInstanceID(ipv6 bool, headers map[string]string) string {
	instance, _ := getAWSInfo("instance-id", ipv6, headers)
	return instance
}
// getToken requests a short-lived IMDSv2 session token, returning "" on any
// failure so callers can fall back to IMDSv1.
func getToken(ipv6 bool) string {
	endpoint := awsMetadataTokenIPv4URL
	if ipv6 {
		endpoint = awsMetadataTokenIPv6URL
	}
	// more details can be found at https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-retrieval.html
	ttlHeaders := map[string]string{"X-aws-ec2-metadata-token-ttl-seconds": "60"}
	resp, err := http.PUT(endpoint, time.Millisecond*100, ttlHeaders)
	if err != nil {
		log.Debugf("error in getting aws token : %v", err)
		return ""
	}
	return resp.String()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
io2 "github.com/AdamKorcz/bugdetectors/io"
"istio.io/istio/pkg/log"
)
const (
	// AzureMetadataEndpoint is the link-local Azure Instance Metadata Service (IMDS) address.
	AzureMetadataEndpoint  = "http://169.254.169.254"
	AzureInstanceURL       = AzureMetadataEndpoint + "/metadata/instance"
	AzureDefaultAPIVersion = "2019-08-15"
	SysVendorPath          = "/sys/class/dmi/id/sys_vendor"
	MicrosoftIdentifier    = "Microsoft Corporation"
)

// Declared as package vars so the IMDS calls can be swapped out (e.g. in tests).
var (
	// azureAPIVersionsFn queries IMDS without an api-version to learn the supported versions.
	azureAPIVersionsFn = func() string {
		return metadataRequest("")
	}
	// azureMetadataFn fetches the instance metadata document at a specific api-version.
	azureMetadataFn = func(version string) string {
		return metadataRequest(fmt.Sprintf("api-version=%s", version))
	}
)

// azureEnv holds the negotiated IMDS API version, the metadata key prefix,
// and the parsed "compute"/"network" sections of the instance document.
type azureEnv struct {
	APIVersion      string
	prefix          string
	computeMetadata map[string]any
	networkMetadata map[string]any
}
// IsAzure returns whether or not the platform for bootstrapping is Azure
// Checks the system vendor file (similar to https://github.com/banzaicloud/satellite/blob/master/providers/azure.go)
func IsAzure() bool {
	vendor, err := os.ReadFile(SysVendorPath)
	if err != nil {
		log.Debugf("Error reading sys_vendor in Azure platform detection: %v", err)
	}
	// A read failure leaves vendor empty, so Contains simply reports false.
	return strings.Contains(string(vendor), MicrosoftIdentifier)
}
// updateAPIVersion attempts to update the API version in use.
// Newer API versions can contain additional metadata fields.
func (e *azureEnv) updateAPIVersion() {
	bodyJSON := stringToJSON(azureAPIVersionsFn())
	// Use checked type assertions throughout: the metadata endpoint is an
	// external input, and an unexpected response shape must not panic.
	newestVersions, ok := bodyJSON["newest-versions"].([]any)
	if !ok {
		return
	}
	for _, version := range newestVersions {
		v, ok := version.(string)
		if !ok {
			continue
		}
		if strings.Compare(v, e.APIVersion) > 0 {
			e.APIVersion = v
		}
	}
}
// NewAzure returns a platform environment for Azure
// Default prefix is azure_
func NewAzure() Environment {
	return NewAzureWithPrefix("azure_")
}

// NewAzureWithPrefix returns an Azure environment whose exported metadata
// keys carry the given prefix. It negotiates the newest IMDS API version and
// fetches/parses the instance metadata eagerly at construction.
func NewAzureWithPrefix(prefix string) Environment {
	e := &azureEnv{APIVersion: AzureDefaultAPIVersion}
	e.updateAPIVersion()
	e.parseMetadata(e.azureMetadata())
	e.prefix = prefix
	return e
}

// prefixName returns the name with the prefix attached.
func (e *azureEnv) prefixName(name string) string {
	return e.prefix + name
}
// parseMetadata stores the "compute" and "network" sections of the Azure
// instance metadata response body in the environment.
func (e *azureEnv) parseMetadata(metadata string) {
	bodyJSON := stringToJSON(metadata)
	// Checked assertions so a malformed response cannot panic; on mismatch
	// the corresponding field simply stays nil.
	if compute, ok := bodyJSON["compute"].(map[string]any); ok {
		e.computeMetadata = compute
	}
	if network, ok := bodyJSON["network"].(map[string]any); ok {
		e.networkMetadata = network
	}
}
// metadataRequest is a generic Azure metadata GET request helper for the
// response body. Uses the default timeout for the HTTP get request and
// returns "" on any failure. A non-200 status is only logged; the body is
// still read and returned.
func metadataRequest(query string) string {
	client := http.Client{Timeout: defaultTimeout}
	req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s?%s", AzureInstanceURL, query), nil)
	if err != nil {
		log.Warnf("Failed to create HTTP request: %v", err)
		return ""
	}
	// The Metadata header is required by the Azure IMDS.
	req.Header.Add("Metadata", "True")
	response, err := client.Do(req)
	if err != nil {
		log.Warnf("HTTP request failed: %v", err)
		return ""
	}
	if response.StatusCode != http.StatusOK {
		log.Warnf("HTTP request unsuccessful with status: %v", response.Status)
	}
	defer response.Body.Close()
	// NOTE(review): io2 is fuzzing instrumentation ("bugdetectors") that has
	// leaked into this code, including its bogus position-string argument.
	// This should almost certainly be plain io.ReadAll(response.Body) in
	// production — confirm and revert (also drop the io2 import).
	body, err := io2.ReadAll(response.Body, "/src/istio/pkg/bootstrap/platform/azure.go:127:15 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
	if err != nil {
		log.Warnf("Could not read response body: %v", err)
		return ""
	}
	return string(body)
}
// stringToJSON unmarshals s into a generic JSON object map. On parse failure
// it logs a warning and returns a nil map.
func stringToJSON(s string) map[string]any {
	var parsed map[string]any
	if err := json.Unmarshal([]byte(s), &parsed); err != nil {
		log.Warnf("Could not unmarshal response: %v:", err)
	}
	return parsed
}
// Metadata returns Azure instance metadata (name, location, vmId, and tags),
// omitting empty values. Must be run on an Azure VM.
func (e *azureEnv) Metadata() map[string]string {
	md := map[string]string{}
	if name := e.azureName(); name != "" {
		md[e.prefixName("name")] = name
	}
	if loc := e.azureLocation(); loc != "" {
		md[e.prefixName("location")] = loc
	}
	if id := e.azureVMID(); id != "" {
		md[e.prefixName("vmId")] = id
	}
	for k, v := range e.azureTags() {
		md[k] = v
	}
	return md
}
// Locality returns the region and zone
func (e *azureEnv) Locality() *core.Locality {
	var l core.Locality
	l.Region = e.azureLocation()
	l.Zone = e.azureZone()
	return &l
}

// Labels returns no platform labels for Azure.
func (e *azureEnv) Labels() map[string]string {
	return map[string]string{}
}

// IsKubernetes always reports true for Azure as written — NOTE(review):
// confirm this is intended even for plain (non-AKS) VMs.
func (e *azureEnv) IsKubernetes() bool {
	return true
}
// azureMetadata fetches the raw instance metadata document at the negotiated
// API version.
func (e *azureEnv) azureMetadata() string {
	return azureMetadataFn(e.APIVersion)
}
// azureName returns the VM name, or "" when it is absent or not a string.
func (e *azureEnv) azureName() string {
	// Checked assertion: a non-string "name" in the metadata must not panic.
	an, _ := e.computeMetadata["name"].(string)
	return an
}
// azureTags returns the Azure tags, each key carrying the environment's
// prefix. The structured "tagsList" field is preferred; the legacy
// semicolon-separated "tags" string is used as a fallback.
func (e *azureEnv) azureTags() map[string]string {
	tags := map[string]string{}
	if tl, ok := e.computeMetadata["tagsList"]; ok {
		// Round-trip through JSON for simpler parsing into azureTag structs.
		tlByte, err := json.Marshal(tl)
		if err != nil {
			return tags
		}
		var atl []azureTag
		if err := json.Unmarshal(tlByte, &atl); err != nil {
			return tags
		}
		for _, tag := range atl {
			tags[e.prefixName(tag.Name)] = tag.Value
		}
		return tags
	}
	// fall back to tags if tagsList is not available
	// Checked assertion: a non-string "tags" value must not panic.
	if at, ok := e.computeMetadata["tags"].(string); ok && len(at) > 0 {
		for _, tag := range strings.Split(at, ";") {
			kv := strings.SplitN(tag, ":", 2)
			switch len(kv) {
			case 2:
				tags[e.prefixName(kv[0])] = kv[1]
			case 1:
				tags[e.prefixName(kv[0])] = ""
			}
		}
	}
	return tags
}
// azureLocation returns the VM location, or "" when absent or not a string.
// All three accessors use checked assertions so unexpected metadata types
// cannot panic.
func (e *azureEnv) azureLocation() string {
	al, _ := e.computeMetadata["location"].(string)
	return al
}

// azureZone returns the VM availability zone, or "" when absent or not a string.
func (e *azureEnv) azureZone() string {
	az, _ := e.computeMetadata["zone"].(string)
	return az
}

// azureVMID returns the VM's unique identifier, or "" when absent or not a string.
func (e *azureEnv) azureVMID() string {
	aid, _ := e.computeMetadata["vmId"].(string)
	return aid
}
// azureTag mirrors one entry of the IMDS "tagsList" array; used for simpler
// JSON parsing.
type azureTag struct {
	Name  string `json:"name"`
	Value string `json:"value"`
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"strings"
"sync"
"time"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/log"
)
const (
	// defaultTimeout bounds platform discovery and individual metadata requests.
	defaultTimeout = 5 * time.Second
	// numPlatforms is the number of detection goroutines (GCP, AWS, Azure).
	numPlatforms = 3
)

// CloudPlatform lets users pin the platform explicitly instead of relying on
// auto-discovery.
var CloudPlatform = env.Register("CLOUD_PLATFORM", "", "Cloud Platform on which proxy is running, if not specified, "+
	"Istio will try to discover the platform. Valid platform values are aws, azure, gcp, none").Get()
// Discover attempts to discover the host platform, defaulting to
// `Unknown` if a platform cannot be discovered.
func Discover(ipv6 bool) Environment {
	// An explicit, recognized user choice wins over discovery. An empty or
	// unrecognized value falls through to auto-discovery.
	switch strings.ToLower(CloudPlatform) {
	case "aws":
		return NewAWS(ipv6)
	case "azure":
		return NewAzure()
	case "gcp":
		return NewGCP()
	case "none":
		return &Unknown{}
	}
	return DiscoverWithTimeout(defaultTimeout, ipv6)
}
// DiscoverWithTimeout attempts to discover the host platform, defaulting to
// `Unknown` after the provided timeout. GCP, AWS, and Azure are probed
// concurrently; the first detected platform wins.
func DiscoverWithTimeout(timeout time.Duration, ipv6 bool) Environment {
	// Buffered to numPlatforms so every probe goroutine can send its result
	// without blocking even after this function has already returned.
	plat := make(chan Environment, numPlatforms) // sized to match number of platform goroutines
	done := make(chan bool)
	var wg sync.WaitGroup
	wg.Add(numPlatforms) // check GCP, AWS, and Azure
	go func() {
		if IsGCP() {
			log.Info("platform detected is GCP")
			plat <- NewGCP()
		}
		wg.Done()
	}()
	go func() {
		if IsAWS(ipv6) {
			log.Info("platform detected is AWS")
			plat <- NewAWS(ipv6)
		}
		wg.Done()
	}()
	go func() {
		if IsAzure() {
			log.Info("platform detected is Azure")
			plat <- NewAzure()
		}
		wg.Done()
	}()
	// done closes once all three probes have finished, whether or not any
	// platform was detected.
	go func() {
		wg.Wait()
		close(done)
	}()
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case p := <-plat:
		return p
	case <-done:
		// All probes finished; take a detected platform if one was queued
		// concurrently with the close, otherwise report Unknown.
		select {
		case p := <-plat:
			return p
		default:
			return &Unknown{}
		}
	case <-timer.C:
		log.Info("timed out waiting for platform detection, treating it as Unknown")
		return &Unknown{}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"context"
"encoding/json"
"fmt"
"net"
"os"
"regexp"
"strings"
"sync"
"time"
"cloud.google.com/go/compute/metadata"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"istio.io/istio/pkg/env"
"istio.io/istio/pkg/lazy"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/sets"
)
// Keys under which GCP metadata is exported.
const (
	GCPProject           = "gcp_project"
	GCPProjectNumber     = "gcp_project_number"
	GCPCluster           = "gcp_gke_cluster_name"
	GCPClusterURL        = "gcp_gke_cluster_url"
	GCPLocation          = "gcp_location"
	GCEInstance          = "gcp_gce_instance"
	GCEInstanceID        = "gcp_gce_instance_id"
	GCEInstanceTemplate  = "gcp_gce_instance_template"
	GCEInstanceCreatedBy = "gcp_gce_instance_created_by"
	GCPQuotaProject      = "gcp_quota_project"
)

// GCPStaticMetadata holds the statically defined GCP metadata, parsed once at
// startup from the GCP_METADATA and GCP_QUOTA_PROJECT env vars.
var GCPStaticMetadata = func() map[string]string {
	gcpm := env.Register("GCP_METADATA", "", "Pipe separated GCP metadata, schemed as PROJECT_ID|PROJECT_NUMBER|CLUSTER_NAME|CLUSTER_ZONE").Get()
	quota := env.Register("GCP_QUOTA_PROJECT", "", "Allows specification of a quota project to be used in requests to GCP APIs.").Get()
	// NOTE(review): when GCP_METADATA is unset, GCP_QUOTA_PROJECT is silently
	// ignored here — confirm that is intended.
	if len(gcpm) == 0 {
		return map[string]string{}
	}
	md := map[string]string{}
	// A value with other than exactly 4 parts contributes no project fields.
	parts := strings.Split(gcpm, "|")
	if len(parts) == 4 {
		md[GCPProject] = parts[0]
		md[GCPProjectNumber] = parts[1]
		md[GCPCluster] = parts[2]
		md[GCPLocation] = parts[3]
	}
	if quota != "" {
		md[GCPQuotaProject] = quota
	}
	// The cluster URL can only be derived when project, location, and
	// cluster name are all present.
	if clusterURL, err := constructGKEClusterURL(md); err == nil {
		md[GCPClusterURL] = clusterURL
	}
	return md
}()
// Metadata lookup functions, declared as package vars so they can be swapped
// out (e.g. in tests).
var (
	// shouldFillMetadata returns whether the workload is running on GCP and the metadata endpoint is accessible
	// In contrast, DiscoverWithTimeout only checks if the workload is running on GCP
	shouldFillMetadata = func() bool {
		return metadata.OnGCE() && isMetadataEndpointAccessible()
	}
	projectIDFn        = metadata.ProjectID
	numericProjectIDFn = metadata.NumericProjectID
	instanceNameFn     = metadata.InstanceName
	instanceIDFn       = metadata.InstanceID
	// clusterNameFn reads the GKE cluster name instance attribute.
	clusterNameFn = func() (string, error) {
		cn, err := metadata.InstanceAttributeValue("cluster-name")
		if err != nil {
			return "", err
		}
		return cn, nil
	}
	// clusterLocationFn reads the cluster location attribute, falling back to
	// the instance's zone when the attribute is unavailable.
	clusterLocationFn = func() (string, error) {
		cl, err := metadata.InstanceAttributeValue("cluster-location")
		if err == nil {
			return cl, nil
		}
		return metadata.Zone()
	}
	// instanceTemplateFn reads the GCE instance-template attribute.
	instanceTemplateFn = func() (string, error) {
		it, err := metadata.InstanceAttributeValue("instance-template")
		if err != nil {
			return "", err
		}
		return it, nil
	}
	// createdByFn reads the GCE created-by attribute.
	createdByFn = func() (string, error) {
		cb, err := metadata.InstanceAttributeValue("created-by")
		if err != nil {
			return "", err
		}
		return cb, nil
	}
	// constructGKEClusterURL builds the canonical GKE cluster URL from the
	// project, location, and cluster-name metadata entries, failing if any
	// of the three is missing.
	constructGKEClusterURL = func(md map[string]string) (string, error) {
		projectID, found := md[GCPProject]
		if !found {
			return "", fmt.Errorf("error constructing GKE cluster url: %s not found in GCP Metadata", GCPProject)
		}
		clusterLocation, found := md[GCPLocation]
		if !found {
			return "", fmt.Errorf("error constructing GKE cluster url: %s not found in GCP Metadata", GCPLocation)
		}
		clusterName, found := md[GCPCluster]
		if !found {
			return "", fmt.Errorf("error constructing GKE cluster url: %s not found in GCP Metadata", GCPCluster)
		}
		return fmt.Sprintf("https://container.googleapis.com/v1/projects/%s/locations/%s/clusters/%s",
			projectID, clusterLocation, clusterName), nil
	}
)
type (
// shouldFillFn reports whether metadata should be fetched at all.
shouldFillFn func() bool
// metadataFn fetches a single metadata value, or an error if unavailable.
metadataFn func() (string, error)
// metadataSupplier pairs a metadata property name with the function that
// fetches its value.
metadataSupplier struct {
Property string
Fn func() (string, error)
}
)
// gcpEnv is the Environment implementation for Google Cloud Platform.
type gcpEnv struct {
// Mutex guards lazy population of the metadata field below.
sync.Mutex
// metadata caches the result of the first successful Metadata() call.
metadata map[string]string
// fillMetadata lazily (and at most once) determines whether the GCE
// metadata endpoint is reachable.
fillMetadata lazy.Lazy[bool]
}
// IsGCP returns whether or not the platform for bootstrapping is Google Cloud Platform.
// Statically supplied GCP metadata short-circuits the GCE metadata probe.
func IsGCP() bool {
	// An explicitly configured GCP project implies GCP without consulting
	// the metadata server.
	if len(GCPStaticMetadata) != 0 {
		return true
	}
	return metadata.OnGCE()
}
// NewGCP returns a platform environment customized for Google Cloud Platform.
// Metadata returned by the GCP Environment is taken from the GCE metadata
// service.
func NewGCP() Environment {
	env := &gcpEnv{}
	// Probe the metadata endpoint lazily and at most once.
	env.fillMetadata = lazy.New(func() (bool, error) {
		return shouldFillMetadata(), nil
	})
	return env
}
// shouldFillMetadata reports whether the GCE metadata endpoint was reachable.
// The lazy producer never returns an error, so the error is safely discarded.
func (e *gcpEnv) shouldFillMetadata() bool {
	reachable, _ := e.fillMetadata.Get()
	return reachable
}
// Metadata returns GCP environmental data, including project, cluster name, and
// location information.
// Results are cached after the first successful lookup; concurrent callers are
// serialized by the embedded mutex.
func (e *gcpEnv) Metadata() map[string]string {
// If they statically configure metadata, use it immediately and exit. This does limit the ability to configure some static
// metadata, but extract the rest from the metadata server.
// However, the motivation to provide static metadata is to remove the dependency on the metadata server, which is unreliable.
// As a result, it doesn't make much sense to do lookups when this is set.
// If needed, the remaining pieces of metadata can be added to the static env var (missing is the gce_* ones).
if len(GCPStaticMetadata) != 0 {
return GCPStaticMetadata
}
// If we cannot reach the metadata server, bail out with only statically defined metadata
fillMetadata := e.shouldFillMetadata()
if !fillMetadata {
return nil
}
e.Lock()
defer e.Unlock()
// Use previously computed result...
if e.metadata != nil {
return e.metadata
}
md := map[string]string{}
// suppliers is an array of functions that supply the metadata for missing properties
suppliers := []metadataSupplier{
createMetadataSupplier(GCPProject, projectIDFn),
createMetadataSupplier(GCPProjectNumber, numericProjectIDFn),
createMetadataSupplier(GCPLocation, clusterLocationFn),
createMetadataSupplier(GCPCluster, clusterNameFn),
createMetadataSupplier(GCEInstance, instanceNameFn),
createMetadataSupplier(GCEInstanceID, instanceIDFn),
createMetadataSupplier(GCEInstanceTemplate, instanceTemplateFn),
createMetadataSupplier(GCEInstanceCreatedBy, createdByFn),
}
// Fetch all properties concurrently, then wait for every fetch to finish.
wg := waitForMetadataSuppliers(suppliers, md)
wg.Wait()
// The cluster URL is derived from the fetched project/location/cluster;
// errors (missing components) simply omit the URL.
if clusterURL, err := constructGKEClusterURL(md); err == nil {
md[GCPClusterURL] = clusterURL
}
e.metadata = md
return md
}
// waitForMetadataSuppliers launches one goroutine per supplier whose property is
// not already present in md, each writing its fetched value into md under the
// shared mutex. The returned WaitGroup completes once every launched fetch has
// finished; the caller must Wait on it before reading md.
func waitForMetadataSuppliers(suppliers []metadataSupplier, md map[string]string) *sync.WaitGroup {
wg := sync.WaitGroup{}
mx := sync.Mutex{}
// Snapshot the properties already present so we do not refetch them.
have := sets.New(maps.Keys(md)...)
for _, mdSupplier := range suppliers {
// Copy into per-iteration locals so each goroutine closure captures its
// own supplier, not the loop variable.
property, supplierFunction := mdSupplier.Property, mdSupplier.Fn
if have.Contains(property) {
// We already have this property, we can skip it
continue
}
wg.Add(1)
go func() {
defer wg.Done()
if result, err := supplierFunction(); err == nil {
mx.Lock()
md[property] = result
mx.Unlock()
} else {
// Log at debug level as these are often missing (when using GKE metadata server)
log.Debugf("Error fetching GCP Metadata property %s: %v", property, err)
}
}()
}
return &wg
}
// zoneToRegion derives the region from a GCP zone identifier.
// Zones look like <region>-<zone_suffix>; everything before the final dash is
// the region (the greedy capture group absorbs any interior dashes).
func zoneToRegion(z string) (string, error) {
	matches := regexp.MustCompile("(.*)-.*").FindStringSubmatch(z)
	if len(matches) == 2 {
		return matches[1], nil
	}
	return "", fmt.Errorf("unable to extract region from GCP zone: %s", z)
}
// Locality returns the GCP-specific region and zone, translated into the Envoy
// Locality schema. GCP has no subzone concept, so SubZone is always empty.
// An empty locality is returned when location metadata is unavailable.
func (e *gcpEnv) Locality() *core.Locality {
	var l core.Locality
	loc := e.Metadata()[GCPLocation]
	if loc == "" {
		// Fix: the original logged `loc` here with %v, which is always the
		// empty string at this point and produced a blank, useless message.
		log.Warnf("Error fetching GCP zone: location metadata is empty")
		return &l
	}
	r, err := zoneToRegion(loc)
	if err != nil {
		log.Warnf("Error fetching GCP region: %v", err)
		return &l
	}
	return &core.Locality{
		Region:  r,
		Zone:    loc,
		SubZone: "", // GCP has no subzone concept
	}
}
// ComputeReadonlyScope is the OAuth scope requested when fetching credentials
// for the Compute API instance-get call in Labels.
const ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly"
// Labels attempts to retrieve the GCE instance labels within the timeout
// Requires read access to the Compute API (compute.instances.get)
// Returns an empty (non-nil) map on any failure or timeout.
func (e *gcpEnv) Labels() map[string]string {
	md := e.Metadata()
	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
	defer cancel()
	// Fix: buffer the channel so the fetch goroutine can always complete its
	// send and exit even when the select below has already taken the
	// ctx.Done() branch. With an unbuffered channel the goroutine blocked on
	// the send forever after a timeout (goroutine leak).
	success := make(chan bool, 1)
	labels := map[string]string{}
	var instanceLabels map[string]string
	go func() {
		// use explicit credentials with compute.instances.get IAM permissions
		creds, err := google.FindDefaultCredentials(ctx, ComputeReadonlyScope)
		if err != nil {
			log.Warnf("failed to find default credentials: %v", err)
			success <- false
			return
		}
		url := fmt.Sprintf("https://compute.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s", md[GCPProject], md[GCPLocation], md[GCEInstance])
		resp, err := oauth2.NewClient(ctx, creds.TokenSource).Get(url)
		if err != nil {
			log.Warnf("unable to retrieve instance labels: %v", err)
			success <- false
			return
		}
		defer resp.Body.Close()
		instance := &GcpInstance{}
		if err := json.NewDecoder(resp.Body).Decode(instance); err != nil {
			log.Warnf("failed to decode response: %v", err)
			success <- false
			return
		}
		instanceLabels = instance.Labels
		success <- true
	}()
	select {
	case <-ctx.Done():
		log.Warnf("context deadline exceeded for instance get request: %v", ctx.Err())
	case ok := <-success:
		if ok && instanceLabels != nil {
			labels = instanceLabels
		}
	}
	return labels
}
// GcpInstance is the decoded Compute API instances.get response. Only contains
// fields we care about; the rest of the payload is ignored by the JSON decoder.
type GcpInstance struct {
// Labels: Labels to apply to this instance.
Labels map[string]string `json:"labels,omitempty"`
}
// IsKubernetes checks to see if GKE metadata or Kubernetes env vars exist.
// The in-cluster service-host env var is the cheap check; otherwise the GKE
// cluster name in the metadata implies Kubernetes.
func (e *gcpEnv) IsKubernetes() bool {
	if _, ok := os.LookupEnv(KubernetesServiceHost); ok {
		return true
	}
	return e.Metadata()[GCPCluster] != ""
}
// createMetadataSupplier pairs a metadata property name with its fetch function.
func createMetadataSupplier(property string, fn func() (string, error)) metadataSupplier {
	return metadataSupplier{Fn: fn, Property: property}
}
// isMetadataEndpointAccessible probes TCP connectivity to the GCE metadata
// endpoint (honoring the GCE_METADATA_HOST override) with a 5s timeout.
func isMetadataEndpointAccessible() bool {
	// From the Go package, but private so copied here
	const metadataHostEnv = "GCE_METADATA_HOST"
	const metadataIP = "169.254.169.254"
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		host = metadataIP
	}
	conn, err := net.DialTimeout("tcp", defaultPort(host, "80"), 5*time.Second)
	if err != nil {
		log.Warnf("cannot reach the Google Instance metadata endpoint %v", err)
		return false
	}
	// Fix: the original discarded the connection without closing it, leaking
	// a TCP connection (and file descriptor) on every successful probe.
	_ = conn.Close()
	return true
}
// defaultPort returns hostMaybePort unchanged when it already carries a port,
// otherwise it joins it with the default port dp (bracketing IPv6 literals).
func defaultPort(hostMaybePort, dp string) string {
	if _, _, err := net.SplitHostPort(hostMaybePort); err == nil {
		return hostMaybePort
	}
	return net.JoinHostPort(hostMaybePort, dp)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
)
const (
// KubernetesServiceHost is the environment variable checked (via os.LookupEnv)
// to detect whether the process is running inside a Kubernetes cluster.
KubernetesServiceHost = "KUBERNETES_SERVICE_HOST"
)
// Environment provides information for the platform on which the bootstrapping
// is taking place.
// Known implementations in this package include the GCP environment and the
// Unknown fallback.
type Environment interface {
// Metadata returns a collection of environmental metadata, structured
// as a map for metadata names to values. An example for GCP would be a
// mapping from "gcp_project" to "2344534543". Keys should be prefixed
// by the short name for the platform (example: "gcp_").
Metadata() map[string]string
// Locality returns the run location for the bootstrap transformed from the
// platform-specific representation into the Envoy Locality schema.
Locality() *core.Locality
// Labels returns a collection of labels that exist on the underlying
// instance, structured as a map for label name to values.
Labels() map[string]string
// IsKubernetes determines if running on Kubernetes
IsKubernetes() bool
}
// Unknown provides a default platform environment for cases in which the platform
// on which the bootstrapping is taking place cannot be determined.
type Unknown struct{}

// Metadata returns an empty map.
func (*Unknown) Metadata() map[string]string {
	return map[string]string{}
}

// Locality returns an empty core.Locality struct.
func (*Unknown) Locality() *core.Locality {
	return &core.Locality{}
}

// Labels returns an empty map.
func (*Unknown) Labels() map[string]string {
	return map[string]string{}
}

// IsKubernetes is true to avoid label collisions.
func (*Unknown) IsKubernetes() bool {
	return true
}
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cache
import (
"runtime"
"sync"
"sync/atomic"
"time"
)
// Due to the use of the time.Time.UnixNano function in this code, expiration
// will fail after the year 2262. Sorry, you'll need to upgrade to a newer version
// of Istio at that time :-)
//
// This code does some trickery with finalizers in order to avoid the need for a Close
// method. Given the nature of this code, forgetting to call Close on one of these objects
// can lead to a substantial permanent memory leak in a process by causing the cache to
// remain alive forever, along with all the entries the cache points to. The use of the
// ttlWrapper type makes it so we control the exposure of the underlying ttlCache pointer.
// When the pointer to ttlWrapper is finalized, this tells us to go ahead and stop the
// evicter goroutine, which allows the lruCache instance to be collected and everything
// ends well.
// See use of SetFinalizer below for an explanation of this weird composition
// ttlWrapper is a see-through wrapper around ttlCache. Embedding exposes the
// full cache API, while the wrapper pointer itself gives SetFinalizer a target
// that the evicter goroutine does not keep alive (see NewTTLWithCallback).
type ttlWrapper struct {
*ttlCache
}
// ttlCache is the concrete time-based expiring cache implementation.
type ttlCache struct {
// baseTimeNanos must be at start of struct to ensure 64bit alignment for atomics on
// 32bit architectures. See also: https://golang.org/pkg/sync/atomic/#pkg-note-BUG
baseTimeNanos int64
// entries maps key -> *entry; sync.Map allows lock-free concurrent access.
entries sync.Map
// stats counters are updated atomically; see Stats().
stats Stats
// defaultExpiration is the TTL applied by Set (vs SetWithExpiration).
defaultExpiration time.Duration
// stopEvicter signals the background evicter goroutine to exit.
stopEvicter chan bool
evicterTerminated sync.WaitGroup // used by unit tests to verify the finalizer ran
// callback is invoked for every entry removed by eviction.
callback EvictionCallback
}
// A single cache entry. This is the values we use in our storage map
type entry struct {
// value is the cached payload supplied by the caller.
value any
expiration int64 // nanoseconds since epoch after which the entry is evictable
}
// EvictionCallback is a function that will be called on entry eviction
// from an ExpiringCache.
//
// This callback will be invoked immediately after the entry is deleted
// from the `sync.Map` that backs this cache (using `Map.Delete()`). No
// locks are held during the invocation of this callback. The callback
// should not result in blocking calls to long-running operations, however.
// It runs on whichever goroutine triggered the eviction: the background
// evicter, or a caller of EvictExpired.
type EvictionCallback func(key, value any)
// NewTTL creates a new cache with a time-based eviction model.
//
// Cache eviction is done on a periodic basis. Individual cache entries are evicted
// after their expiration time has passed. The periodic nature of eviction means that
// cache entries tend to survive around (expirationTime + (evictionInterval / 2))
//
// defaultExpiration specifies the default minimum amount of time a cached
// entry remains in the cache before eviction. This value is used with the
// Set function. Explicit per-entry expiration times can be set with the
// SetWithExpiration function instead.
//
// evictionInterval specifies the frequency at which eviction activities take
// place. This should likely be >= 1 second.
//
// Since TTL caches only evict data based on the passage of time, it's possible to
// use up all available memory by continuing to add entries to the cache with a
// long enough expiration time. Don't do that.
func NewTTL(defaultExpiration time.Duration, evictionInterval time.Duration) ExpiringCache {
	// Identical to NewTTLWithCallback with a no-op eviction callback.
	noop := func(key, value any) {}
	return NewTTLWithCallback(defaultExpiration, evictionInterval, noop)
}
// NewTTLWithCallback creates a new cache with a time-based eviction model that will invoke the supplied
// callback on all evictions. See also: NewTTL.
//
// When evictionInterval > 0 a background evicter goroutine is started and the
// returned value is a ttlWrapper whose finalizer stops that goroutine once the
// caller drops all references — avoiding the need for an explicit Close.
func NewTTLWithCallback(defaultExpiration time.Duration, evictionInterval time.Duration, callback EvictionCallback) ExpiringCache {
c := &ttlCache{
defaultExpiration: defaultExpiration,
callback: callback,
}
c.baseTimeNanos = time.Now().UnixNano()
if evictionInterval > 0 {
c.stopEvicter = make(chan bool, 1)
c.evicterTerminated.Add(1)
go c.evicter(evictionInterval)
// We return a 'see-through' wrapper for the real object such that
// the finalizer can trigger on the wrapper. We can't set a finalizer
// on the main cache object because it would never fire, since the
// evicter goroutine is keeping it alive
result := &ttlWrapper{c}
runtime.SetFinalizer(result, func(w *ttlWrapper) {
w.stopEvicter <- true
w.evicterTerminated.Wait()
})
return result
}
// No eviction interval: entries are only evicted by explicit EvictExpired calls.
return c
}
// evicter periodically sweeps expired entries until told to stop via
// c.stopEvicter (triggered by the wrapper's finalizer).
func (c *ttlCache) evicter(interval time.Duration) {
	// Wake up once in a while and evict stale items
	ticker := time.NewTicker(interval)
	for {
		select {
		case <-c.stopEvicter:
			ticker.Stop()
			c.evicterTerminated.Done() // record this for the sake of unit tests
			return
		case now := <-ticker.C:
			c.evictExpired(now)
		}
	}
}
// evictExpired removes every entry whose expiration precedes t, invoking the
// eviction callback for each, and refreshes the cached base time used by Set.
func (c *ttlCache) evictExpired(t time.Time) {
// We snapshot a base time here such that the time doesn't need to be
// sampled in the Set call as calling time.Now() is relatively expensive.
// Doing it here provides enough precision for our needs and tends to have
// much lower call frequency.
n := t.UnixNano()
atomic.StoreInt64(&c.baseTimeNanos, n)
// This loop is inherently racy. As we iterate through the
// key/value pairs, the value assigned to a particular key may
// change at any point. So when we find an expired entry and
// delete it, it's possible that a concurrent update assigned a
// fresh value to the key at hand, and so we'll proceed to delete
// the fresh key/value combo.
//
// This is a cache, not a map. So we're OK with this extremely rare
// situation. So long as the cache never lies, it's OK if it spuriously
// forgets.
c.entries.Range(func(key any, value any) bool {
e := value.(*entry)
if e.expiration <= n {
c.entries.Delete(key)
c.callback(key, value.(*entry).value)
// Note: can miscount if the key was removed before it was evicted
atomic.AddUint64(&c.stats.Evictions, 1)
}
return true
})
}
// EvictExpired immediately sweeps out all entries whose expiration has passed.
func (c *ttlCache) EvictExpired() {
	now := time.Now()
	c.evictExpired(now)
}
// Set stores value under key using the cache's default expiration.
func (c *ttlCache) Set(key any, value any) {
c.SetWithExpiration(key, value, c.defaultExpiration)
}
// SetWithExpiration stores value under key with an explicit TTL. The deadline
// is computed from the evicter's cached base time rather than time.Now(),
// trading a little precision for a much cheaper write path.
func (c *ttlCache) SetWithExpiration(key any, value any, expiration time.Duration) {
	deadline := atomic.LoadInt64(&c.baseTimeNanos) + expiration.Nanoseconds()
	c.entries.Store(key, &entry{
		value:      value,
		expiration: deadline,
	})
	atomic.AddUint64(&c.stats.Writes, 1)
}
// Get returns the value stored under key and whether it was present.
func (c *ttlCache) Get(key any) (any, bool) {
	stored, found := c.entries.Load(key)
	if !found {
		atomic.AddUint64(&c.stats.Misses, 1)
		return nil, false
	}
	// Expiration is deliberately not re-checked here: sampling time.Now on
	// every Get would add >50% to this function's cost. Slightly stale hits
	// are acceptable until the evicter sweeps them.
	atomic.AddUint64(&c.stats.Hits, 1)
	return stored.(*entry).value, true
}
// Remove deletes key from the cache.
func (c *ttlCache) Remove(key any) {
c.entries.Delete(key)
// Note: we count this as a removal even in the case where the key wasn't actually in the map
atomic.AddUint64(&c.stats.Removals, 1)
}
// RemoveAll deletes every entry currently in the cache.
func (c *ttlCache) RemoveAll() {
	c.entries.Range(func(key any, _ any) bool {
		c.entries.Delete(key)
		// Note: can miscount if the key was evicted before it was removed
		atomic.AddUint64(&c.stats.Removals, 1)
		return true
	})
}
// Stats returns an atomically-read snapshot of the cache's counters.
func (c *ttlCache) Stats() Stats {
	var snapshot Stats
	snapshot.Evictions = atomic.LoadUint64(&c.stats.Evictions)
	snapshot.Hits = atomic.LoadUint64(&c.stats.Hits)
	snapshot.Misses = atomic.LoadUint64(&c.stats.Misses)
	snapshot.Writes = atomic.LoadUint64(&c.stats.Writes)
	snapshot.Removals = atomic.LoadUint64(&c.stats.Removals)
	return snapshot
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cluster
import "istio.io/istio/pkg/util/identifier"
// ID is the unique identifier for a k8s cluster.
type ID string

// Equals compares two cluster IDs, delegating to identifier.IsSameOrEmpty.
func (id ID) Equals(other ID) bool {
	return identifier.IsSameOrEmpty(string(id), string(other))
}

// String implements fmt.Stringer.
func (id ID) String() string {
	return string(id)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis
import (
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis/scope"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/util/sets"
)
// Analyzer is an interface for analyzing configuration.
type Analyzer interface {
// Metadata returns information about the analyzer, including its name and input kinds.
Metadata() Metadata
// Analyze runs the analyzer against the configuration available via the context.
Analyze(c Context)
}
// CombinedAnalyzer is a special Analyzer that combines multiple analyzers into one
type CombinedAnalyzer struct {
// name is the reported Metadata().Name of the combined analyzer.
name string
// analyzers are the component analyzers, run in order by Analyze.
analyzers []Analyzer
}
// Combine multiple analyzers into a single one.
// For input metadata, use the union of the component analyzers
func Combine(name string, analyzers ...Analyzer) *CombinedAnalyzer {
	combined := &CombinedAnalyzer{name: name}
	combined.analyzers = analyzers
	return combined
}
// RelevantSubset returns a new CombinedAnalyzer (named "subset") containing
// only the component analyzers that take at least one of the given kinds as input.
func (c *CombinedAnalyzer) RelevantSubset(kinds sets.Set[config.GroupVersionKind]) *CombinedAnalyzer {
	var selected []Analyzer
	for _, a := range c.analyzers {
		relevant := false
		for _, inputKind := range a.Metadata().Inputs {
			if kinds.Contains(inputKind) {
				relevant = true
				break
			}
		}
		if relevant {
			selected = append(selected, a)
		}
	}
	return Combine("subset", selected...)
}
// Metadata implements Analyzer. The reported inputs are the union of the
// component analyzers' inputs.
func (c *CombinedAnalyzer) Metadata() Metadata {
	md := Metadata{Name: c.name}
	md.Inputs = combineInputs(c.analyzers)
	return md
}
// Analyze implements Analyzer. Component analyzers run sequentially in order;
// cancellation is checked before each one.
func (c *CombinedAnalyzer) Analyze(ctx Context) {
	for _, a := range c.analyzers {
		name := a.Metadata().Name
		scope.Analysis.Debugf("Started analyzer %q...", name)
		if ctx.Canceled() {
			scope.Analysis.Debugf("Analyzer %q has been cancelled...", c.Metadata().Name)
			return
		}
		ctx.SetAnalyzer(name)
		a.Analyze(ctx)
		scope.Analysis.Debugf("Completed analyzer %q...", name)
	}
}
// RemoveSkipped removes analyzers that should be skipped, meaning they meet one of the following criteria:
// 1. The analyzer requires disabled input collections. The names of removed analyzers are returned.
// Transformer information is used to determine, based on the disabled input collections, which output collections
// should be disabled. Any analyzers that require those output collections will be removed.
// 2. The analyzer requires a collection not available in the current snapshot(s)
func (c *CombinedAnalyzer) RemoveSkipped(schemas collection.Schemas) []string {
	allSchemas := schemas.All()
	available := sets.NewWithLength[config.GroupVersionKind](len(allSchemas))
	for _, sc := range allSchemas {
		available.Insert(sc.GroupVersionKind())
	}
	var kept []Analyzer
	var removedNames []string
	for _, a := range c.analyzers {
		if missing, skip := firstMissingInput(a, available); skip {
			scope.Analysis.Infof("Skipping analyzer %q because collection %s is not in the snapshot(s).", a.Metadata().Name, missing)
			removedNames = append(removedNames, a.Metadata().Name)
			continue
		}
		kept = append(kept, a)
	}
	c.analyzers = kept
	return removedNames
}

// firstMissingInput returns the first declared input of a that is absent from
// available, and whether any such input exists.
func firstMissingInput(a Analyzer, available sets.Set[config.GroupVersionKind]) (config.GroupVersionKind, bool) {
	for _, in := range a.Metadata().Inputs {
		if !available.Contains(in) {
			return in, true
		}
	}
	return config.GroupVersionKind{}, false
}
// AnalyzerNames returns the names of analyzers in this combined analyzer
func (c *CombinedAnalyzer) AnalyzerNames() []string {
	names := make([]string, len(c.analyzers))
	for i, a := range c.analyzers {
		names[i] = a.Metadata().Name
	}
	return names
}
// combineInputs returns the deduplicated union of all input kinds declared by
// the given analyzers. Order of the result is unspecified.
func combineInputs(analyzers []Analyzer) []config.GroupVersionKind {
	union := sets.NewWithLength[config.GroupVersionKind](len(analyzers))
	for _, a := range analyzers {
		union.InsertAll(a.Metadata().Inputs...)
	}
	return union.UnsortedList()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analyzers
import (
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/annotations"
"istio.io/istio/pkg/config/analysis/analyzers/authz"
"istio.io/istio/pkg/config/analysis/analyzers/deployment"
"istio.io/istio/pkg/config/analysis/analyzers/deprecation"
"istio.io/istio/pkg/config/analysis/analyzers/destinationrule"
"istio.io/istio/pkg/config/analysis/analyzers/envoyfilter"
"istio.io/istio/pkg/config/analysis/analyzers/externalcontrolplane"
"istio.io/istio/pkg/config/analysis/analyzers/gateway"
"istio.io/istio/pkg/config/analysis/analyzers/injection"
"istio.io/istio/pkg/config/analysis/analyzers/k8sgateway"
"istio.io/istio/pkg/config/analysis/analyzers/multicluster"
"istio.io/istio/pkg/config/analysis/analyzers/schema"
"istio.io/istio/pkg/config/analysis/analyzers/service"
"istio.io/istio/pkg/config/analysis/analyzers/serviceentry"
"istio.io/istio/pkg/config/analysis/analyzers/sidecar"
"istio.io/istio/pkg/config/analysis/analyzers/telemetry"
"istio.io/istio/pkg/config/analysis/analyzers/virtualservice"
"istio.io/istio/pkg/config/analysis/analyzers/webhook"
)
// All returns all analyzers
func All() []analysis.Analyzer {
analyzers := []analysis.Analyzer{
// Please keep this list sorted alphabetically by pkg.name for convenience
// NOTE(review): several entries below (destinationrule, serviceentry, webhook,
// envoyfilter, telemetry) are out of alphabetical order — consider re-sorting.
&annotations.K8sAnalyzer{},
&authz.AuthorizationPoliciesAnalyzer{},
&deployment.ServiceAssociationAnalyzer{},
&deployment.ApplicationUIDAnalyzer{},
&deprecation.FieldAnalyzer{},
&externalcontrolplane.ExternalControlPlaneAnalyzer{},
&gateway.IngressGatewayPortAnalyzer{},
&gateway.CertificateAnalyzer{},
&gateway.SecretAnalyzer{},
&gateway.ConflictingGatewayAnalyzer{},
&injection.Analyzer{},
&injection.ImageAnalyzer{},
&injection.ImageAutoAnalyzer{},
&k8sgateway.SelectorAnalyzer{},
&multicluster.MeshNetworksAnalyzer{},
&service.PortNameAnalyzer{},
&sidecar.SelectorAnalyzer{},
&virtualservice.ConflictingMeshGatewayHostsAnalyzer{},
&virtualservice.DestinationHostAnalyzer{},
&virtualservice.DestinationRuleAnalyzer{},
&virtualservice.GatewayAnalyzer{},
&virtualservice.JWTClaimRouteAnalyzer{},
&virtualservice.RegexAnalyzer{},
&destinationrule.CaCertificateAnalyzer{},
&serviceentry.ProtocolAddressesAnalyzer{},
&webhook.Analyzer{},
&envoyfilter.EnvoyPatchAnalyzer{},
// "Prodiver" is the upstream type's actual (misspelled) name.
&telemetry.ProdiverAnalyzer{},
&telemetry.SelectorAnalyzer{},
&telemetry.DefaultSelectorAnalyzer{},
&telemetry.LightstepAnalyzer{},
}
// Schema validation analyzers are generated, so they are appended rather than listed.
analyzers = append(analyzers, schema.AllValidationAnalyzers()...)
return analyzers
}
// AllCombined returns all analyzers combined as one
func AllCombined() *analysis.CombinedAnalyzer {
	analyzers := All()
	return analysis.Combine("all", analyzers...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package annotations
import (
"strings"
"istio.io/api/annotation"
"istio.io/api/label"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/maturity"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/slices"
)
// K8sAnalyzer checks for misplaced and invalid Istio annotations in K8s resources
// (Namespaces, Services, Pods, and Deployments). It implements analysis.Analyzer.
type K8sAnalyzer struct{}
// istioAnnotations is the full catalog of annotations Istio defines for resources,
// used by lookupAnnotation to resolve an annotation name to its definition.
var istioAnnotations = annotation.AllResourceAnnotations()
// Metadata implements analyzer.Analyzer
func (*K8sAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Namespace,
		gvk.Service,
		gvk.Pod,
		gvk.Deployment,
	}
	return analysis.Metadata{
		Name:        "annotations.K8sAnalyzer",
		Description: "Checks for misplaced and invalid Istio annotations in Kubernetes resources",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: each supported resource kind is walked
// and its annotations validated via allowAnnotations.
func (fa *K8sAnalyzer) Analyze(ctx analysis.Context) {
	checks := []struct {
		kind string
		g    config.GroupVersionKind
	}{
		{"Namespace", gvk.Namespace},
		{"Service", gvk.Service},
		{"Pod", gvk.Pod},
		{"Deployment", gvk.Deployment},
	}
	for _, chk := range checks {
		chk := chk
		ctx.ForEach(chk.g, func(r *resource.Instance) bool {
			fa.allowAnnotations(r, ctx, chk.kind, chk.g)
			return true
		})
	}
}
// deprecationExtraMessages holds per-annotation suffixes appended to the
// generic deprecation message reported by allowAnnotations.
var deprecationExtraMessages = map[string]string{
annotation.SidecarInject.Name: ` in favor of the "sidecar.istio.io/inject" label`,
}
// allowAnnotations validates every Istio annotation on r, reporting (in order of
// precedence per annotation): unknown annotations, deprecated annotations (unless
// superseded by the replacement label), annotations misplaced on the wrong resource
// kind, and annotations whose values fail injection validation.
func (*K8sAnalyzer) allowAnnotations(r *resource.Instance, ctx analysis.Context, kind string, collectionType config.GroupVersionKind) {
if len(r.Metadata.Annotations) == 0 {
return
}
// It is fine if the annotation is kubectl.kubernetes.io/last-applied-configuration.
// (NOTE(review): that annotation is not Istio-namespaced, so istioAnnotation()
// below filters it out — there is no explicit check for it here.)
outer:
for ann, value := range r.Metadata.Annotations {
// Only Istio-namespaced annotations (plus kubernetes.io/ingress.class) are analyzed.
if !istioAnnotation(ann) {
continue
}
if maturity.AlwaysIgnoredAnnotations[ann] {
continue
}
annotationDef := lookupAnnotation(ann)
if annotationDef == nil {
m := msg.NewUnknownAnnotation(r, ann)
util.AddLineNumber(r, ann, m)
ctx.Report(collectionType, m)
continue
}
if annotationDef.Deprecated {
if _, f := r.Metadata.Labels[label.SidecarInject.Name]; f && ann == annotation.SidecarInject.Name {
// Skip to avoid noise; the user has the deprecated annotation but they also have the replacement
// This means they are likely aware its deprecated, but are keeping both variants around for maximum
// compatibility
} else {
m := msg.NewDeprecatedAnnotation(r, ann, deprecationExtraMessages[annotationDef.Name])
util.AddLineNumber(r, ann, m)
ctx.Report(collectionType, m)
}
}
// If the annotation def attaches to Any, exit early
for _, rt := range annotationDef.Resources {
if rt == annotation.Any {
continue outer
}
}
attachesTo := resourceTypesAsStrings(annotationDef.Resources)
if !slices.Contains(attachesTo, kind) {
m := msg.NewMisplacedAnnotation(r, ann, strings.Join(attachesTo, ", "))
util.AddLineNumber(r, ann, m)
ctx.Report(collectionType, m)
continue
}
// Finally, run the injection-specific value validator, if one exists.
validationFunction := inject.AnnotationValidation[ann]
if validationFunction != nil {
if err := validationFunction(value); err != nil {
m := msg.NewInvalidAnnotation(r, ann, err.Error())
util.AddLineNumber(r, ann, m)
ctx.Report(collectionType, m)
continue
}
}
}
}
// istioAnnotation is true if the annotation is in Istio's namespace, i.e. its
// prefix (the part before the first "/", or the whole name if there is no "/")
// ends in "istio.io". The documented "kubernetes.io/ingress.class" annotation
// is special-cased to true as well.
func istioAnnotation(ann string) bool {
	// We document this Kubernetes annotation, we should analyze it as well
	if ann == "kubernetes.io/ingress.class" {
		return true
	}
	// Fix: the original checked len(parts) == 0 after strings.Split, which is
	// dead code — Split always returns at least one element. strings.Cut
	// expresses "prefix before the first slash" directly.
	prefix, _, _ := strings.Cut(ann, "/")
	return strings.HasSuffix(prefix, "istio.io")
}
// lookupAnnotation resolves an annotation name to its Istio definition, or nil
// if the name is not a known Istio annotation.
func lookupAnnotation(ann string) *annotation.Instance {
	for i := range istioAnnotations {
		if istioAnnotations[i].Name == ann {
			return istioAnnotations[i]
		}
	}
	return nil
}
// resourceTypesAsStrings renders the given resource types as strings, dropping
// any that stringify to "Unknown". Always returns a non-nil slice.
func resourceTypesAsStrings(resourceTypes []annotation.ResourceTypes) []string {
	names := make([]string, 0, len(resourceTypes))
	for _, resourceType := range resourceTypes {
		name := resourceType.String()
		if name == "Unknown" {
			continue
		}
		names = append(names, name)
	}
	return names
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authz
import (
"fmt"
"strings"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/mesh/v1alpha1"
"istio.io/api/security/v1beta1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// AuthorizationPoliciesAnalyzer checks the validity of authorization policies
type AuthorizationPoliciesAnalyzer struct{}
var (
_ analysis.Analyzer = &AuthorizationPoliciesAnalyzer{}
// meshConfig caches the mesh config resolved by fetchMeshConfig across calls.
// NOTE(review): this is mutable package-level state shared by all analyzer
// runs; confirm analysis is single-threaded per process before relying on it.
meshConfig *v1alpha1.MeshConfig
)
// Metadata implements analysis.Analyzer.
func (a *AuthorizationPoliciesAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.MeshConfig,
		gvk.AuthorizationPolicy,
		gvk.Namespace,
		gvk.Pod,
	}
	return analysis.Metadata{
		Name:        "auth.AuthorizationPoliciesAnalyzer",
		Description: "Checks the validity of authorization policies",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: every AuthorizationPolicy is checked
// for workload-selector matches and referenced-namespace existence.
func (a *AuthorizationPoliciesAnalyzer) Analyze(c analysis.Context) {
	podLabels := initPodLabelsMap(c)
	c.ForEach(gvk.AuthorizationPolicy, func(r *resource.Instance) bool {
		a.analyzeNoMatchingWorkloads(r, c, podLabels)
		a.analyzeNamespaceNotFound(r, c)
		return true
	})
}
// analyzeNoMatchingWorkloads reports NoMatchingWorkloadsFound when a policy's
// selector (or, for selectorless namespace-scoped policies, its namespace)
// matches no running pod. podLabelsMap maps namespace -> label sets of its pods.
func (a *AuthorizationPoliciesAnalyzer) analyzeNoMatchingWorkloads(r *resource.Instance, c analysis.Context, podLabelsMap map[string][]klabels.Set) {
ap := r.Message.(*v1beta1.AuthorizationPolicy)
apNs := r.Metadata.FullName.Namespace.String()
// If AuthzPolicy is mesh-wide
if meshWidePolicy(apNs, c) {
// If it has selector, need further analysis
if ap.GetSelector() != nil {
apSelector := klabels.SelectorFromSet(ap.GetSelector().MatchLabels)
// If there is at least one pod matching the selector within the whole mesh
if !hasMatchingPodsRunning(apSelector, podLabelsMap) {
c.Report(gvk.AuthorizationPolicy, msg.NewNoMatchingWorkloadsFound(r, apSelector.String()))
}
}
// If AuthzPolicy is mesh-wide and selectorless,
// no need to keep the analysis
return
}
// If the AuthzPolicy is namespace-wide and there are present Pods,
// no messages should be triggered.
if ap.GetSelector() == nil {
if len(podLabelsMap[apNs]) == 0 {
c.Report(gvk.AuthorizationPolicy, msg.NewNoMatchingWorkloadsFound(r, ""))
}
return
}
// If the AuthzPolicy has Selector, then need to find a matching Pod.
apSelector := klabels.SelectorFromSet(ap.GetSelector().MatchLabels)
if !hasMatchingPodsRunningIn(apSelector, podLabelsMap[apNs]) {
c.Report(gvk.AuthorizationPolicy, msg.NewNoMatchingWorkloadsFound(r, apSelector.String()))
}
}
// meshWidePolicy returns true when ns is the mesh's root namespace (per
// MeshConfig.rootNamespace), i.e. when a policy in ns applies mesh-wide.
func meshWidePolicy(ns string, c analysis.Context) bool {
	mConf := fetchMeshConfig(c)
	return mConf != nil && ns == mConf.GetRootNamespace()
}
// fetchMeshConfig returns the mesh configuration, caching it in the
// package-level meshConfig variable. It prefers the MeshConfig resource named
// util.MeshConfigName: iteration stops as soon as that name is seen;
// otherwise the last MeshConfig encountered wins.
func fetchMeshConfig(c analysis.Context) *v1alpha1.MeshConfig {
	// Serve from the cache once resolved (see the note on the meshConfig var).
	if meshConfig != nil {
		return meshConfig
	}
	c.ForEach(gvk.MeshConfig, func(r *resource.Instance) bool {
		meshConfig = r.Message.(*v1alpha1.MeshConfig)
		// Returning false stops iteration once the canonical name is found.
		return r.Metadata.FullName.Name != util.MeshConfigName
	})
	return meshConfig
}
// hasMatchingPodsRunning reports whether any pod in any namespace of the index
// carries labels matched by the given selector.
func hasMatchingPodsRunning(selector klabels.Selector, podLabelsMap map[string][]klabels.Set) bool {
	for _, labelSets := range podLabelsMap {
		if hasMatchingPodsRunningIn(selector, labelSets) {
			return true
		}
	}
	return false
}
// hasMatchingPodsRunningIn reports whether at least one label set in the list
// is matched by the selector.
func hasMatchingPodsRunningIn(selector klabels.Selector, setList []klabels.Set) bool {
	for _, podLabels := range setList {
		if selector.Matches(podLabels) {
			return true
		}
	}
	return false
}
// analyzeNamespaceNotFound reports a ReferencedResourceNotFound message for
// every source namespace expression (in both from.source.namespaces and
// from.source.notNamespaces) that matches no namespace in the cluster.
func (a *AuthorizationPoliciesAnalyzer) analyzeNamespaceNotFound(r *resource.Instance, c analysis.Context) {
	ap := r.Message.(*v1beta1.AuthorizationPolicy)
	for i, rule := range ap.Rules {
		for j, from := range rule.From {
			// Namespaces and NotNamespaces are scanned in one pass;
			// k indexes the concatenation of the two lists.
			for k, ns := range append(from.Source.Namespaces, from.Source.NotNamespaces...) {
				if !matchNamespace(ns, c) {
					m := msg.NewReferencedResourceNotFound(r, "namespace", ns)
					// Rebase k into the NotNamespaces list when it points past
					// the end of Namespaces, so the error-line index is local
					// to the list the entry came from.
					// NOTE(review): the same path template is used for both
					// lists — confirm NotNamespaces entries resolve to the
					// correct source line.
					nsIndex := k
					if nsIndex >= len(from.Source.Namespaces) {
						nsIndex -= len(from.Source.Namespaces)
					}
					if line, ok := util.ErrorLine(r, fmt.Sprintf(util.AuthorizationPolicyNameSpace, i, j, nsIndex)); ok {
						m.Line = line
					}
					c.Report(gvk.AuthorizationPolicy, m)
				}
			}
		}
	}
}
// matchNamespace reports whether any namespace in the cluster matches the
// expression exp (which may carry a leading or trailing "*" wildcard).
func matchNamespace(exp string, c analysis.Context) bool {
	found := false
	c.ForEach(gvk.Namespace, func(r *resource.Instance) bool {
		found = namespaceMatch(r.Metadata.FullName.String(), exp)
		// Stop iterating as soon as a match is found.
		return !found
	})
	return found
}
// namespaceMatch reports whether namespace ns matches the expression exp:
// "*" matches everything; "*suffix" and "prefix*" match by suffix/prefix;
// any other expression is compared case-insensitively.
func namespaceMatch(ns, exp string) bool {
	switch {
	case strings.EqualFold(exp, "*"):
		return true
	case strings.HasPrefix(exp, "*"):
		return strings.HasSuffix(ns, strings.TrimPrefix(exp, "*"))
	case strings.HasSuffix(exp, "*"):
		return strings.HasPrefix(ns, strings.TrimSuffix(exp, "*"))
	default:
		return strings.EqualFold(ns, exp)
	}
}
// initPodLabelsMap builds a map from namespace to the label sets of the pods
// in that namespace that participate in the mesh (sidecar-injected or in
// ambient mode).
func initPodLabelsMap(c analysis.Context) map[string][]klabels.Set {
	podLabelsMap := make(map[string][]klabels.Set)
	c.ForEach(gvk.Pod, func(r *resource.Instance) bool {
		pLabels := klabels.Set(r.Metadata.Labels)
		ns := r.Metadata.FullName.Namespace.String()
		// Every namespace with pods gets an entry, even if none of its pods
		// are in the mesh (the slice then stays empty).
		if podLabelsMap[ns] == nil {
			podLabelsMap[ns] = make([]klabels.Set, 0)
		}
		if util.PodInMesh(r, c) {
			podLabelsMap[ns] = append(podLabelsMap[ns], pLabels)
		}
		// NOTE(review): a pod that is both sidecar-injected and in ambient
		// mode would be appended twice — presumably the two conditions are
		// mutually exclusive; confirm.
		if util.PodInAmbientMode(r) {
			podLabelsMap[ns] = append(podLabelsMap[ns], pLabels)
		}
		return true
	})
	return podLabelsMap
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package deployment
import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ApplicationUIDAnalyzer flags pods and deployments configured to run as
// UID 1337 — presumably reserved for the istio-proxy sidecar, given the
// proxy-container exclusions below; confirm.
type ApplicationUIDAnalyzer struct{}

// Compile-time check that the analyzer satisfies analysis.Analyzer.
var _ analysis.Analyzer = &ApplicationUIDAnalyzer{}

const (
	// UserID is the UID that application containers must not run as.
	UserID = int64(1337)
)
// Metadata implements analysis.Analyzer.
func (appUID *ApplicationUIDAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Pod,
		gvk.Deployment,
	}
	return analysis.Metadata{
		Name:        "applicationUID.Analyzer",
		Description: "Checks invalid application UID",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: every Pod and Deployment is checked
// for use of the reserved UID. (The callback parameter is named r rather than
// resource so it does not shadow the resource package.)
func (appUID *ApplicationUIDAnalyzer) Analyze(context analysis.Context) {
	context.ForEach(gvk.Pod, func(r *resource.Instance) bool {
		appUID.analyzeAppUIDForPod(r, context)
		return true
	})
	context.ForEach(gvk.Deployment, func(r *resource.Instance) bool {
		appUID.analyzeAppUIDForDeployment(r, context)
		return true
	})
}
// analyzeAppUIDForPod reports a pod whose pod-level SecurityContext, or any
// non-proxy container's SecurityContext, is configured with the reserved UID.
func (appUID *ApplicationUIDAnalyzer) analyzeAppUIDForPod(resource *resource.Instance, context analysis.Context) {
	p := resource.Message.(*v1.PodSpec)
	// Skip analyzing control plane for IST0144.
	if util.IsIstioControlPlane(resource) {
		return
	}
	message := msg.NewInvalidApplicationUID(resource)
	if sc := p.SecurityContext; sc != nil && sc.RunAsUser != nil && *sc.RunAsUser == UserID {
		context.Report(gvk.Pod, message)
	}
	for _, container := range p.Containers {
		// The proxy and operator containers are excluded from this check.
		if container.Name == util.IstioProxyName || container.Name == util.IstioOperator {
			continue
		}
		if sc := container.SecurityContext; sc != nil && sc.RunAsUser != nil && *sc.RunAsUser == UserID {
			context.Report(gvk.Pod, message)
		}
	}
}
// analyzeAppUIDForDeployment reports a deployment whose pod template (or any
// non-proxy container in it) is configured with the reserved UID.
func (appUID *ApplicationUIDAnalyzer) analyzeAppUIDForDeployment(resource *resource.Instance, context analysis.Context) {
	d := resource.Message.(*appsv1.DeploymentSpec)
	// Skip analyzing control plane for IST0144.
	if util.IsIstioControlPlane(resource) {
		return
	}
	message := msg.NewInvalidApplicationUID(resource)
	spec := d.Template.Spec
	if sc := spec.SecurityContext; sc != nil && sc.RunAsUser != nil && *sc.RunAsUser == UserID {
		context.Report(gvk.Deployment, message)
	}
	for _, container := range spec.Containers {
		// The proxy and operator containers are excluded from this check.
		if container.Name == util.IstioProxyName || container.Name == util.IstioOperator {
			continue
		}
		if sc := container.SecurityContext; sc != nil && sc.RunAsUser != nil && *sc.RunAsUser == UserID {
			context.Report(gvk.Deployment, message)
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package deployment
import (
"fmt"
"strconv"
appsv1 "k8s.io/api/apps/v1"
core_v1 "k8s.io/api/core/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ServiceAssociationAnalyzer checks the association between services and the
// pods of a deployment, flagging port/protocol and targetPort conflicts.
type ServiceAssociationAnalyzer struct{}

// Compile-time check that the analyzer satisfies analysis.Analyzer.
var _ analysis.Analyzer = &ServiceAssociationAnalyzer{}

type (
	// PortMap maps a service port number to the protocols declared for it.
	PortMap map[int32]ProtocolMap
	// ProtocolMap maps a protocol to the services declaring it for a port.
	ProtocolMap map[core_v1.Protocol]ServiceNames
	// ServiceNames is a list of service names.
	ServiceNames []string
	// ServiceSpecWithName pairs a service spec with the service's full name.
	ServiceSpecWithName struct {
		Name string
		Spec *core_v1.ServiceSpec
	}
)

// targetPortMap maps targetPort -> port -> serviceName.
type targetPortMap map[string]map[int32]string
// Metadata implements analysis.Analyzer.
func (s *ServiceAssociationAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Service,
		gvk.Deployment,
		gvk.Namespace,
	}
	return analysis.Metadata{
		Name:        "deployment.MultiServiceAnalyzer",
		Description: "Checks association between services and pods",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: every in-mesh, non-waypoint
// deployment is checked for service port and targetPort conflicts.
func (s *ServiceAssociationAnalyzer) Analyze(c analysis.Context) {
	c.ForEach(gvk.Deployment, func(r *resource.Instance) bool {
		// Waypoint proxies and out-of-mesh deployments are not analyzed.
		if isWaypointDeployment(r) || !util.DeploymentInMesh(r, c) {
			return true
		}
		s.analyzeDeploymentPortProtocol(r, c)
		s.analyzeDeploymentTargetPorts(r, c)
		return true
	})
}
// isWaypointDeployment reports whether the deployment carries the
// managed-gateway label value identifying a mesh-controller-managed waypoint.
func isWaypointDeployment(r *resource.Instance) bool {
	return r.Metadata.Labels[constants.ManagedGatewayLabel] == constants.ManagedGatewayMeshControllerLabel
}
// analyzeDeploymentPortProtocol reports a message when the services matching
// this deployment declare more than one protocol for the same port number.
func (s *ServiceAssociationAnalyzer) analyzeDeploymentPortProtocol(r *resource.Instance, c analysis.Context) {
	// Find the services whose selectors match this deployment's pod template.
	matchingSvcs := s.findMatchingServices(r, c)
	// Build port -> protocol -> service-names so that different protocols on
	// the same port number become visible.
	portMap := servicePortMap(matchingSvcs)
	for port, protMap := range portMap {
		// More than one protocol for a single port number is a conflict.
		if len(protMap) <= 1 {
			continue
		}
		// Collect the names of every service involved in the conflict.
		svcNames := make(ServiceNames, 0)
		for protocol := range protMap {
			svcNames = append(svcNames, protMap[protocol]...)
		}
		m := msg.NewDeploymentAssociatedToMultipleServices(r, r.Metadata.FullName.Name.String(), port, svcNames)
		// util.MetadataName is a fixed path, not a format template: feed it
		// through "%s" instead of using it AS the format string, which `go
		// vet` flags (non-constant format string, zero arguments).
		if line, ok := util.ErrorLine(r, fmt.Sprintf("%s", util.MetadataName)); ok {
			m.Line = line
		}
		// Report the conflict on the deployment.
		c.Report(gvk.Deployment, m)
	}
}

// analyzeDeploymentTargetPorts reports a message when the services matching
// this deployment reach the same targetPort through different service ports.
func (s *ServiceAssociationAnalyzer) analyzeDeploymentTargetPorts(r *resource.Instance, c analysis.Context) {
	matchingSvcs := s.findMatchingServices(r, c)
	// Build targetPort -> port -> service name for the matching services.
	tpm := serviceTargetPortsMap(matchingSvcs)
	for targetPort, portServices := range tpm {
		// One targetPort addressed via more than one service port is a conflict.
		if len(portServices) <= 1 {
			continue
		}
		svcNames := make(ServiceNames, 0, len(portServices))
		ports := make([]int32, 0, len(portServices))
		for p, name := range portServices {
			svcNames = append(svcNames, name)
			ports = append(ports, p)
		}
		m := msg.NewDeploymentConflictingPorts(r, r.Metadata.FullName.Name.String(), svcNames, targetPort, ports)
		// Same vet-safe formatting as in analyzeDeploymentPortProtocol.
		if line, ok := util.ErrorLine(r, fmt.Sprintf("%s", util.MetadataName)); ok {
			m.Line = line
		}
		c.Report(gvk.Deployment, m)
	}
}
// findMatchingServices returns the services (paired with their full names) in
// the deployment's namespace whose non-empty selectors match the deployment's
// pod-template labels.
func (s *ServiceAssociationAnalyzer) findMatchingServices(r *resource.Instance, c analysis.Context) []ServiceSpecWithName {
	matchingSvcs := make([]ServiceSpecWithName, 0)
	d := r.Message.(*appsv1.DeploymentSpec)
	deploymentNS := r.Metadata.FullName.Namespace.String()
	// The pod-template labels do not change per service; compute them once.
	podLabels := klabels.Set(d.Template.Labels)
	c.ForEach(gvk.Service, func(svcRes *resource.Instance) bool {
		spec := svcRes.Message.(*core_v1.ServiceSpec)
		selector := klabels.SelectorFromSet(spec.Selector)
		if !selector.Empty() && selector.Matches(podLabels) && svcRes.Metadata.FullName.Namespace.String() == deploymentNS {
			matchingSvcs = append(matchingSvcs, ServiceSpecWithName{svcRes.Metadata.FullName.String(), spec})
		}
		return true
	})
	return matchingSvcs
}
// servicePortMap builds a map of port -> protocol -> service names for the
// given services, e.g. m[80]["TCP"] -> svcA, svcB, svcC.
func servicePortMap(svcs []ServiceSpecWithName) PortMap {
	portMap := PortMap{}
	for _, swn := range svcs {
		for _, sPort := range swn.Spec.Ports {
			// Kubernetes defaults an unset protocol to TCP.
			protocol := sPort.Protocol
			if protocol == "" {
				protocol = core_v1.ProtocolTCP
			}
			// Lazily create the protocol map for a first-seen port number.
			if _, ok := portMap[sPort.Port]; !ok {
				portMap[sPort.Port] = ProtocolMap{}
			}
			// Record the service under its port/protocol combination.
			portMap[sPort.Port][protocol] = append(portMap[sPort.Port][protocol], swn.Name)
		}
	}
	return portMap
}
// serviceTargetPortsMap builds a map of targetPort -> port -> service name
// for the given services, e.g. m["80"][80] -> svc.
func serviceTargetPortsMap(svcs []ServiceSpecWithName) targetPortMap {
	pm := targetPortMap{}
	for _, swn := range svcs {
		for _, sPort := range swn.Spec.Ports {
			target := sPort.TargetPort.String()
			if target == "0" || target == "" {
				// By default and for convenience, the targetPort is set to the
				// same value as the port field.
				target = strconv.Itoa(int(sPort.Port))
			}
			if _, ok := pm[target]; !ok {
				pm[target] = map[int32]string{}
			}
			pm[target][sPort.Port] = swn.Name
		}
	}
	return pm
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package deprecation
import (
"fmt"
k8sext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// FieldAnalyzer checks for deprecated Istio types and fields.
type FieldAnalyzer struct{}

// deprecatedCRDs tracks Istio CRDs removed from
// manifests/charts/base/crds/crd-all.gen.yaml.
var deprecatedCRDs = []k8sext.CustomResourceDefinitionSpec{
	{
		Group: "rbac.istio.io",
		Names: k8sext.CustomResourceDefinitionNames{Kind: "ClusterRbacConfig"},
	},
	{
		Group: "rbac.istio.io",
		Names: k8sext.CustomResourceDefinitionNames{Kind: "RbacConfig"},
	},
	{
		Group: "rbac.istio.io",
		Names: k8sext.CustomResourceDefinitionNames{Kind: "ServiceRole"},
	},
	{
		Group: "rbac.istio.io",
		Names: k8sext.CustomResourceDefinitionNames{Kind: "ServiceRoleBinding"},
	},
}
// There is currently no Istio API that reports which fields are deprecated,
// so this analyzer is maintained by hand. To see what is deprecated, run:
//   find . -name "*.proto" -exec grep -i "deprecated=true" \{\} \; -print

// Metadata implements analysis.Analyzer
func (*FieldAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name:        "deprecation.DeprecationAnalyzer",
		Description: "Checks for deprecated Istio types and fields",
		Inputs: []config.GroupVersionKind{
			gvk.VirtualService,
			gvk.Sidecar,
			gvk.CustomResourceDefinition,
		},
	}
}
// Analyze implements analysis.Analyzer: VirtualServices, Sidecars, and CRDs
// are each scanned for deprecated usage.
func (fa *FieldAnalyzer) Analyze(ctx analysis.Context) {
	// run applies one per-resource check to every resource of the given kind.
	run := func(kind config.GroupVersionKind, check func(*resource.Instance, analysis.Context)) {
		ctx.ForEach(kind, func(r *resource.Instance) bool {
			check(r, ctx)
			return true
		})
	}
	run(gvk.VirtualService, fa.analyzeVirtualService)
	run(gvk.Sidecar, fa.analyzeSidecar)
	run(gvk.CustomResourceDefinition, fa.analyzeCRD)
}
// analyzeCRD reports a Deprecated message for any CRD whose group/kind pair
// appears in deprecatedCRDs.
func (*FieldAnalyzer) analyzeCRD(r *resource.Instance, ctx analysis.Context) {
	// The resource's group and kind do not depend on the loop variable, so
	// extract them once instead of re-running the type switch per entry.
	var group, kind string
	switch crd := r.Message.(type) {
	case *k8sext.CustomResourceDefinition:
		group = crd.Spec.Group
		kind = crd.Spec.Names.Kind
	case *k8sext.CustomResourceDefinitionSpec:
		group = crd.Group
		kind = crd.Names.Kind
	}
	for _, depCRD := range deprecatedCRDs {
		if group == depCRD.Group && kind == depCRD.Names.Kind {
			ctx.Report(gvk.CustomResourceDefinition,
				msg.NewDeprecated(r, crRemovedMessage(depCRD.Group, depCRD.Names.Kind)))
		}
	}
}
// analyzeSidecar flags the deprecated (ignored) OutboundTrafficPolicy.EgressProxy
// field on a Sidecar resource.
func (*FieldAnalyzer) analyzeSidecar(r *resource.Instance, ctx analysis.Context) {
	sc := r.Message.(*v1alpha3.Sidecar)
	if sc.OutboundTrafficPolicy != nil {
		if sc.OutboundTrafficPolicy.EgressProxy != nil {
			// Report under the Sidecar GVK: the analyzed resource is a
			// Sidecar, not a VirtualService (the original reported the wrong
			// kind, mismatching the pattern used by analyzeVirtualService).
			ctx.Report(gvk.Sidecar,
				msg.NewDeprecated(r, ignoredMessage("OutboundTrafficPolicy.EgressProxy")))
		}
	}
}
// analyzeVirtualService flags the deprecated HTTPRoute.fault.delay.percent
// field, which is superseded by the percentage field.
func (*FieldAnalyzer) analyzeVirtualService(r *resource.Instance, ctx analysis.Context) {
	vs := r.Message.(*v1alpha3.VirtualService)
	for _, httpRoute := range vs.Http {
		fault := httpRoute.Fault
		if fault == nil || fault.Delay == nil {
			continue
		}
		// The deprecated field is accessed deliberately here.
		// nolint: staticcheck
		if fault.Delay.Percent > 0 {
			ctx.Report(gvk.VirtualService,
				msg.NewDeprecated(r, replacedMessage("HTTPRoute.fault.delay.percent", "HTTPRoute.fault.delay.percentage")))
		}
	}
}
// replacedMessage builds the deprecation text for a field that has a direct
// replacement.
func replacedMessage(deprecated, replacement string) string {
	return fmt.Sprintf("%v is deprecated; use %v", deprecated, replacement)
}

// ignoredMessage builds the deprecation text for a field that is now ignored.
func ignoredMessage(field string) string {
	return fmt.Sprintf("%v ignored", field)
}

// crRemovedMessage builds the deprecation text for a removed custom resource type.
func crRemovedMessage(group, kind string) string {
	return fmt.Sprintf("Custom resource type %v %v is removed", group, kind)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package destinationrule
import (
"fmt"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// CaCertificateAnalyzer checks that caCertificates is set whenever a
// DestinationRule enables SIMPLE or MUTUAL TLS mode.
type CaCertificateAnalyzer struct{}

// Compile-time check that the analyzer satisfies analysis.Analyzer.
var _ analysis.Analyzer = &CaCertificateAnalyzer{}
// Metadata implements analysis.Analyzer.
func (c *CaCertificateAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.DestinationRule,
	}
	return analysis.Metadata{
		Name:        "destinationrule.CaCertificateAnalyzer",
		Description: "Checks if caCertificates is set when TLS mode is SIMPLE/MUTUAL",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: each DestinationRule is checked for
// missing server-certificate verification settings.
func (c *CaCertificateAnalyzer) Analyze(ctx analysis.Context) {
	ctx.ForEach(gvk.DestinationRule, func(r *resource.Instance) bool {
		c.analyzeDestinationRule(r, ctx)
		return true
	})
}
// analyzeDestinationRule reports a missing caCertificates setting whenever a
// DestinationRule enables SIMPLE or MUTUAL TLS — at the rule level or on a
// port-level traffic policy — without a CA bundle to verify the server.
func (c *CaCertificateAnalyzer) analyzeDestinationRule(r *resource.Instance, ctx analysis.Context) {
	dr := r.Message.(*v1alpha3.DestinationRule)
	drNs := r.Metadata.FullName.Namespace
	drName := r.Metadata.FullName.String()
	mode := dr.GetTrafficPolicy().GetTls().GetMode()
	if mode == v1alpha3.ClientTLSSettings_SIMPLE || mode == v1alpha3.ClientTLSSettings_MUTUAL {
		if dr.GetTrafficPolicy().GetTls().GetCaCertificates() == "" {
			m := msg.NewNoServerCertificateVerificationDestinationLevel(r, drName,
				drNs.String(), mode.String(), dr.GetHost())
			// util.DestinationRuleTLSCert is a fixed path, not a format
			// template: pass it directly instead of through a zero-argument
			// fmt.Sprintf, which `go vet` flags (non-constant format string).
			if line, ok := util.ErrorLine(r, util.DestinationRuleTLSCert); ok {
				m.Line = line
			}
			ctx.Report(gvk.DestinationRule, m)
		}
	}
	// Use the nil-safe getter chain for consistency with the rule-level check
	// above (the direct field access was the only unguarded dereference).
	portSettings := dr.GetTrafficPolicy().GetPortLevelSettings()
	for i, p := range portSettings {
		mode = p.GetTls().GetMode()
		if mode == v1alpha3.ClientTLSSettings_SIMPLE || mode == v1alpha3.ClientTLSSettings_MUTUAL {
			if p.GetTls().GetCaCertificates() == "" {
				m := msg.NewNoServerCertificateVerificationPortLevel(r, drName,
					drNs.String(), mode.String(), dr.GetHost(), p.GetPort().String())
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.DestinationRuleTLSPortLevelCert, i)); ok {
					m.Line = line
				}
				ctx.Report(gvk.DestinationRule, m)
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package envoyfilter
import (
"fmt"
network "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// EnvoyPatchAnalyzer checks the patch sections of EnvoyFilter resources for
// operation/applyTo mismatches and missing priorities.
type EnvoyPatchAnalyzer struct{}

// Compile-time check that the analyzer satisfies analysis.Analyzer.
var _ analysis.Analyzer = &EnvoyPatchAnalyzer{}
// Metadata implements analysis.Analyzer.
func (*EnvoyPatchAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.EnvoyFilter,
	}
	// NOTE(review): the description text (including its trailing space) is
	// preserved verbatim; tidying it could break golden-output comparisons.
	return analysis.Metadata{
		Name:        "envoyfilter.EnvoyPatchAnalyzer",
		Description: "Checks an envoyFilters ",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer. Filter names from patches that pin a
// proxyVersion are accumulated across EnvoyFilters so later filters can be
// checked against them.
func (s *EnvoyPatchAnalyzer) Analyze(c analysis.Context) {
	var patchFilterNames []string
	c.ForEach(gvk.EnvoyFilter, func(r *resource.Instance) bool {
		patchFilterNames = s.analyzeEnvoyFilterPatch(r, c, patchFilterNames)
		return true
	})
}
// relativeOperationMsg reports a warning for a relative patch operation
// (REMOVE/REPLACE/INSERT_*) used while the EnvoyFilter's priority is unset.
// When another EnvoyFilter with the same filter name pins a proxyVersion,
// the proxyVersion-specific variant of the warning is used instead.
func relativeOperationMsg(r *resource.Instance, c analysis.Context, index int, priority int32, patchFilterNames []string, instanceName string) {
	// An explicit priority makes relative operations safe; nothing to report.
	if priority != 0 {
		return
	}
	message := msg.NewEnvoyFilterUsesRelativeOperation(r)
	// Prefer the proxyVersion warning when this filter name was recorded as
	// having a proxyVersion pinned elsewhere — it subsumes the relative-
	// operation warning.
	for _, name := range patchFilterNames {
		if name == instanceName {
			message = msg.NewEnvoyFilterUsesRelativeOperationWithProxyVersion(r)
			break
		}
	}
	if line, ok := util.ErrorLine(r, fmt.Sprintf(util.EnvoyFilterConfigPath, index)); ok {
		message.Line = line
	}
	c.Report(gvk.EnvoyFilter, message)
}
// analyzeEnvoyFilterPatch validates every configPatch of an EnvoyFilter:
//   - ADD and REMOVE targeting ROUTE_CONFIGURATION or HTTP_ROUTE are reported
//     as misuse (the operation is ignored for those targets);
//   - REPLACE targeting anything other than HTTP_FILTER/NETWORK_FILTER is
//     reported as misuse;
//   - relative operations (REMOVE/REPLACE/INSERT_BEFORE/INSERT_AFTER) without
//     a priority trigger a warning via relativeOperationMsg.
// It returns patchFilterNames extended with the filter names of patches that
// pin a proxyVersion, for use when analyzing subsequent EnvoyFilters.
func (*EnvoyPatchAnalyzer) analyzeEnvoyFilterPatch(r *resource.Instance, c analysis.Context, patchFilterNames []string) []string {
	ef := r.Message.(*network.EnvoyFilter)
	for index, patch := range ef.ConfigPatches {
		// NOTE(review): an empty patch section stops the scan of ALL remaining
		// patches (break, not continue) — confirm that is intended.
		if patch.GetPatch() == nil {
			break
		}
		// Determine the filter name this patch refers to: prefer the "name"
		// field of the patch value, falling back to the filter named in the
		// listener match.
		instanceName := ""
		if patch.Patch.GetValue() != nil {
			if patch.Patch.Value.GetFields() != nil {
				tmpValue := patch.Patch.Value.GetFields()
				tmpName := tmpValue["name"]
				if tmpName != nil {
					instanceName = tmpValue["name"].String()
				} else if patch.GetMatch() != nil {
					if patch.Match.GetListener() != nil {
						if patch.Match.GetListener().GetFilterChain() != nil {
							instanceName = patch.Match.GetListener().GetFilterChain().GetFilter().GetName()
						}
					}
				}
			}
		}
		// check each operation type
		if patch.Patch.Operation == network.EnvoyFilter_Patch_ADD {
			// ADD is an absolute operation, but it is ignored when applyTo is
			// ROUTE_CONFIGURATION or HTTP_ROUTE — warn about the mismatch.
			if patch.ApplyTo == network.EnvoyFilter_ROUTE_CONFIGURATION || patch.ApplyTo == network.EnvoyFilter_HTTP_ROUTE {
				message := msg.NewEnvoyFilterUsesAddOperationIncorrectly(r)
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.EnvoyFilterConfigPath, index)); ok {
					message.Line = line
				}
				c.Report(gvk.EnvoyFilter, message)
			}
		} else if patch.Patch.Operation == network.EnvoyFilter_Patch_REMOVE {
			// REMOVE is likewise ignored for ROUTE_CONFIGURATION / HTTP_ROUTE.
			if patch.ApplyTo == network.EnvoyFilter_ROUTE_CONFIGURATION || patch.ApplyTo == network.EnvoyFilter_HTTP_ROUTE {
				message := msg.NewEnvoyFilterUsesRemoveOperationIncorrectly(r)
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.EnvoyFilterConfigPath, index)); ok {
					message.Line = line
				}
				c.Report(gvk.EnvoyFilter, message)
			} else {
				// REMOVE is relative: warn when no priority is set.
				relativeOperationMsg(r, c, index, ef.Priority, patchFilterNames, instanceName)
			}
		} else if patch.Patch.Operation == network.EnvoyFilter_Patch_REPLACE {
			// REPLACE is only valid for HTTP_FILTER and NETWORK_FILTER.
			if patch.ApplyTo != network.EnvoyFilter_NETWORK_FILTER && patch.ApplyTo != network.EnvoyFilter_HTTP_FILTER {
				message := msg.NewEnvoyFilterUsesReplaceOperationIncorrectly(r)
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.EnvoyFilterConfigPath, index)); ok {
					message.Line = line
				}
				c.Report(gvk.EnvoyFilter, message)
			} else {
				// REPLACE is relative: warn when no priority is set.
				relativeOperationMsg(r, c, index, ef.Priority, patchFilterNames, instanceName)
			}
		} else if patch.Patch.Operation == network.EnvoyFilter_Patch_INSERT_BEFORE || patch.Patch.Operation == network.EnvoyFilter_Patch_INSERT_AFTER {
			// INSERT_BEFORE / INSERT_AFTER are relative: warn when no priority is set.
			relativeOperationMsg(r, c, index, ef.Priority, patchFilterNames, instanceName)
		}
		// Record this filter name for later EnvoyFilters when the patch match
		// pins a specific proxyVersion.
		if patch.GetMatch() != nil {
			if patch.Match.GetProxy() != nil {
				if len(patch.Match.Proxy.ProxyVersion) != 0 {
					patchFilterNames = append(patchFilterNames, instanceName)
				}
			}
		}
	}
	return patchFilterNames
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package externalcontrolplane
import (
"fmt"
"net"
"net/url"
"strings"
v1 "k8s.io/api/admissionregistration/v1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ExternalControlPlaneAnalyzer checks webhook configurations that reference an
// external control plane by URL.
type ExternalControlPlaneAnalyzer struct{}

// Compile-time check that this Analyzer correctly implements the interface.
var _ analysis.Analyzer = &ExternalControlPlaneAnalyzer{}
// Metadata implements Analyzer
func (s *ExternalControlPlaneAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.ValidatingWebhookConfiguration,
		gvk.MutatingWebhookConfiguration,
	}
	return analysis.Metadata{
		Name:        "externalcontrolplane.ExternalControlPlaneAnalyzer",
		Description: "Checks that the remote IstioOperator resources reference an external control plane",
		Inputs:      inputs,
	}
}
const (
	// defaultIstioValidatingWebhookName is the default validating webhook name.
	defaultIstioValidatingWebhookName = "istiod-default-validator"
	// istioValidatingWebhookNamePrefix matches revisioned validating webhooks.
	istioValidatingWebhookNamePrefix = "istio-validator"
	// istioMutatingWebhookNamePrefix matches sidecar-injector mutating webhooks.
	istioMutatingWebhookNamePrefix = "istio-sidecar-injector"
)
// Analyze implements Analyzer. It inspects Istio's validating and mutating
// webhook configurations: a clientConfig carrying an explicit URL (the
// external-control-plane setup) must be well formed and should use a
// hostname rather than a raw IP; a clientConfig with neither URL nor service
// is reported as blank.
func (s *ExternalControlPlaneAnalyzer) Analyze(c analysis.Context) {
	reportWebhookURL := func(r *resource.Instance, hName string, clientConf v1.WebhookClientConfig) {
		// A URL (instead of a service reference) means an external istiod is in use.
		// NOTE(review): all reports are filed under
		// gvk.ValidatingWebhookConfiguration, even for hooks found on a
		// MutatingWebhookConfiguration — confirm that is intended.
		if clientConf.URL != nil {
			result, err := lintWebhookURL(*clientConf.URL)
			if err != nil {
				c.Report(gvk.ValidatingWebhookConfiguration, msg.NewInvalidExternalControlPlaneConfig(r, *clientConf.URL, hName, err.Error()))
				return
			}
			if result.isIP() {
				c.Report(gvk.ValidatingWebhookConfiguration, msg.NewExternalControlPlaneAddressIsNotAHostname(r, *clientConf.URL, hName))
			}
		} else if clientConf.Service == nil {
			c.Report(gvk.ValidatingWebhookConfiguration, msg.NewInvalidExternalControlPlaneConfig(r, "", hName, "is blank"))
		}
	}
	c.ForEach(gvk.ValidatingWebhookConfiguration, func(resource *resource.Instance) bool {
		webhookConfig := resource.Message.(*v1.ValidatingWebhookConfiguration)
		// 1. ValidatingWebhookConfiguration: istio-validator or istiod-default-validator(default)
		// istio-validator{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}-{{ .Values.global.istioNamespace }}
		if webhookConfig.GetName() != "" &&
			(webhookConfig.Name == defaultIstioValidatingWebhookName ||
				strings.HasPrefix(webhookConfig.Name, istioValidatingWebhookNamePrefix)) {
			for _, hook := range webhookConfig.Webhooks {
				reportWebhookURL(resource, hook.Name, hook.ClientConfig)
			}
		}
		return true
	})
	c.ForEach(gvk.MutatingWebhookConfiguration, func(resource *resource.Instance) bool {
		webhookConfig := resource.Message.(*v1.MutatingWebhookConfiguration)
		// 2. MutatingWebhookConfiguration: istio-sidecar-injector
		// {{- if eq .Release.Namespace "istio-system"}}
		// name: istio-sidecar-injector{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}
		// {{- else }}
		// name: istio-sidecar-injector{{- if not (eq .Values.revision "") }}-{{ .Values.revision }}{{- end }}-{{ .Release.Namespace }}
		// {{- end }}
		if strings.HasPrefix(webhookConfig.Name, istioMutatingWebhookNamePrefix) {
			for _, hook := range webhookConfig.Webhooks {
				reportWebhookURL(resource, hook.Name, hook.ClientConfig)
			}
		}
		return true
	})
}
// webhookURLResult captures what lintWebhookURL learned about the host
// portion of a webhook URL.
type webhookURLResult struct {
	ip          net.IP   // set when the host is a literal IP address
	hostName    string   // set when the host is a DNS name
	resolvesIPs []net.IP // addresses the hostname resolved to
}
// isIP reports whether the webhook URL's host parsed as a literal IP address.
// A nil receiver is treated as "not an IP".
func (r *webhookURLResult) isIP() bool {
	return r != nil && r.ip != nil
}
// lintWebhookURL parses webhookURL and classifies its host: a literal IP is
// recorded in result.ip; otherwise the hostname is resolved via DNS and the
// resulting addresses recorded. The returned error strings are phrased so the
// caller can append them to a message naming the URL. Note this performs a
// live DNS lookup.
func lintWebhookURL(webhookURL string) (result *webhookURLResult, err error) {
	result = &webhookURLResult{}
	parsed, parseErr := url.Parse(webhookURL)
	if parseErr != nil {
		return result, fmt.Errorf("was provided in an invalid format")
	}
	host := parsed.Hostname()
	// A literal IP address needs no DNS resolution.
	if ip := net.ParseIP(host); ip != nil {
		result.ip = ip
		return result, nil
	}
	result.hostName = host
	ips, lookupErr := net.LookupIP(host)
	if lookupErr != nil {
		return result, fmt.Errorf("cannot be resolved via a DNS lookup")
	}
	result.resolvesIPs = ips
	if len(ips) == 0 {
		return result, fmt.Errorf("resolves with zero IP addresses")
	}
	return result, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// CertificateAnalyzer checks whether gateways that share a selector also
// share a TLS certificate.
type CertificateAnalyzer struct{}

// Compile-time check that the analyzer satisfies analysis.Analyzer.
var _ analysis.Analyzer = &CertificateAnalyzer{}
// Metadata implements analysis.Analyzer.
func (*CertificateAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Gateway,
	}
	return analysis.Metadata{
		Name:        "gateway.CertificateAnalyzer",
		Description: "Checks a gateway certificate",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: every Gateway is checked for
// duplicate certificates against its sibling gateways.
func (gateway *CertificateAnalyzer) Analyze(context analysis.Context) {
	context.ForEach(gvk.Gateway, func(resource *resource.Instance) bool {
		// features.ScopeGatewayToNamespace controls whether sibling lookup is
		// restricted to the gateway's own namespace.
		gateway.analyzeDuplicateCertificate(resource, context, features.ScopeGatewayToNamespace)
		return true
	})
}
// analyzeDuplicateCertificate reports a GatewayDuplicateCertificate message
// when another gateway — whose selector is a superset of this gateway's
// selector — has a server configured with the same TLS certificate.
// When scopeGatewayToNamespace is true, only gateways in the same namespace
// are considered.
func (gateway *CertificateAnalyzer) analyzeDuplicateCertificate(currentResource *resource.Instance, context analysis.Context, scopeGatewayToNamespace bool) {
	currentGateway := currentResource.Message.(*v1alpha3.Gateway)
	currentGatewayFullName := currentResource.Metadata.FullName
	gateways := getGatewaysWithSelector(context, scopeGatewayToNamespace, currentGatewayFullName, currentGateway.Selector)
	for _, gatewayFullName := range gateways {
		// ignore matching the same exact gateway
		if currentGatewayFullName == gatewayFullName {
			continue
		}
		gatewayInstance := context.Find(gvk.Gateway, gatewayFullName)
		gateway := gatewayInstance.Message.(*v1alpha3.Gateway)
		// Compare every server pair between the two gateways.
		for _, currentServer := range currentGateway.Servers {
			for _, server := range gateway.Servers {
				// make sure have TLS configuration
				if currentServer.Tls == nil || server.Tls == nil {
					continue
				}
				if haveSameCertificate(currentServer.Tls, server.Tls) {
					gatewayNames := []string{currentGatewayFullName.String(), gatewayFullName.String()}
					message := msg.NewGatewayDuplicateCertificate(currentResource, gatewayNames)
					if line, ok := util.ErrorLine(currentResource, util.MetadataName); ok {
						message.Line = line
					}
					context.Report(gvk.Gateway, message)
				}
			}
		}
	}
}
// haveSameCertificate reports whether two server TLS settings reference the
// same certificate material: either both name the same credential, or both
// inline the same server certificate AND the same private key. If only one
// side uses a credential name, or any required field is empty, they are
// considered distinct.
func haveSameCertificate(currentGatewayTLS, gatewayTLS *v1alpha3.ServerTLSSettings) bool {
	credA, credB := currentGatewayTLS.CredentialName, gatewayTLS.CredentialName
	if credA != "" && credB != "" {
		return credA == credB
	}
	if credA != "" || credB != "" {
		// Exactly one side uses a credential name; no match possible.
		return false
	}
	if currentGatewayTLS.ServerCertificate == "" || gatewayTLS.ServerCertificate == "" {
		return false
	}
	if currentGatewayTLS.ServerCertificate != gatewayTLS.ServerCertificate {
		return false
	}
	if currentGatewayTLS.PrivateKey == "" || gatewayTLS.PrivateKey == "" {
		return false
	}
	return currentGatewayTLS.PrivateKey == gatewayTLS.PrivateKey
}
// getGatewaysWithSelector returns the full names of all gateways whose
// selector is a superset of currentGWSelector (i.e. they select at least the
// same workloads). An empty currentGWSelector matches every gateway. When
// gwScope is true, only gateways in currentGWName's namespace are considered.
func getGatewaysWithSelector(c analysis.Context, gwScope bool, currentGWName resource.FullName, currentGWSelector map[string]string) []resource.FullName {
	var gateways []resource.FullName
	c.ForEach(gvk.Gateway, func(resource *resource.Instance) bool {
		// if scopeToNamespace true, ignore adding gateways from other namespace
		if gwScope {
			if currentGWName.Namespace != resource.Metadata.FullName.Namespace {
				return true
			}
		}
		// if current gateway selector is empty, match all gateway
		if len(currentGWSelector) == 0 {
			gateways = append(gateways, resource.Metadata.FullName)
			return true
		}
		gateway := resource.Message.(*v1alpha3.Gateway)
		// if current gateway selector is subset of other gateway selector
		// add other gateway
		if selectorSubset(currentGWSelector, gateway.Selector) {
			gateways = append(gateways, resource.Metadata.FullName)
		}
		return true
	})
	return gateways
}
// selectorSubset reports whether selectorX is a subset of selectorY:
// every key of selectorX must be present in selectorY with the same value.
// An empty selectorX is trivially a subset of any selector.
func selectorSubset(selectorX, selectorY map[string]string) bool {
	for key, valueX := range selectorX {
		// Direct map lookup instead of scanning every entry of selectorY;
		// this turns the previous O(len(X)*len(Y)) nested loop into O(len(X)).
		valueY, ok := selectorY[key]
		if !ok || valueX != valueY {
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
"strconv"
"strings"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ConflictingGatewayAnalyzer checks a gateway's selector, port number and hosts,
// reporting gateways that claim the same selector/port/host combination.
type ConflictingGatewayAnalyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &ConflictingGatewayAnalyzer{}
// Metadata implements analysis.Analyzer.
func (*ConflictingGatewayAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{gvk.Gateway}
	return analysis.Metadata{
		Name:        "gateway.ConflictingGatewayAnalyzer",
		Description: "Checks a gateway's selector, port number and hosts",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: it precomputes the selector~port
// lookup map once, then checks every gateway against it.
func (s *ConflictingGatewayAnalyzer) Analyze(c analysis.Context) {
	conflictIndex := initGatewaysMap(c)
	visit := func(r *resource.Instance) bool {
		s.analyzeGateway(r, c, conflictIndex)
		return true
	}
	c.ForEach(gvk.Gateway, visit)
}
// analyzeGateway checks one gateway against the precomputed map built by
// initGatewaysMap (key: "selector~port", value: gateway name -> hosts).
// It reports (a) a selector that matches no other known gateway key, and
// (b) other gateways that share the same selector, port AND an overlapping
// host — a conflicting configuration.
func (*ConflictingGatewayAnalyzer) analyzeGateway(r *resource.Instance, c analysis.Context,
	gwCMap map[string]map[string][]string,
) {
	gw := r.Message.(*v1alpha3.Gateway)
	gwName := r.Metadata.FullName.String()
	// For pods selected by gw.Selector, find Services that select them and remember those ports
	gwSelector := klabels.SelectorFromSet(gw.Selector)
	sGWSelector := gwSelector.String()
	// Check non-exist gateway with particular selector.
	// NOTE(review): strings.Contains is a substring test, so a selector that
	// is a prefix of another selector's key would also count as "existing" —
	// confirm this over-matching is acceptable.
	isExists := false
	for gwmKey := range gwCMap {
		if strings.Contains(gwmKey, sGWSelector) {
			isExists = true
			break
		}
	}
	if sGWSelector != "" && !isExists {
		m := msg.NewReferencedResourceNotFound(r, "selector", sGWSelector)
		label := util.ExtractLabelFromSelectorString(sGWSelector)
		if line, ok := util.ErrorLine(r, fmt.Sprintf(util.GatewaySelector, label)); ok {
			m.Line = line
		}
		c.Report(gvk.Gateway, m)
		return
	}
	for _, server := range gw.Servers {
		var rmsg []string
		conflictingGWMatch := 0
		sPortNumber := strconv.Itoa(int(server.GetPort().GetNumber()))
		mapKey := genGatewayMapKey(sGWSelector, sPortNumber)
		for gwNameKey, gwHostsValue := range gwCMap[mapKey] {
			for _, gwHost := range server.GetHosts() {
				// both selector and portnumber are the same, then check hosts
				if isGWsHostMatched(gwHost, gwHostsValue) {
					if gwName != gwNameKey {
						conflictingGWMatch++
						rmsg = append(rmsg, gwNameKey)
					}
				}
			}
		}
		if conflictingGWMatch > 0 {
			reportMsg := strings.Join(rmsg, ",")
			hostsMsg := strings.Join(server.GetHosts(), ",")
			m := msg.NewConflictingGateways(r, reportMsg, sGWSelector, sPortNumber, hostsMsg)
			c.Report(gvk.Gateway, m)
		}
	}
}
// isGWsHostMatched reports whether gwInstance matches any entry in
// gwHostList, using Istio host.Name matching (which is wildcard-aware).
func isGWsHostMatched(gwInstance string, gwHostList []string) bool {
	target := host.Name(gwInstance)
	for _, candidate := range gwHostList {
		if target.Matches(host.Name(candidate)) {
			return true
		}
	}
	return false
}
// initGatewaysMap builds a lookup keyed by "selector~port" whose value maps
// each gateway's full name to the hosts of the server using that key. Every
// server of every gateway in the context contributes one entry.
func initGatewaysMap(ctx analysis.Context) map[string]map[string][]string {
	index := make(map[string]map[string][]string)
	ctx.ForEach(gvk.Gateway, func(r *resource.Instance) bool {
		gw := r.Message.(*v1alpha3.Gateway)
		name := r.Metadata.FullName.String()
		selector := klabels.SelectorFromSet(gw.GetSelector()).String()
		for _, server := range gw.GetServers() {
			port := strconv.Itoa(int(server.GetPort().GetNumber()))
			key := genGatewayMapKey(selector, port)
			if index[key] == nil {
				index[key] = make(map[string][]string)
			}
			index[key][name] = server.GetHosts()
		}
		return true
	})
	return index
}
// genGatewayMapKey joins a gateway selector string and a port number into
// the "selector~port" key used by the conflicting-gateway lookup map.
func genGatewayMapKey(selector, portNumber string) string {
	return selector + "~" + portNumber
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
v1 "k8s.io/api/core/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// IngressGatewayPortAnalyzer checks a gateway's ports against the ports
// exposed by the Kubernetes services selecting the gateway's workload pods.
type IngressGatewayPortAnalyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &IngressGatewayPortAnalyzer{}
// Metadata implements analysis.Analyzer.
func (*IngressGatewayPortAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{gvk.Gateway, gvk.Pod, gvk.Service}
	return analysis.Metadata{
		Name:        "gateway.IngressGatewayPortAnalyzer",
		Description: "Checks a gateway's ports against the gateway's Kubernetes service ports",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer: it runs the port check against
// every Gateway resource in the context.
func (s *IngressGatewayPortAnalyzer) Analyze(c analysis.Context) {
	visit := func(r *resource.Instance) bool {
		s.analyzeGateway(r, c)
		return true
	}
	c.ForEach(gvk.Gateway, visit)
}
// analyzeGateway verifies that every port declared by the Gateway's servers
// is actually exposed (as a TCP target port) by at least one Kubernetes
// service selecting the gateway's workload pods. It reports:
//   - ReferencedResourceNotFound if the gateway's selector matches no pod;
//   - GatewayPortNotDefinedOnService for each server port no service exposes.
func (*IngressGatewayPortAnalyzer) analyzeGateway(r *resource.Instance, c analysis.Context) {
	gw := r.Message.(*v1alpha3.Gateway)
	// Typically there will be a single istio-ingressgateway service, which will select
	// the same ingress gateway pod workload as the Gateway resource. If there are multiple
	// Kubernetes services, and they offer different TCP port combinations, this validator will
	// not report a problem if *any* selecting service exposes the Gateway's port.
	servicePorts := map[uint32]bool{}
	gwSelectorMatches := 0
	// For pods selected by gw.Selector, find Services that select them and remember those ports
	gwSelector := klabels.SelectorFromSet(gw.Selector)
	c.ForEach(gvk.Pod, func(rPod *resource.Instance) bool {
		podLabels := klabels.Set(rPod.Metadata.Labels)
		if gwSelector.Matches(podLabels) {
			gwSelectorMatches++
			c.ForEach(gvk.Service, func(rSvc *resource.Instance) bool {
				nsSvc := string(rSvc.Metadata.FullName.Namespace)
				if nsSvc != rPod.Metadata.FullName.Namespace.String() {
					return true // Services only select pods in their namespace
				}
				service := rSvc.Message.(*v1.ServiceSpec)
				// TODO I want to match service.Namespace to pod.ObjectMeta.Namespace
				svcSelector := klabels.SelectorFromSet(service.Selector)
				if svcSelector.Matches(podLabels) {
					for _, port := range service.Ports {
						if port.Protocol == "TCP" {
							// Because the Gateway's server port is the port on which the proxy should listen for incoming connections,
							// the actual port associated with the service is the `TargetPort` that reaches the sidecar *workload instances*.
							if tp := port.TargetPort.IntValue(); tp != 0 {
								servicePorts[uint32(tp)] = true
							} else {
								// TargetPort defaults to Port when unset (or is a
								// named port, for which IntValue() returns 0).
								servicePorts[uint32(port.Port)] = true
							}
						}
					}
				}
				return true
			})
		}
		return true
	})
	// Report if we found no pods matching this gateway's selector
	if gwSelectorMatches == 0 {
		m := msg.NewReferencedResourceNotFound(r, "selector", gwSelector.String())
		label := util.ExtractLabelFromSelectorString(gwSelector.String())
		if line, ok := util.ErrorLine(r, fmt.Sprintf(util.GatewaySelector, label)); ok {
			m.Line = line
		}
		c.Report(gvk.Gateway, m)
		return
	}
	// Check each Gateway port against what the workload ingress service offers
	for _, server := range gw.Servers {
		if server.Port != nil {
			_, ok := servicePorts[server.Port.Number]
			if !ok {
				m := msg.NewGatewayPortNotDefinedOnService(r, int(server.Port.Number), gwSelector.String())
				label := util.ExtractLabelFromSelectorString(gwSelector.String())
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.GatewaySelector, label)); ok {
					m.Line = line
				}
				c.Report(gvk.Gateway, m)
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/credentials/kube"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// SecretAnalyzer checks a gateway's referenced secrets for correctness:
// the credentialName must resolve to an existing, valid TLS secret.
type SecretAnalyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &SecretAnalyzer{}
// Metadata implements analysis.Analyzer.
func (a *SecretAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{gvk.Gateway, gvk.Pod, gvk.Secret}
	return analysis.Metadata{
		Name:        "gateway.SecretAnalyzer",
		Description: "Checks a gateway's referenced secrets for correctness",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer. For every gateway it resolves the
// namespace of the selected workload, then verifies each TLS server's
// credentialName: the secret must exist in that namespace and contain a
// parseable, valid certificate.
func (a *SecretAnalyzer) Analyze(ctx analysis.Context) {
	ctx.ForEach(gvk.Gateway, func(r *resource.Instance) bool {
		gw := r.Message.(*v1alpha3.Gateway)
		gwNs := getGatewayNamespace(ctx, gw)
		// If we can't find a namespace for the gateway, it's because there's no matching selector. Exit early with a different message.
		if gwNs == "" {
			gwSelector := labels.SelectorFromSet(gw.Selector)
			m := msg.NewReferencedResourceNotFound(r, "selector", labels.SelectorFromSet(gw.Selector).String())
			label := util.ExtractLabelFromSelectorString(gwSelector.String())
			if line, ok := util.ErrorLine(r, fmt.Sprintf(util.GatewaySelector, label)); ok {
				m.Line = line
			}
			ctx.Report(gvk.Gateway, m)
			return true
		}
		for i, srv := range gw.GetServers() {
			tls := srv.GetTls()
			if tls == nil {
				continue
			}
			cn := tls.GetCredentialName()
			if cn == "" {
				continue
			}
			// The credential may be given as "name" or "namespace/name".
			secret := ctx.Find(gvk.Secret, resource.NewShortOrFullName(gwNs, cn))
			if secret == nil {
				m := msg.NewReferencedResourceNotFound(r, "credentialName", cn)
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.CredentialName, i)); ok {
					m.Line = line
				}
				ctx.Report(gvk.Gateway, m)
				continue
			}
			if !isValidSecret(secret) {
				m := msg.NewInvalidGatewayCredential(r, r.Metadata.FullName.Name.String(), gwNs.String())
				if line, ok := util.ErrorLine(r, fmt.Sprintf(util.CredentialName, i)); ok {
					m.Line = line
				}
				// NOTE(review): this report is attributed to gvk.Secret while the
				// missing-secret case above reports against gvk.Gateway — confirm
				// the asymmetry is intentional.
				ctx.Report(gvk.Secret, m)
			}
		}
		return true
	})
}
// isValidSecret reports whether the resource holds a Kubernetes Secret that
// contains extractable gateway TLS material with a certificate passing
// validation.
func isValidSecret(secret *resource.Instance) bool {
	s, ok := secret.Message.(*corev1.Secret)
	if !ok {
		return false
	}
	certs, err := kube.ExtractCertInfo(s)
	if err != nil {
		return false
	}
	return xds.ValidateCertificate(certs.Cert) == nil
}
// getGatewayNamespace returns the namespace of the workload selected by the
// gateway (NOT the namespace of the Gateway CRD itself). If the selector
// matches pods in multiple namespaces, which one is returned is undefined;
// an empty namespace means no pod matched.
func getGatewayNamespace(ctx analysis.Context, gw *v1alpha3.Gateway) resource.Namespace {
	selector := labels.SelectorFromSet(gw.Selector)
	var found resource.Namespace
	ctx.ForEach(gvk.Pod, func(rPod *resource.Instance) bool {
		if !selector.Matches(labels.Set(rPod.Metadata.Labels)) {
			return true
		}
		found = rPod.Metadata.FullName.Namespace
		return false // first match wins; stop iterating
	})
	return found
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package injection
import (
"strings"
admitv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ImageAutoAnalyzer reports an error if Pods and Deployments with `image: auto` are not going to be injected.
type ImageAutoAnalyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &ImageAutoAnalyzer{}

const (
	// Name of the proxy container the injector replaces.
	istioProxyContainerName = "istio-proxy"
	// Placeholder image value signaling "let the injection webhook fill this in".
	manualInjectionImage = "auto"
)
// Metadata implements Analyzer.
func (a *ImageAutoAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Namespace,
		gvk.Pod,
		gvk.Deployment,
		gvk.MutatingWebhookConfiguration,
	}
	return analysis.Metadata{
		Name:        "injection.ImageAutoAnalyzer",
		Description: "Makes sure that Pods and Deployments with `image: auto` are going to be injected",
		Inputs:      inputs,
	}
}
// Analyze implements Analyzer. It collects the Istio mutating webhooks, then
// flags workloads that use the `image: auto` placeholder: Pods with it are
// always an error (a running pod was never injected), while Deployments are
// only a warning when no Istio webhook would match them at admission time.
func (a *ImageAutoAnalyzer) Analyze(c analysis.Context) {
	var istioWebhooks []admitv1.MutatingWebhook
	c.ForEach(gvk.MutatingWebhookConfiguration, func(resource *resource.Instance) bool {
		mwhc := resource.Message.(*admitv1.MutatingWebhookConfiguration)
		for _, wh := range mwhc.Webhooks {
			// Webhook names ending in "istio.io" are treated as Istio's injectors.
			if strings.HasSuffix(wh.Name, "istio.io") {
				istioWebhooks = append(istioWebhooks, wh)
			}
		}
		return true
	})
	c.ForEach(gvk.Pod, func(resource *resource.Instance) bool {
		p := resource.Message.(*v1.PodSpec)
		// If a pod has `image: auto` it is broken whether the webhooks match or not
		if !hasAutoImage(p) {
			return true
		}
		m := msg.NewImageAutoWithoutInjectionError(resource, "Pod", resource.Metadata.FullName.Name.String())
		c.Report(gvk.Pod, m)
		return true
	})
	c.ForEach(gvk.Deployment, func(resource *resource.Instance) bool {
		d := resource.Message.(*appsv1.DeploymentSpec)
		if !hasAutoImage(&d.Template.Spec) {
			return true
		}
		// A Deployment is only a problem if no Istio webhook would inject its pods.
		nsLabels := getNamespaceLabels(c, resource.Metadata.FullName.Namespace.String())
		if !matchesWebhooks(nsLabels, d.Template.Labels, istioWebhooks) {
			m := msg.NewImageAutoWithoutInjectionWarning(resource, "Deployment", resource.Metadata.FullName.Name.String())
			c.Report(gvk.Deployment, m)
		}
		return true
	})
}
// hasAutoImage reports whether the pod spec contains an istio-proxy
// container whose image is the manual-injection placeholder "auto".
func hasAutoImage(spec *v1.PodSpec) bool {
	for i := range spec.Containers {
		container := &spec.Containers[i]
		if container.Name == istioProxyContainerName && container.Image == manualInjectionImage {
			return true
		}
	}
	return false
}
// getNamespaceLabels returns the labels of the named namespace, defaulting
// an empty name to "default". Returns nil when the namespace is not found.
func getNamespaceLabels(c analysis.Context, nsName string) map[string]string {
	name := nsName
	if name == "" {
		name = "default"
	}
	ns := c.Find(gvk.Namespace, resource.NewFullName("", resource.LocalName(name)))
	if ns != nil {
		return ns.Metadata.Labels
	}
	return nil
}
// matchesWebhooks reports whether any of the given Istio webhooks would fire
// for a pod with podLabels in a namespace with nsLabels (both the namespace
// selector and the object selector must match).
func matchesWebhooks(nsLabels, podLabels map[string]string, istioWebhooks []admitv1.MutatingWebhook) bool {
	for i := range istioWebhooks {
		wh := &istioWebhooks[i]
		if selectorMatches(wh.NamespaceSelector, nsLabels) && selectorMatches(wh.ObjectSelector, podLabels) {
			return true
		}
	}
	return false
}
// selectorMatches reports whether the label selector matches the label set.
// A nil selector matches everything (per the webhook spec: "Default to the
// empty LabelSelector, which matches everything."); an unparseable selector
// matches nothing.
func selectorMatches(selector *metav1.LabelSelector, labels klabels.Set) bool {
	if selector == nil {
		return true
	}
	parsed, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		return false
	}
	return parsed.Matches(labels)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package injection
import (
"encoding/json"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/slices"
)
// ImageAnalyzer checks the image of auto-injection configured with the running proxies on pods.
type ImageAnalyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &ImageAnalyzer{}

// injectionConfigMap is a snippet of the sidecar injection ConfigMap:
// only the fields needed to reconstruct the proxy image reference.
type injectionConfigMap struct {
	Global global `json:"global"`
}

type global struct {
	// Hub is the image registry prefix (e.g. "docker.io/istio").
	Hub string `json:"hub"`
	// Tag is the image tag appended when the proxy image is not fully qualified.
	Tag string `json:"tag"`
	Proxy proxy `json:"proxy"`
}

type proxy struct {
	// Image is either a bare image name or a fully-qualified reference (contains "/").
	Image string `json:"image"`
}
// Metadata implements Analyzer.
func (a *ImageAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.Namespace,
		gvk.Pod,
		gvk.ConfigMap,
		gvk.MeshConfig,
		gvk.ProxyConfig,
	}
	return analysis.Metadata{
		Name:        "injection.ImageAnalyzer",
		Description: "Checks the image of auto-injection configured with the running proxies on pods",
		Inputs:      inputs,
	}
}
// Analyze implements Analyzer. It reads the expected proxy image from each
// injector ConfigMap, then reports (per namespace) pods whose istio-proxy
// container image does not match the image the injector would have used.
func (a *ImageAnalyzer) Analyze(c analysis.Context) {
	proxyImageMap := make(map[string]string)
	// when multiple injector configmaps exist, we may need to assess them respectively.
	c.ForEach(gvk.ConfigMap, func(r *resource.Instance) bool {
		cmName := r.Metadata.FullName.Name.String()
		if strings.HasPrefix(cmName, "istio-sidecar-injector") {
			cm := r.Message.(*v1.ConfigMap)
			proxyImageMap[cmName] = GetIstioProxyImage(cm)
			// NOTE(review): both branches return true, so the early return here
			// is redundant — iteration continues either way.
			return true
		}
		return true
	})
	// No injector configmap found: nothing to compare pods against.
	if len(proxyImageMap) == 0 {
		return
	}
	injectedNamespaces := make(map[string]string)
	namespaceMismatchedPods := make(map[string][]string)
	namespaceResources := make(map[string]*resource.Instance)
	// Collect the list of namespaces that have istio injection enabled.
	c.ForEach(gvk.Namespace, func(r *resource.Instance) bool {
		namespaceResources[r.Metadata.FullName.String()] = r
		nsRevision, okNewInjectionLabel := r.Metadata.Labels[RevisionInjectionLabelName]
		if r.Metadata.Labels[util.InjectionLabelName] == util.InjectionLabelEnableValue || okNewInjectionLabel {
			// The revision label takes precedence over the legacy enabled label.
			if okNewInjectionLabel {
				injectedNamespaces[r.Metadata.FullName.String()] = nsRevision
			} else {
				injectedNamespaces[r.Metadata.FullName.String()] = "default"
			}
		} else {
			return true
		}
		return true
	})
	resolver := util.NewEffectiveProxyConfigResolver(c)
	c.ForEach(gvk.Pod, func(r *resource.Instance) bool {
		var injectionCMName string
		pod := r.Message.(*v1.PodSpec)
		if nsRevision, ok := injectedNamespaces[r.Metadata.FullName.Namespace.String()]; ok {
			// Generate the injection configmap name with different revision for every pod
			injectionCMName = util.GetInjectorConfigMapName(nsRevision)
		} else {
			return true
		}
		// If the pod has been annotated with a custom sidecar, then ignore as
		// it always overrides the injector logic.
		if r.Metadata.Annotations["sidecar.istio.io/proxyImage"] != "" {
			return true
		}
		// variant is the image flavor suffix (e.g. distroless) for this pod.
		variant := resolver.ImageType(r)
		for _, container := range append(slices.Clone(pod.Containers), pod.InitContainers...) {
			if container.Name != util.IstioProxyName {
				continue
			}
			proxyImage, okImage := proxyImageMap[injectionCMName]
			if !okImage {
				return true
			}
			// A pod matches if its image equals either the base image or the
			// base image with the variant suffix appended.
			if container.Image != proxyImage && container.Image != fmt.Sprintf("%s-%s", proxyImage, variant) {
				namespaceMismatchedPods[r.Metadata.FullName.Namespace.String()] = append(
					namespaceMismatchedPods[r.Metadata.FullName.Namespace.String()], r.Metadata.FullName.Name.String())
			}
		}
		return true
	})
	// One aggregated report per namespace with mismatched pods.
	for ns, pods := range namespaceMismatchedPods {
		c.Report(gvk.Namespace,
			msg.NewPodsIstioProxyImageMismatchInNamespace(namespaceResources[ns], pods))
	}
}
// GetIstioProxyImage retrieves the proxy image name defined in the sidecar
// injector configuration ConfigMap. It returns "" when the values blob
// cannot be parsed. An image already containing "/" is treated as fully
// qualified; otherwise it is composed as "hub/image:tag".
func GetIstioProxyImage(cm *v1.ConfigMap) string {
	var values injectionConfigMap
	if err := json.Unmarshal([]byte(cm.Data[util.InjectionConfigMapValue]), &values); err != nil {
		return ""
	}
	image := values.Global.Proxy.Image
	// The injector template has a similar '{ contains "/" ... }' conditional
	if strings.Contains(image, "/") {
		return image
	}
	return fmt.Sprintf("%s/%s:%s", values.Global.Hub, image, values.Global.Tag)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package injection
import (
"encoding/json"
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
"istio.io/api/annotation"
"istio.io/api/label"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/slices"
)
// Analyzer checks conditions related to Istio sidecar injection.
type Analyzer struct{}

// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &Analyzer{}

// We assume that enablement is via an istio-injection=enabled or istio.io/rev namespace label
// In theory, there can be alternatives using Mutatingwebhookconfiguration, but they're very uncommon
// See https://istio.io/docs/ops/troubleshooting/injection/ for more info.
var (
	// RevisionInjectionLabelName is the istio.io/rev label used to opt a
	// namespace into injection by a specific control-plane revision.
	RevisionInjectionLabelName = label.IoIstioRev.Name
)
// Metadata implements Analyzer.
func (a *Analyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{gvk.Namespace, gvk.Pod, gvk.ConfigMap}
	return analysis.Metadata{
		Name:        "injection.Analyzer",
		Description: "Checks conditions related to Istio sidecar injection",
		Inputs:      inputs,
	}
}
// Analyze implements Analyzer. It reports: namespaces carrying multiple
// conflicting injection labels; namespaces with no injection enablement at
// all (unless injection-by-default is configured); and pods in injected
// namespaces that are missing the istio-proxy container.
func (a *Analyzer) Analyze(c analysis.Context) {
	enableNamespacesByDefault := false
	injectedNamespaces := make(map[string]bool)
	c.ForEach(gvk.Namespace, func(r *resource.Instance) bool {
		// The control-plane namespace itself is never injected.
		if r.Metadata.FullName.String() == constants.IstioSystemNamespace {
			return true
		}
		ns := r.Metadata.FullName.String()
		injectionLabel, okInjectionLabel := r.Metadata.Labels[util.InjectionLabelName]
		nsRevision, okNewInjectionLabel := r.Metadata.Labels[RevisionInjectionLabelName]
		// Collect every injection-related label present; more than one is a conflict.
		istioLabels := make([]string, 0)
		if okInjectionLabel {
			istioLabels = append(istioLabels, fmt.Sprintf("%s=%s", util.InjectionLabelName, injectionLabel))
		}
		for _, l := range []string{RevisionInjectionLabelName, constants.DataplaneMode} {
			if _, ok := r.Metadata.Labels[l]; ok && (!okInjectionLabel || injectionLabel == "enabled") {
				istioLabels = append(istioLabels, fmt.Sprintf("%s=%s", l, r.Metadata.Labels[l]))
			}
		}
		if len(istioLabels) > 1 {
			m := msg.NewNamespaceMultipleInjectionLabels(r, istioLabels)
			c.Report(gvk.Namespace, m)
		}
		// Ambient-mode namespaces do not use sidecar injection; skip further checks.
		if r.Metadata.Labels[constants.DataplaneMode] == constants.DataplaneModeAmbient {
			return true
		}
		// verify the enableNamespacesByDefault flag in injection configmaps
		c.ForEach(gvk.ConfigMap, func(r *resource.Instance) bool {
			injectionCMName := util.GetInjectorConfigMapName(nsRevision)
			if r.Metadata.FullName.Name.String() == injectionCMName {
				cm := r.Message.(*v1.ConfigMap)
				enableNamespacesByDefault = GetEnableNamespacesByDefaultFromInjectedConfigMap(cm)
				return false
			}
			return true
		})
		if injectionLabel == "" && !okNewInjectionLabel {
			// if Istio is installed with sidecarInjectorWebhook.enableNamespacesByDefault=true
			// (in the istio-sidecar-injector configmap), we need to reverse this logic and treat this as an injected namespace
			if enableNamespacesByDefault {
				m := msg.NewNamespaceInjectionEnabledByDefault(r)
				c.Report(gvk.Namespace, m)
				return true
			}
			m := msg.NewNamespaceNotInjected(r, ns, ns)
			// util.MetadataName is used directly as the path; the previous
			// zero-argument fmt.Sprintf wrapper was redundant and a go vet
			// printf-check violation.
			if line, ok := util.ErrorLine(r, util.MetadataName); ok {
				m.Line = line
			}
			c.Report(gvk.Namespace, m)
			return true
		}
		if injectionLabel != util.InjectionLabelEnableValue {
			// If legacy label has any value other than the enablement value, they are deliberately not injecting it, so ignore
			return true
		}
		injectedNamespaces[ns] = true
		return true
	})
	c.ForEach(gvk.Pod, func(r *resource.Instance) bool {
		pod := r.Message.(*v1.PodSpec)
		if !injectedNamespaces[r.Metadata.FullName.Namespace.String()] {
			return true
		}
		// If a pod has injection explicitly disabled, no need to check further.
		// The newer label takes precedence over the annotation.
		inj := r.Metadata.Annotations[annotation.SidecarInject.Name]
		if v, ok := r.Metadata.Labels[label.SidecarInject.Name]; ok {
			inj = v
		}
		if strings.EqualFold(inj, "false") {
			return true
		}
		// Host-network pods are never injected.
		if pod.HostNetwork {
			return true
		}
		proxyImage := ""
		for _, container := range append(slices.Clone(pod.Containers), pod.InitContainers...) {
			if container.Name == util.IstioProxyName {
				proxyImage = container.Image
				break
			}
		}
		if proxyImage == "" {
			c.Report(gvk.Pod, msg.NewPodMissingProxy(r, r.Metadata.FullName.String()))
		}
		return true
	})
}
// GetEnableNamespacesByDefaultFromInjectedConfigMap retrieves the value of
// sidecarInjectorWebhook.enableNamespacesByDefault defined in the sidecar
// injector configuration. It returns false when the values blob is missing,
// malformed, or does not contain the expected keys/types — the previous
// implementation panicked on unexpected shapes via unchecked type assertions.
func GetEnableNamespacesByDefaultFromInjectedConfigMap(cm *v1.ConfigMap) bool {
	var injectedCMValues map[string]any
	if err := json.Unmarshal([]byte(cm.Data[util.InjectionConfigMapValue]), &injectedCMValues); err != nil {
		return false
	}
	webhookCfg, ok := injectedCMValues[util.InjectorWebhookConfigKey].(map[string]any)
	if !ok {
		return false
	}
	enabled, ok := webhookCfg[util.InjectorWebhookConfigValue].(bool)
	if !ok {
		return false
	}
	return enabled
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8sgateway
import (
typev1beta1 "istio.io/api/type/v1beta1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/maps"
)
// (compile-time check that we implement the interface)
var _ analysis.Analyzer = &SelectorAnalyzer{}

// SelectorAnalyzer reports policies whose workload selector matches pods that
// belong to a Kubernetes Gateway, where such selection is ineffective.
type SelectorAnalyzer struct{}

// policyGVKs lists the policy kinds whose selectors are checked.
var policyGVKs = []config.GroupVersionKind{
	gvk.AuthorizationPolicy,
	gvk.RequestAuthentication,
	gvk.Telemetry,
	gvk.WasmPlugin,
}

// policy abstracts the common selector/targetRef accessors shared by the
// policy kinds above.
type policy interface {
	GetSelector() *typev1beta1.WorkloadSelector
	GetTargetRef() *typev1beta1.PolicyTargetReference
}
// Metadata implements analysis.Analyzer.
func (w *SelectorAnalyzer) Metadata() analysis.Metadata {
	inputs := []config.GroupVersionKind{
		gvk.AuthorizationPolicy,
		gvk.RequestAuthentication,
		gvk.Telemetry,
		gvk.WasmPlugin,
		gvk.Pod,
	}
	return analysis.Metadata{
		Name:        "k8sgateway.SelectorAnalyzer",
		Description: "Check that selectors are effective for Kubernetes gateway",
		Inputs:      inputs,
	}
}
// Analyze implements analysis.Analyzer. It reports policies whose workload
// selector matches pods belonging to a Kubernetes Gateway — selecting those
// pods is ineffective. Policies attached explicitly via targetRef are skipped.
func (w *SelectorAnalyzer) Analyze(context analysis.Context) {
	pods := gatewayPodsLabelMap(context)
	handleResource := func(r *resource.Instance, gvkType config.GroupVersionKind) {
		spec, ok := r.Message.(policy)
		// Check the assertion result BEFORE dereferencing spec: the original
		// code called spec.GetTargetRef() first, which panics with a nil
		// interface when the assertion fails.
		if !ok || spec.GetSelector() == nil {
			return
		}
		// targetRef-scoped policies are attached explicitly; skip them.
		if spec.GetTargetRef() != nil {
			return
		}
		selector := spec.GetSelector()
		for _, pod := range pods[r.Metadata.FullName.Namespace.String()] {
			if maps.Contains(pod, selector.MatchLabels) {
				context.Report(gvkType, msg.NewIneffectiveSelector(r, pod[constants.GatewayNameLabel]))
			}
		}
	}
	for _, gvkType := range policyGVKs {
		context.ForEach(gvkType, func(r *resource.Instance) bool {
			handleResource(r, gvkType)
			return true
		})
	}
}
// gatewayPodsLabelMap collects the label sets of all pods carrying the
// gateway-name label, keyed by the pod's namespace.
func gatewayPodsLabelMap(context analysis.Context) map[string][]map[string]string {
	result := map[string][]map[string]string{}
	context.ForEach(gvk.Pod, func(r *resource.Instance) bool {
		podLabels := r.Metadata.Labels
		if _, isGateway := podLabels[constants.GatewayNameLabel]; isGateway {
			ns := r.Metadata.FullName.Namespace.String()
			result[ns] = append(result[ns], podLabels)
		}
		return true
	})
	return result
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package maturity
import (
"strings"
corev1 "k8s.io/api/core/v1"
"istio.io/api/annotation"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// AlphaAnalyzer checks for alpha Istio annotations in K8s resources
type AlphaAnalyzer struct{}
// the alpha analyzer is currently explicitly left out of the default collection of analyzers to run, as it results
// in too much noise for users, with annotations that are set by default. Once the noise dies down, this should be
// added to the CombinedAnalyzers() function.
// istioAnnotations is the full set of resource annotations Istio declares,
// used by lookupAnnotation to resolve a name to its feature status.
var istioAnnotations = annotation.AllResourceAnnotations()
// Metadata implements analyzer.Analyzer
func (*AlphaAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "annotations.AlphaAnalyzer",
		Description: "Checks for alpha Istio annotations in Kubernetes resources",
		// The resource kinds whose annotations are scanned by Analyze.
		Inputs: []config.GroupVersionKind{
			gvk.Namespace,
			gvk.Service,
			gvk.Pod,
			gvk.Deployment,
		},
	}
}
// conditionallyIgnoredAnnotations are alpha annotations that are skipped only
// when allowAnnotations decides defaults should be skipped for the pod
// (i.e. no istio-init container is present — see isCNIEnabled).
var conditionallyIgnoredAnnotations = map[string]bool{
	annotation.SidecarInterceptionMode.Name: true,
	annotation.SidecarTrafficIncludeInboundPorts.Name: true,
	annotation.SidecarTrafficExcludeInboundPorts.Name: true,
	annotation.SidecarTrafficIncludeOutboundIPRanges.Name: true,
}
// AlwaysIgnoredAnnotations are alpha annotations that are never reported,
// because they are set automatically rather than by users.
var AlwaysIgnoredAnnotations = map[string]bool{
	// this annotation is set by default in istiod, don't alert on it.
	annotation.SidecarStatus.Name: true,
	// this annotation is set by controller, don't alert on it.
	annotation.GatewayControllerVersion.Name: true,
	// this annotation is added automatically.
	annotation.IoIstioRev.Name: true,
	// TODO below are ambient related annotations that are not yet known to be stable.
	// They are added automatically, and should not be alerted on.
	// Delete these related annotations once they are stable.
	// Ref: https://github.com/istio/api/pull/2695
	constants.WaypointServiceAccount: true,
	constants.AmbientRedirection: true,
}
// Analyze implements analysis.Analyzer: it scans the annotations of every
// Namespace, Service, Pod and Deployment resource.
func (fa *AlphaAnalyzer) Analyze(ctx analysis.Context) {
	kinds := []config.GroupVersionKind{gvk.Namespace, gvk.Service, gvk.Pod, gvk.Deployment}
	for _, kind := range kinds {
		kind := kind
		ctx.ForEach(kind, func(r *resource.Instance) bool {
			fa.allowAnnotations(r, ctx, kind)
			return true
		})
	}
}
// allowAnnotations reports an AlphaAnnotation message for every alpha-status
// Istio annotation found on the resource, except those that are always or
// conditionally ignored.
func (*AlphaAnalyzer) allowAnnotations(r *resource.Instance, ctx analysis.Context, collectionType config.GroupVersionKind) {
	annos := r.Metadata.Annotations
	if len(annos) == 0 {
		return
	}
	skipDefaults := false
	if r.Metadata.Schema.GroupVersionKind() == gvk.Pod {
		skipDefaults = isCNIEnabled(r.Message.(*corev1.PodSpec))
	}
	for name := range annos {
		// Non-Istio annotations (e.g. kubectl.kubernetes.io/last-applied-configuration) are fine.
		if !istioAnnotation(name) {
			continue
		}
		def := lookupAnnotation(name)
		if def == nil || def.FeatureStatus != annotation.Alpha {
			continue
		}
		if AlwaysIgnoredAnnotations[def.Name] {
			continue
		}
		// some annotations are set by default in istiod, don't alert on it.
		if skipDefaults && conditionallyIgnoredAnnotations[def.Name] {
			continue
		}
		m := msg.NewAlphaAnnotation(r, name)
		util.AddLineNumber(r, name, m)
		ctx.Report(collectionType, m)
	}
}
// isCNIEnabled returns true when the pod has no "istio-init" init container —
// the presence of that container indicates init-based (non-CNI) injection.
func isCNIEnabled(pod *corev1.PodSpec) bool {
	for _, c := range pod.InitContainers {
		if c.Name == "istio-init" {
			return false
		}
	}
	return true
}
// istioAnnotation is true if the annotation is in Istio's namespace
// (its prefix before the first "/" ends with "istio.io").
func istioAnnotation(ann string) bool {
	// We document this Kubernetes annotation, we should analyze it as well
	if ann == "kubernetes.io/ingress.class" {
		return true
	}
	// Only the part before the first "/" carries the annotation namespace.
	// Note the previous len(parts) == 0 branch was dead code: strings.Split
	// never returns an empty slice.
	prefix, _, _ := strings.Cut(ann, "/")
	return strings.HasSuffix(prefix, "istio.io")
}
// lookupAnnotation resolves an annotation name to its Istio definition,
// returning nil when the name is unknown.
func lookupAnnotation(ann string) *annotation.Instance {
	for _, inst := range istioAnnotations {
		if inst.Name == ann {
			return inst
		}
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multicluster
import (
"fmt"
v1 "k8s.io/api/core/v1"
"istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube/multicluster"
)
// MeshNetworksAnalyzer validates MeshNetworks configuration in multi-cluster.
type MeshNetworksAnalyzer struct{}
// Compile-time check that MeshNetworksAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &MeshNetworksAnalyzer{}
// Service Registries that are known to istio.
var serviceRegistries = []provider.ID{
	provider.Mock,
	provider.Kubernetes,
	provider.External,
}
// Metadata implements Analyzer
func (s *MeshNetworksAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "meshnetworks.MeshNetworksAnalyzer",
		Description: "Check the validity of MeshNetworks in the cluster",
		// Secrets are needed to discover remote-cluster registries.
		Inputs: []config.GroupVersionKind{
			gvk.MeshNetworks,
			gvk.Secret,
		},
	}
}
// Analyze implements Analyzer. It reports endpoints whose fromRegistry does
// not name a known service registry (built-in or a remote cluster from
// multi-cluster secrets).
func (s *MeshNetworksAnalyzer) Analyze(c analysis.Context) {
	// Build a LOCAL registry list. The previous code appended directly to the
	// package-level serviceRegistries slice, mutating shared state and
	// accumulating cluster names across repeated analysis runs.
	registries := append([]provider.ID{}, serviceRegistries...)
	c.ForEach(gvk.Secret, func(r *resource.Instance) bool {
		if r.Metadata.Labels[multicluster.MultiClusterSecretLabel] == "true" {
			sec := r.Message.(*v1.Secret)
			// Each data key of a multi-cluster secret is a remote cluster name.
			for clusterName := range sec.Data {
				registries = append(registries, provider.ID(clusterName))
			}
		}
		return true
	})
	// only one meshnetworks config should exist.
	c.ForEach(gvk.MeshNetworks, func(r *resource.Instance) bool {
		mn := r.Message.(*v1alpha1.MeshNetworks)
		for i, n := range mn.Networks {
			for j, e := range n.Endpoints {
				switch re := e.Ne.(type) {
				case *v1alpha1.Network_NetworkEndpoints_FromRegistry:
					found := false
					for _, known := range registries {
						if provider.ID(re.FromRegistry) == known {
							found = true
							break
						}
					}
					if !found {
						m := msg.NewUnknownMeshNetworksServiceRegistry(r, re.FromRegistry, i)
						if line, ok := util.ErrorLine(r, fmt.Sprintf(util.FromRegistry, i, j)); ok {
							m.Line = line
						}
						c.Report(gvk.MeshNetworks, m)
					}
				}
			}
		}
		return true
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collections"
sresource "istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/config/validation"
)
// ValidationAnalyzer runs schema validation as an analyzer and reports any violations as messages
type ValidationAnalyzer struct {
	s sresource.Schema // the single schema whose resources this analyzer validates
}
// Compile-time check that ValidationAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &ValidationAnalyzer{}
// CollectionValidationAnalyzer returns a validation analyzer for the given schema.
func CollectionValidationAnalyzer(s sresource.Schema) analysis.Analyzer {
	return &ValidationAnalyzer{s: s}
}
// AllValidationAnalyzers returns a slice with a validation analyzer for each Istio schema
// This automation comes with an assumption: that the collection names used by the schema match the metadata used by Galley components
func AllValidationAnalyzers() []analysis.Analyzer {
	analyzers := make([]analysis.Analyzer, 0)
	collections.Istio.ForEach(func(s sresource.Schema) (done bool) {
		analyzers = append(analyzers, &ValidationAnalyzer{s: s})
		// Never stop early: visit every schema.
		return false
	})
	return analyzers
}
// Metadata implements Analyzer. Name and Inputs are derived from the schema
// so each schema gets a distinct analyzer identity.
func (a *ValidationAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "schema.ValidationAnalyzer." + a.s.Kind(),
		Description: "Runs schema validation as an analyzer on '" + a.s.Kind() + "' resources",
		Inputs: []config.GroupVersionKind{a.s.GroupVersionKind()},
	}
}
// Analyze implements Analyzer. Each resource of the analyzer's schema is
// validated; errors are reported as errors and warnings as warnings, with
// multierror values unwrapped into one message apiece.
func (a *ValidationAnalyzer) Analyze(ctx analysis.Context) {
	gv := a.s.GroupVersionKind()
	// report unwraps a (possibly multi-) error and emits one message per
	// wrapped error — previously this logic was duplicated for errors and
	// warnings.
	report := func(r *resource.Instance, err error, isError bool) {
		if multiErr, ok := err.(*multierror.Error); ok {
			for _, e := range multiErr.WrappedErrors() {
				ctx.Report(gv, morePreciseMessage(r, e, isError))
			}
			return
		}
		ctx.Report(gv, morePreciseMessage(r, err, isError))
	}
	ctx.ForEach(gv, func(r *resource.Instance) bool {
		ns := r.Metadata.FullName.Namespace
		name := r.Metadata.FullName.Name
		warnings, err := a.s.ValidateConfig(config.Config{
			Meta: config.Meta{
				Name: string(name),
				Namespace: string(ns),
			},
			Spec: r.Message,
		})
		if err != nil {
			report(r, err, true)
		}
		if warnings != nil {
			report(r, warnings, false)
		}
		return true
	})
}
// morePreciseMessage maps a validation error to the most specific diagnostic
// message available: analysis-aware errors get dedicated message types, while
// everything else becomes a generic schema error or warning.
func morePreciseMessage(r *resource.Instance, err error, isError bool) diag.Message {
	if aae, ok := err.(*validation.AnalysisAwareError); ok {
		params := aae.Parameters
		switch aae.Type {
		case "VirtualServiceUnreachableRule":
			return msg.NewVirtualServiceUnreachableRule(r, params[0].(string), params[1].(string))
		case "VirtualServiceIneffectiveMatch":
			return msg.NewVirtualServiceIneffectiveMatch(r, params[0].(string), params[1].(string), params[2].(string))
		}
	}
	if isError {
		return msg.NewSchemaValidationError(r, err)
	}
	return msg.NewSchemaWarning(r, err)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package service
import (
"fmt"
v1 "k8s.io/api/core/v1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
configKube "istio.io/istio/pkg/config/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// PortNameAnalyzer checks the port name of the service
type PortNameAnalyzer struct{}
// Compile-time check that PortNameAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &PortNameAnalyzer{}
// Metadata implements Analyzer
func (s *PortNameAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "service.PortNameAnalyzer",
		Description: "Checks the port names associated with each service",
		Inputs: []config.GroupVersionKind{
			gvk.Service,
		},
	}
}
// Analyze implements Analyzer: every non-control-plane Service is checked.
func (s *PortNameAnalyzer) Analyze(c analysis.Context) {
	c.ForEach(gvk.Service, func(r *resource.Instance) bool {
		// Skip port name check for istio control plane
		if !util.IsIstioControlPlane(r) {
			s.analyzeService(r, c)
		}
		return true
	})
}
// analyzeService reports ports whose names don't follow Istio's protocol
// naming convention, plus the special case of a port named "tcp" on an
// ExternalName service.
func (s *PortNameAnalyzer) analyzeService(r *resource.Instance, c analysis.Context) {
	svc := r.Message.(*v1.ServiceSpec)
	// Skip gateway managed services, which are not created by users.
	// NOTE: parentheses added — the previous expression parsed as
	// (ok && v == A) || v == B, which only behaved as intended because a
	// missing label yields v == "" and both constants are non-empty.
	if v, ok := r.Metadata.Labels[constants.ManagedGatewayLabel]; ok &&
		(v == constants.ManagedGatewayControllerLabel || v == constants.ManagedGatewayMeshControllerLabel) {
		return
	}
	isExternalName := svc.Type == "ExternalName"
	for i, port := range svc.Ports {
		instance := configKube.ConvertProtocol(port.Port, port.Name, port.Protocol, port.AppProtocol)
		if instance.IsUnsupported() || (port.Name == "tcp" && isExternalName) {
			m := msg.NewPortNameIsNotUnderNamingConvention(
				r, port.Name, int(port.Port), port.TargetPort.String())
			// ExternalName services get a more specific message.
			if isExternalName {
				m = msg.NewExternalNameServiceTypeInvalidPortName(r)
			}
			if line, ok := util.ErrorLine(r, fmt.Sprintf(util.PortInPorts, i)); ok {
				m.Line = line
			}
			c.Report(gvk.Service, m)
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serviceentry
import (
"fmt"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ProtocolAddressesAnalyzer flags ServiceEntries that need explicit addresses
// (TCP ports without DNS auto-allocation) but don't declare any.
type ProtocolAddressesAnalyzer struct{}
// Compile-time check that ProtocolAddressesAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &ProtocolAddressesAnalyzer{}
// Metadata implements analysis.Analyzer.
func (serviceEntry *ProtocolAddressesAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "serviceentry.Analyzer",
		Description: "Checks the validity of ServiceEntry",
		// MeshConfig is needed to detect DNS capture / auto-allocation settings.
		Inputs: []config.GroupVersionKind{
			gvk.ServiceEntry,
			gvk.MeshConfig,
		},
	}
}
// Analyze implements analysis.Analyzer. It first determines from MeshConfig
// whether DNS capture with automatic address allocation is enabled, then
// validates each ServiceEntry accordingly.
func (serviceEntry *ProtocolAddressesAnalyzer) Analyze(context analysis.Context) {
	autoAllocated := false
	context.ForEach(gvk.MeshConfig, func(r *resource.Instance) bool {
		mc := r.Message.(*meshconfig.MeshConfig)
		// Use the nil-safe generated getters: DefaultConfig may be unset, and
		// the previous direct field access panicked on a nil pointer. A
		// missing map key yields "", which fails the comparisons as before.
		proxyMeta := mc.GetDefaultConfig().GetProxyMetadata()
		if proxyMeta["ISTIO_META_DNS_CAPTURE"] != "true" {
			return true
		}
		if proxyMeta["ISTIO_META_DNS_AUTO_ALLOCATE"] == "true" {
			autoAllocated = true
		}
		return true
	})
	context.ForEach(gvk.ServiceEntry, func(resource *resource.Instance) bool {
		serviceEntry.analyzeProtocolAddresses(resource, context, autoAllocated)
		return true
	})
}
// analyzeProtocolAddresses reports each TCP (or unset-protocol) port of a
// ServiceEntry that declares no addresses while DNS auto-allocation is off.
func (serviceEntry *ProtocolAddressesAnalyzer) analyzeProtocolAddresses(r *resource.Instance, ctx analysis.Context, metaDNSAutoAllocated bool) {
	se := r.Message.(*v1alpha3.ServiceEntry)
	// Nothing to check when addresses are declared or will be auto-allocated.
	if se.Addresses != nil || metaDNSAutoAllocated {
		return
	}
	for index, port := range se.Ports {
		if port.Protocol != "" && port.Protocol != "TCP" {
			continue
		}
		message := msg.NewServiceEntryAddressesRequired(r)
		if line, ok := util.ErrorLine(r, fmt.Sprintf(util.ServiceEntryPort, index)); ok {
			message.Line = line
		}
		ctx.Report(gvk.ServiceEntry, message)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sidecar
import (
"fmt"
"k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// SelectorAnalyzer validates, per namespace, that:
// * sidecar resources that define a workload selector match at least one pod
// * there aren't multiple sidecar resources that select overlapping pods
type SelectorAnalyzer struct{}
// Compile-time check that SelectorAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &SelectorAnalyzer{}
// Metadata implements Analyzer
func (a *SelectorAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "sidecar.SelectorAnalyzer",
		Description: "Validates that sidecars that define a workload selector " +
			"match at least one pod, and that there aren't multiple sidecar resources that select overlapping pods",
		// Namespaces are needed to detect ambient-mode namespaces.
		Inputs: []config.GroupVersionKind{
			gvk.Sidecar,
			gvk.Pod,
			gvk.Namespace,
		},
	}
}
// Analyze implements Analyzer. It matches every selector-bearing Sidecar
// against the pods in its namespace, reporting selectors that match nothing,
// conflicting selectors on the same pod, and Sidecars that are ineffective
// because the selected workload or namespace runs in ambient mode.
func (a *SelectorAnalyzer) Analyze(c analysis.Context) {
	podsToSidecars := make(map[resource.FullName][]*resource.Instance)
	pods := make(map[resource.FullName]*resource.Instance)
	namespacesToSidecars := make(map[resource.Namespace][]*resource.Instance)
	namespaces := make(map[string]*resource.Instance)
	c.ForEach(gvk.Sidecar, func(rs *resource.Instance) bool {
		s := rs.Message.(*v1alpha3.Sidecar)
		// record namespace-scoped sidecars
		if s.WorkloadSelector == nil || len(s.WorkloadSelector.Labels) == 0 {
			namespacesToSidecars[rs.Metadata.FullName.Namespace] = append(namespacesToSidecars[rs.Metadata.FullName.Namespace], rs)
			return true
		}
		sNs := rs.Metadata.FullName.Namespace
		sel := labels.SelectorFromSet(s.WorkloadSelector.Labels)
		foundPod := false
		c.ForEach(gvk.Pod, func(rp *resource.Instance) bool {
			pNs := rp.Metadata.FullName.Namespace
			podLabels := labels.Set(rp.Metadata.Labels)
			// Only attempt to match in the same namespace
			if pNs != sNs {
				return true
			}
			if sel.Matches(podLabels) {
				foundPod = true
				podsToSidecars[rp.Metadata.FullName] = append(podsToSidecars[rp.Metadata.FullName], rs)
				pods[rp.Metadata.FullName] = rp
			}
			return true
		})
		// Selector matched no pod at all: dangling reference.
		if !foundPod {
			m := msg.NewReferencedResourceNotFound(rs, "selector", sel.String())
			label := util.ExtractLabelFromSelectorString(sel.String())
			if line, ok := util.ErrorLine(rs, fmt.Sprintf(util.WorkloadSelector, label)); ok {
				m.Line = line
			}
			c.Report(gvk.Sidecar, m)
		}
		return true
	})
	c.ForEach(gvk.Namespace, func(r *resource.Instance) bool {
		namespaces[r.Metadata.FullName.Name.String()] = r
		return true
	})
	// Track which Sidecars have already been flagged so each is reported once.
	reportedResources := make(map[string]bool)
	for p, sList := range podsToSidecars {
		podResource := pods[p]
		if len(sList) == 1 && !util.PodInAmbientMode(podResource) {
			continue
		}
		sNames := getNames(sList)
		for _, rs := range sList {
			// We don't want to report errors for pods in ambient mode, since there is no sidecar,
			// but we do want to warn that the policy is ineffective.
			if util.PodInAmbientMode(podResource) {
				if !reportedResources[rs.Metadata.FullName.String()] {
					c.Report(gvk.Sidecar, msg.NewIneffectivePolicy(rs,
						"selected workload is in ambient mode, the policy has no impact"))
					reportedResources[rs.Metadata.FullName.String()] = true
				}
				continue
			}
			m := msg.NewConflictingSidecarWorkloadSelectors(rs, sNames,
				p.Namespace.String(), p.Name.String())
			// util.MetadataName contains no format verbs, so the previous
			// fmt.Sprintf(util.MetadataName) wrapper was a no-op (staticcheck S1039).
			if line, ok := util.ErrorLine(rs, util.MetadataName); ok {
				m.Line = line
			}
			c.Report(gvk.Sidecar, m)
		}
	}
	for ns, sList := range namespacesToSidecars {
		nsResource := namespaces[ns.String()]
		// We don't want to report errors for namespaces in ambient mode, since there is no sidecar,
		// but we do want to warn that the policy is ineffective.
		// TODO: do we need to check mixed mode?
		if util.NamespaceInAmbientMode(nsResource) {
			for _, rs := range sList {
				if !reportedResources[rs.Metadata.FullName.String()] {
					c.Report(gvk.Sidecar, msg.NewIneffectivePolicy(rs,
						"namespace is in ambient mode, the policy has no impact"))
					reportedResources[rs.Metadata.FullName.String()] = true
				}
			}
			continue
		}
		// Multiple selector-less Sidecars in one namespace conflict with each other.
		if len(sList) > 1 {
			sNames := getNames(sList)
			for _, r := range sList {
				c.Report(gvk.Sidecar, msg.NewMultipleSidecarsWithoutWorkloadSelectors(r, sNames, string(ns)))
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sidecar
import "istio.io/istio/pkg/config/resource"
// getNames extracts the short (non-namespaced) name of each resource instance.
func getNames(entries []*resource.Instance) []string {
	names := make([]string, 0, len(entries))
	for _, entry := range entries {
		names = append(names, entry.Metadata.FullName.Name.String())
	}
	return names
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// DefaultSelectorAnalyzer validates, per namespace, that there aren't multiple
// telemetry resources that have no selector. This is distinct from
// SelectorAnalyzer because it does not require pods, so it can run even if that
// collection is unavailable.
type DefaultSelectorAnalyzer struct{}
// Compile-time check that DefaultSelectorAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &DefaultSelectorAnalyzer{}
// Metadata implements Analyzer
func (a *DefaultSelectorAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "telemetry.DefaultSelectorAnalyzer",
		Description: "Validates that there aren't multiple telemetry resources that have no selector",
		Inputs: []config.GroupVersionKind{
			gvk.Telemetry,
		},
	}
}
// Analyze implements Analyzer: groups selector-less Telemetries by namespace
// and reports every namespace holding more than one.
func (a *DefaultSelectorAnalyzer) Analyze(c analysis.Context) {
	selectorless := make(map[resource.Namespace][]*resource.Instance)
	c.ForEach(gvk.Telemetry, func(r *resource.Instance) bool {
		if r.Message.(*v1alpha1.Telemetry).Selector == nil {
			ns := r.Metadata.FullName.Namespace
			selectorless[ns] = append(selectorless[ns], r)
		}
		return true
	})
	// Check for more than one selector-less telemetry instance, per namespace
	for ns, instances := range selectorless {
		if len(instances) < 2 {
			continue
		}
		names := getNames(instances)
		for _, r := range instances {
			c.Report(gvk.Telemetry, msg.NewMultipleTelemetriesWithoutWorkloadSelectors(r, names, string(ns)))
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"fmt"
"istio.io/api/mesh/v1alpha1"
telemetryapi "istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/sets"
)
// LightstepAnalyzer warns about Telemetry tracing entries that still reference
// a deprecated Lightstep extension provider.
type LightstepAnalyzer struct{}
// Compile-time check that LightstepAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &LightstepAnalyzer{}
// Metadata implements Analyzer
func (a *LightstepAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "telemetry.LightstepAnalyzer",
		Description: "Validates that lightstep provider is still used",
		// MeshConfig is needed to enumerate configured extension providers.
		Inputs: []config.GroupVersionKind{
			gvk.Telemetry,
			gvk.MeshConfig,
		},
	}
}
// Analyze implements Analyzer. It collects the names of configured Lightstep
// extension providers, then flags any Telemetry tracing entry referencing one.
func (a *LightstepAnalyzer) Analyze(c analysis.Context) {
	meshConfig := fetchMeshConfig(c)
	// fetchMeshConfig returns nil when no MeshConfig resource is available;
	// ranging over its fields below would then panic.
	if meshConfig == nil {
		return
	}
	providerNames := sets.New[string]()
	for _, prov := range meshConfig.ExtensionProviders {
		switch prov.Provider.(type) {
		case *v1alpha1.MeshConfig_ExtensionProvider_Lightstep:
			providerNames.Insert(prov.Name)
		}
	}
	if len(providerNames) == 0 {
		return
	}
	c.ForEach(gvk.Telemetry, func(r *resource.Instance) bool {
		telemetry := r.Message.(*telemetryapi.Telemetry)
		for _, tracing := range telemetry.Tracing {
			for _, p := range tracing.Providers {
				if providerNames.Contains(p.Name) {
					c.Report(gvk.Telemetry,
						msg.NewDeprecated(r, fmt.Sprintf("The Lightstep provider %s is deprecated, please migrate to OpenTelemetry provider.", p.Name)))
				}
			}
		}
		return true
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
telemetryapi "istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// ProdiverAnalyzer validates that access-logging entries in Telemetry
// resources name a provider when no mesh-wide defaults exist.
// NOTE(review): the type name is a typo for "ProviderAnalyzer"; it is
// exported, so renaming would break callers and is deliberately left as-is.
type ProdiverAnalyzer struct{}
// Compile-time check that ProdiverAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &ProdiverAnalyzer{}
// Metadata implements Analyzer
func (a *ProdiverAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "telemetry.ProviderAnalyzer",
		Description: "Validates that providers in telemetry resource is valid",
		// MeshConfig is needed to check for mesh-wide default providers.
		Inputs: []config.GroupVersionKind{
			gvk.Telemetry,
			gvk.MeshConfig,
		},
	}
}
// Analyze implements Analyzer. When the mesh has no default access-logging
// providers, each enabled AccessLogging entry must name its own provider.
func (a *ProdiverAnalyzer) Analyze(c analysis.Context) {
	meshConfig := fetchMeshConfig(c)
	// Use the nil-safe generated getters: fetchMeshConfig may return nil and
	// DefaultProviders may be unset; direct field access previously panicked
	// in the first case.
	if len(meshConfig.GetDefaultProviders().GetAccessLogging()) > 0 {
		// Mesh-wide defaults exist, so an empty provider list is valid.
		return
	}
	c.ForEach(gvk.Telemetry, func(r *resource.Instance) bool {
		telemetry := r.Message.(*telemetryapi.Telemetry)
		for _, l := range telemetry.AccessLogging {
			// Explicitly disabled entries don't need a provider.
			if l.Disabled != nil && l.Disabled.Value {
				continue
			}
			if len(l.Providers) == 0 {
				c.Report(gvk.Telemetry,
					msg.NewInvalidTelemetryProvider(r, string(r.Metadata.FullName.Name), string(r.Metadata.FullName.Namespace)))
			}
		}
		return true
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"fmt"
"k8s.io/apimachinery/pkg/labels"
"istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// SelectorAnalyzer validates, per namespace, that:
// * telemetry resources that define a workload selector match at least one pod
// * there aren't multiple telemetry resources that select overlapping pods
type SelectorAnalyzer struct{}
// Compile-time check that SelectorAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &SelectorAnalyzer{}
// Metadata implements Analyzer
func (a *SelectorAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name: "telemetry.SelectorAnalyzer",
		Description: "Validates that telemetries that define a selector " +
			"match at least one pod, and that there aren't multiple telemetry resources that select overlapping pods",
		Inputs: []config.GroupVersionKind{
			gvk.Telemetry,
			gvk.Pod,
		},
	}
}
// Analyze implements Analyzer. It matches every selector-bearing Telemetry
// against the pods in its namespace, reporting selectors that match nothing
// and distinct Telemetries whose selectors overlap on the same pod.
func (a *SelectorAnalyzer) Analyze(c analysis.Context) {
	podsToTelemetries := make(map[resource.FullName][]*resource.Instance)
	// This is using an unindexed approach for matching selectors.
	// Using an index for selectors is problematic because selector != label
	// We can match a label to a selector, but we can't generate a selector from a label.
	c.ForEach(gvk.Telemetry, func(rs *resource.Instance) bool {
		s := rs.Message.(*v1alpha1.Telemetry)
		// For this analysis, ignore Telemetries with no selectors specified at all.
		if s.Selector == nil || len(s.GetSelector().MatchLabels) == 0 {
			return true
		}
		sNs := rs.Metadata.FullName.Namespace
		sel := labels.SelectorFromSet(s.GetSelector().MatchLabels)
		foundPod := false
		c.ForEach(gvk.Pod, func(rp *resource.Instance) bool {
			pNs := rp.Metadata.FullName.Namespace
			podLabels := labels.Set(rp.Metadata.Labels)
			// Only attempt to match in the same namespace
			if pNs != sNs {
				return true
			}
			if sel.Matches(podLabels) {
				foundPod = true
				podsToTelemetries[rp.Metadata.FullName] = append(podsToTelemetries[rp.Metadata.FullName], rs)
			}
			return true
		})
		// Selector matched no pod at all: dangling reference.
		if !foundPod {
			m := msg.NewReferencedResourceNotFound(rs, "selector", sel.String())
			label := util.ExtractLabelFromSelectorString(sel.String())
			if line, ok := util.ErrorLine(rs, fmt.Sprintf(util.TelemetrySelector, label)); ok {
				m.Line = line
			}
			c.Report(gvk.Telemetry, m)
		}
		return true
	})
	// Report a conflict for every Telemetry that shares a selected pod with another.
	for p, sList := range podsToTelemetries {
		if len(sList) == 1 {
			continue
		}
		sNames := getNames(sList)
		for _, rs := range sList {
			m := msg.NewConflictingTelemetryWorkloadSelectors(rs, sNames,
				p.Namespace.String(), p.Name.String())
			// util.MetadataName contains no format verbs, so the previous
			// fmt.Sprintf(util.MetadataName) wrapper was a no-op (staticcheck S1039).
			if line, ok := util.ErrorLine(rs, util.MetadataName); ok {
				m.Line = line
			}
			c.Report(gvk.Telemetry, m)
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// getNames collects the short (local) names of the given resources.
func getNames(entries []*resource.Instance) []string {
	out := make([]string, 0, len(entries))
	for i := range entries {
		out = append(out, string(entries[i].Metadata.FullName.Name))
	}
	return out
}
// fetchMeshConfig returns a mesh config resource from the context, stopping
// iteration as soon as the canonical config (util.MeshConfigName) is seen.
func fetchMeshConfig(c analysis.Context) *v1alpha1.MeshConfig {
	var out *v1alpha1.MeshConfig
	c.ForEach(gvk.MeshConfig, func(r *resource.Instance) bool {
		out = r.Message.(*v1alpha1.MeshConfig)
		// Returning false stops iteration once the named mesh config is found.
		return r.Metadata.FullName.Name != util.MeshConfigName
	})
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"istio.io/istio/pkg/config/resource"
)
// IsIstioControlPlane returns true for resources that are part of the Istio control plane
func IsIstioControlPlane(r *resource.Instance) bool {
	resourceLabels := r.Metadata.Labels
	// Control-plane resources carry an "istio" label or "release: istio".
	_, hasIstioLabel := resourceLabels["istio"]
	return hasIstioLabel || resourceLabels["release"] == "istio"
}
// GetInjectorConfigMapName returns the injector config map name for the given
// revision; the empty and "default" revisions map to the unsuffixed name.
func GetInjectorConfigMapName(revision string) string {
	switch revision {
	case "", "default":
		return InjectionConfigMap
	default:
		return InjectionConfigMap + "-" + revision
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
// IsExportToAllNamespaces returns true if export to applies to all namespaces
// and false if it is set to namespace local.
func IsExportToAllNamespaces(exportTos []string) bool {
	// An empty exportTo list means "visible to all namespaces".
	if len(exportTos) == 0 {
		return true
	}
	// "*" takes precedence over any other entry.
	for _, e := range exportTos {
		if e == ExportToAllNamespaces {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"strings"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/resource"
)
const (
	// Path templates for different fields with different paths; may be edited by future developers if a needed field is not covered in this list.
	// Use the path template to find the exact line number for the field.
	// Path for host in VirtualService.
	// Required parameters: route rule, route rule index, route index.
	DestinationHost = "{.spec.%s[%d].route[%d].destination.host}"
	// Path for mirror host in VirtualService.
	// Required parameters: http index.
	MirrorHost = "{.spec.http[%d].mirror.host}"
	// Path for mirrors host in VirtualService.
	// Required parameters: http index, mirror index.
	MirrorsHost = "{.spec.http[%d].mirrors[%d].host}"
	// Path for VirtualService gateway.
	// Required parameters: gateway index.
	VSGateway = "{.spec.gateways[%d]}"
	// Path for regex match of uri, scheme, method and authority.
	// Required parameters: http index, match index, where to match.
	URISchemeMethodAuthorityRegexMatch = "{.spec.http[%d].match[%d].%s.regex}"
	// Path for regex match of headers and queryParams.
	// Required parameters: http index, match index, where to match, match key.
	HeaderAndQueryParamsRegexMatch = "{.spec.http[%d].match[%d].%s.%s.regex}"
	// Path for regex match of allowOrigins.
	// Required parameters: http index, allowOrigins index.
	AllowOriginsRegexMatch = "{.spec.http[%d].corsPolicy.allowOrigins[%d].regex}"
	// Path for workload selector.
	// Required parameters: selector label.
	WorkloadSelector = "{.spec.workloadSelector.labels.%s}"
	// Path for port from ports collections.
	// Required parameters: port index.
	PortInPorts = "{.spec.ports[%d].port}"
	// Path for fromRegistry in the mesh networks.
	// Required parameters: network name, endPoint index.
	FromRegistry = "{.networks.%s.endpoints[%d]}"
	// Path for the image in the container.
	// Required parameters: container index.
	ImageInContainer = "{.spec.containers[%d].image}"
	// Path for namespace in metadata.
	// Required parameters: none.
	MetadataNamespace = "{.metadata.namespace}"
	// Path for name in metadata.
	// Required parameters: none.
	MetadataName = "{.metadata.name}"
	// Path for namespace in authorizationPolicy.
	// Required parameters: rule index, from index, namespace index.
	AuthorizationPolicyNameSpace = "{.spec.rules[%d].from[%d].source.namespaces[%d]}"
	// Path for annotation.
	// Required parameters: annotation name.
	Annotation = "{.metadata.annotations.%s}"
	// Path for selector in Gateway.
	// Required parameters: selector label.
	GatewaySelector = "{.spec.selector.%s}"
	// Path for credentialName.
	// Required parameters: server index.
	CredentialName = "{.spec.servers[%d].tls.credentialName}"
	// Path for Port in ServiceEntry.
	// Required parameters: port index.
	ServiceEntryPort = "{.spec.ports[%d].name}"
	// Path for DestinationRule tls certificate.
	// Required parameters: none.
	DestinationRuleTLSCert = "{.spec.trafficPolicy.tls.caCertificates}"
	// Path for DestinationRule port-level tls certificate.
	// Required parameters: portLevelSettings index.
	DestinationRuleTLSPortLevelCert = "{.spec.trafficPolicy.portLevelSettings[%d].tls.caCertificates}"
	// Path for ConfigPatch in envoyFilter.
	// Required parameters: envoyFilter config patch index.
	EnvoyFilterConfigPath = "{.spec.configPatches[%d].patch.value}"
	// Path for selector in telemetry.
	// Required parameters: selector label.
	TelemetrySelector = "{.spec.selector.matchLabels.%s}"
)
// ErrorLine returns the line number of the input path key in the resource
func ErrorLine(r *resource.Instance, path string) (line int, found bool) {
	// A missing key yields the map's zero values (0, false), matching the
	// explicit miss handling of the previous implementation.
	line, found = r.Origin.FieldMap()[path]
	return line, found
}
// ExtractLabelFromSelectorString returns the label of the match in the k8s labels.Selector
func ExtractLabelFromSelectorString(s string) string {
	// Everything before the first "=" is the label key; no "=" means no label.
	label, _, ok := strings.Cut(s, "=")
	if !ok {
		return ""
	}
	return label
}
// AddLineNumber sets m.Line to the source line of the given annotation on the
// resource and reports whether the annotation's position was found.
//
// NOTE(review): m is passed by value, so the `m.Line = line` assignment below
// mutates only this function's copy of the message — confirm that callers
// actually observe the updated line, or that diag.Message.Line has reference
// semantics; otherwise the boolean result is the only effect of this call.
func AddLineNumber(r *resource.Instance, ann string, m diag.Message) bool {
	if line, ok := ErrorLine(r, fmt.Sprintf(Annotation, ann)); ok {
		m.Line = line
		return true
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strings"
"istio.io/istio/pkg/config/resource"
)
// ScopedFqdn is a FQDN prefixed with an export scope ("*", ".", or a
// namespace name) and a "/" separator.
type ScopedFqdn string

// GetScopeAndFqdn splits ScopedFqdn back to scope namespace and fqdn parts.
// A malformed value with no "/" separator yields an empty scope and the raw
// string (previously this indexed past the end of the split and panicked).
func (s ScopedFqdn) GetScopeAndFqdn() (string, string) {
	scope, fqdn, ok := strings.Cut(string(s), "/")
	if !ok {
		return "", string(s)
	}
	return scope, fqdn
}
// InScopeOf returns true if ns is in the scope of ScopedFqdn
func (s ScopedFqdn) InScopeOf(ns string) bool {
	scope, fqdn := s.GetScopeAndFqdn()
	switch scope {
	case "*", ns:
		// Exported everywhere, or exported to exactly this namespace.
		return true
	case ".":
		// Namespace-local: visible only inside the fqdn's own namespace.
		return ns == GetFullNameFromFQDN(fqdn).Namespace.String()
	default:
		return false
	}
}
// NewScopedFqdn converts the passed host to FQDN if needed and applies the passed scope.
func NewScopedFqdn(scope string, namespace resource.Namespace, host string) ScopedFqdn {
	return ScopedFqdn(scope + "/" + ConvertHostToFQDN(namespace, host))
}
// GetResourceNameFromHost figures out the resource.FullName to look up from the provided host string
// We need to handle two possible formats: short name and FQDN
// https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/#Destination
func GetResourceNameFromHost(defaultNamespace resource.Namespace, host string) resource.FullName {
	// First, try to parse as FQDN (which can be cross-namespace)
	if parsed := GetFullNameFromFQDN(host); parsed.Namespace != "" {
		return parsed
	}
	// Otherwise, treat this as a short name and use the assumed namespace
	return resource.FullName{
		Namespace: defaultNamespace,
		Name:      resource.LocalName(host),
	}
}
// GetFullNameFromFQDN tries to parse namespace and name from a fqdn.
// Empty strings are returned if either namespace or name cannot be parsed.
func GetFullNameFromFQDN(fqdn string) resource.FullName {
	// Only the first match was ever used, so FindStringSubmatch suffices and
	// avoids scanning for (and allocating) every match in the string.
	m := fqdnPattern.FindStringSubmatch(fqdn)
	if m == nil {
		return resource.FullName{
			Namespace: "",
			Name:      "",
		}
	}
	return resource.FullName{
		Namespace: resource.Namespace(m[2]),
		Name:      resource.LocalName(m[1]),
	}
}
// ConvertHostToFQDN returns the given host as a FQDN, if it isn't already.
func ConvertHostToFQDN(namespace resource.Namespace, host string) string {
	// Wildcard hosts and anything already containing a dot are returned untouched.
	if strings.HasPrefix(host, "*") || strings.Contains(host, ".") {
		return host
	}
	// Short name: qualify with the namespace and the cluster-local domain.
	return host + "." + string(namespace) + "." + DefaultClusterLocalDomain
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"istio.io/api/label"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/slices"
)
// DeploymentInMesh returns true if deployment is in the service mesh (has sidecar)
func DeploymentInMesh(r *resource.Instance, c analysis.Context) bool {
	spec := r.Message.(*appsv1.DeploymentSpec)
	tmpl := spec.Template
	ns := resource.Namespace(r.Metadata.FullName.Namespace.String())
	return inMesh(tmpl.Annotations, tmpl.Labels, ns, tmpl.Spec.Containers, c)
}
// PodInMesh returns true if a Pod is in the service mesh (has sidecar)
func PodInMesh(r *resource.Instance, c analysis.Context) bool {
	spec := r.Message.(*v1.PodSpec)
	// Consider both regular and init containers when looking for the proxy;
	// clone so the spec's own slice is not mutated by append.
	containers := append(slices.Clone(spec.Containers), spec.InitContainers...)
	return inMesh(r.Metadata.Annotations, r.Metadata.Labels,
		r.Metadata.FullName.Namespace, containers, c)
}
// PodInAmbientMode returns true if a Pod is in the service mesh with the ambient mode
func PodInAmbientMode(r *resource.Instance) bool {
	return r != nil &&
		r.Metadata.Annotations[constants.AmbientRedirection] == constants.AmbientRedirectionEnabled
}
// NamespaceInAmbientMode returns true if a Namespace is configured as a ambient namespace.
func NamespaceInAmbientMode(r *resource.Instance) bool {
	if r == nil {
		return false
	}
	nsLabels := r.Metadata.Labels
	// A namespace explicitly opted into sidecar injection is not ambient.
	if nsLabels[InjectionLabelName] == InjectionLabelEnableValue {
		return false
	}
	// Likewise for a namespace pinned to a non-empty sidecar revision.
	if rev, found := nsLabels[label.IoIstioRev.Name]; found && rev != "" {
		return false
	}
	return nsLabels[constants.DataplaneMode] == constants.DataplaneModeAmbient
}
// inMesh reports whether a workload with the given metadata and containers is
// part of the sidecar mesh. Checks, in order: an injected proxy container, the
// injection label, the injection annotation, then the namespace-level
// auto-injection label.
func inMesh(annos, labels map[string]string, namespace resource.Namespace, containers []v1.Container, c analysis.Context) bool {
	// A pod that already carries the sidecar container is in the mesh.
	if hasIstioProxy(containers) {
		return true
	}
	// Labels take precedence over annotations.
	for _, metadata := range []map[string]string{labels, annos} {
		if enabled, present := getPodSidecarInjectionStatus(metadata); present {
			return enabled
		}
	}
	// Fall back to the namespace auto-injection label, if the namespace exists.
	if enabled, present := getNamesSidecarInjectionStatus(namespace, c); present {
		return enabled
	}
	return false
}
// getPodSidecarInjectionStatus returns two booleans: enabled and ok.
// enabled is true when the metadata has the label/annotation 'sidecar.istio.io/inject: "true"'.
// ok is true when the 'sidecar.istio.io/inject' label/annotation is present at
// all, regardless of its value (the previous doc stated the opposite).
func getPodSidecarInjectionStatus(metadata map[string]string) (enabled bool, ok bool) {
	v, ok := metadata[label.SidecarInject.Name]
	return v == "true", ok
}
// getNamesSidecarInjectionStatus returns two booleans: enabled and ok.
// enabled is true when namespace ns has the 'istio-injection' label set to 'enabled'.
// ok is true when the namespace resource was found in the context — whether or
// not it carries the 'istio-injection' label. (The previous doc used the old
// function name and described ok incorrectly.)
func getNamesSidecarInjectionStatus(ns resource.Namespace, c analysis.Context) (enabled bool, ok bool) {
	enabled, ok = false, false
	namespace := c.Find(gvk.Namespace, resource.NewFullName("", resource.LocalName(ns)))
	if namespace != nil {
		enabled, ok = namespace.Metadata.Labels[InjectionLabelName] == InjectionLabelEnableValue, true
	}
	return enabled, ok
}
// hasIstioProxy reports whether the container list includes the istio-proxy
// sidecar (matched by name) with a non-empty image.
func hasIstioProxy(containers []v1.Container) bool {
	for i := range containers {
		if containers[i].Name == IstioProxyName {
			// Only the first container with the proxy name is considered.
			return containers[i].Image != ""
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strings"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/networking/v1beta1"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/util/protomarshal"
)
// EffectiveProxyConfigResolver resolves effective proxy config values for a
// pod by layering the mesh default config, the root-namespace ProxyConfig,
// namespace-scoped ProxyConfigs, and workload-selector ProxyConfigs.
type EffectiveProxyConfigResolver struct {
	// meshConfig supplies the mesh-wide default proxy config.
	meshConfig *meshconfig.MeshConfig
	// rootNamespace is the mesh root namespace (defaulted to "istio-system").
	rootNamespace string
	// root is the ProxyConfig found in the root namespace (cluster-wide).
	root *v1beta1.ProxyConfig
	// namespace maps namespace name -> selector-less ProxyConfig for that namespace.
	namespace map[string]*v1beta1.ProxyConfig
	// workload maps "namespace/name" -> ProxyConfig carrying a workload selector.
	workload map[string]*v1beta1.ProxyConfig
}
// ImageType returns the effective image type for the given pod.
//
// Precedence, lowest to highest: mesh default config, root-namespace
// ProxyConfig, namespace ProxyConfig, workload-selector ProxyConfig, and
// finally the pod's proxy.istio.io/config annotation. The "default" variant
// is normalized to "".
func (e *EffectiveProxyConfigResolver) ImageType(pod *resource.Instance) string {
	variant := ""
	if e.meshConfig.GetDefaultConfig().GetImage().GetImageType() != "" {
		variant = e.meshConfig.GetDefaultConfig().GetImage().GetImageType()
	}
	if e.root.GetImage().GetImageType() != "" {
		variant = e.root.GetImage().GetImageType()
	}
	podNs := pod.Metadata.FullName.Namespace.String()
	if v, ok := e.namespace[podNs]; ok {
		if v.GetImage().GetImageType() != "" {
			variant = v.GetImage().GetImageType()
		}
	}
	// check if there are workload level resources that match the pod
	for k, v := range e.workload {
		// Keys are "namespace/name"; include the separator in the prefix so a
		// namespace like "foo" does not also match resources in "foobar".
		if !strings.HasPrefix(k, podNs+"/") {
			continue
		}
		if maps.Contains(v.GetSelector().GetMatchLabels(), pod.Metadata.Labels) {
			if v.GetImage().GetImageType() != "" {
				variant = v.GetImage().GetImageType()
			}
		}
	}
	if v, ok := pod.Metadata.Annotations[annotation.ProxyConfig.Name]; ok {
		pc := &meshconfig.ProxyConfig{}
		// Invalid annotation YAML is deliberately ignored (best effort).
		if err := protomarshal.ApplyYAML(v, pc); err == nil {
			if pc.GetImage().GetImageType() != "" {
				variant = pc.GetImage().GetImageType()
			}
		}
	}
	if variant == "default" {
		variant = ""
	}
	return variant
}
// NewEffectiveProxyConfigResolver builds a resolver from the mesh config and
// every ProxyConfig resource visible in the analysis context.
func NewEffectiveProxyConfigResolver(c analysis.Context) *EffectiveProxyConfigResolver {
	resolver := &EffectiveProxyConfigResolver{
		meshConfig: &meshconfig.MeshConfig{},
		namespace:  make(map[string]*v1beta1.ProxyConfig),
		workload:   make(map[string]*v1beta1.ProxyConfig),
	}
	c.ForEach(gvk.MeshConfig, func(r *resource.Instance) bool {
		mc := r.Message.(*meshconfig.MeshConfig)
		resolver.meshConfig = mc
		resolver.rootNamespace = mc.GetRootNamespace()
		if resolver.rootNamespace == "" {
			resolver.rootNamespace = "istio-system"
		}
		return true
	})
	c.ForEach(gvk.ProxyConfig, func(r *resource.Instance) bool {
		pc := r.Message.(*v1beta1.ProxyConfig)
		ns := r.Metadata.FullName.Namespace.String()
		switch {
		case ns == resolver.rootNamespace:
			// The root-namespace ProxyConfig applies cluster-wide.
			resolver.root = pc
		case pc.GetSelector() == nil:
			// No selector: applies to the whole namespace.
			resolver.namespace[ns] = pc
		default:
			// Selector present: applies only to matching workloads.
			resolver.workload[r.Metadata.FullName.String()] = pc
		}
		return true
	})
	return resolver
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"strings"
corev1 "k8s.io/api/core/v1"
"istio.io/api/annotation"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// InitServiceEntryHostMap builds a map from scoped FQDN to the ServiceEntry
// serving that host. It covers explicitly defined ServiceEntries and k8s
// Services (converted to synthetic ServiceEntries). The scope prefix encodes
// which namespaces the host is visible to ("*" meaning all).
func InitServiceEntryHostMap(ctx analysis.Context) map[ScopedFqdn]*v1alpha3.ServiceEntry {
	result := make(map[ScopedFqdn]*v1alpha3.ServiceEntry)
	ctx.ForEach(gvk.ServiceEntry, func(r *resource.Instance) bool {
		s := r.Message.(*v1alpha3.ServiceEntry)
		hostsNamespaceScope := string(r.Metadata.FullName.Namespace)
		// exportsToAll is sticky across hosts: once any exportTo entry is "*",
		// remaining hosts are registered under the all-namespaces scope only.
		exportsToAll := false
		for _, h := range s.GetHosts() {
			// ExportToAll scenario
			if len(s.ExportTo) == 0 || exportsToAll {
				result[NewScopedFqdn(ExportToAllNamespaces, r.Metadata.FullName.Namespace, h)] = s
				continue // If exports to all, we can skip adding to each namespace
			}
			for _, ns := range s.ExportTo {
				switch ns {
				case ExportToAllNamespaces:
					result[NewScopedFqdn(ExportToAllNamespaces, r.Metadata.FullName.Namespace, h)] = s
					exportsToAll = true
				case ExportToNamespaceLocal:
					// "." makes the host visible only to the entry's own namespace.
					result[NewScopedFqdn(hostsNamespaceScope, r.Metadata.FullName.Namespace, h)] = s
				default:
					// Any other entry is an explicit namespace scope.
					result[NewScopedFqdn(ns, r.Metadata.FullName.Namespace, h)] = s
				}
				// If exports to all, we don't need to check other namespaces
				if exportsToAll {
					break
				}
			}
		}
		return true
	})
	// converts k8s service to serviceEntry since destinationHost
	// validation is performed against serviceEntry
	ctx.ForEach(gvk.Service, func(r *resource.Instance) bool {
		s := r.Message.(*corev1.ServiceSpec)
		var se *v1alpha3.ServiceEntry
		var ports []*v1alpha3.ServicePort
		for _, p := range s.Ports {
			ports = append(ports, &v1alpha3.ServicePort{
				Number: uint32(p.Port),
				Name: p.Name,
				Protocol: string(p.Protocol),
			})
		}
		// The synthetic entry is registered under the Service's FQDN.
		host := ConvertHostToFQDN(r.Metadata.FullName.Namespace, r.Metadata.FullName.Name.String())
		se = &v1alpha3.ServiceEntry{
			Hosts: []string{host},
			Ports: ports,
		}
		// Visibility of a Service is controlled by its exportTo annotation;
		// an empty annotation means visible to all namespaces.
		visibleNamespaces := getVisibleNamespacesFromExportToAnno(
			r.Metadata.Annotations[annotation.NetworkingExportTo.Name], r.Metadata.FullName.Namespace.String())
		for _, scope := range visibleNamespaces {
			// Note: registered under the Service's short name per scope; the
			// FQDN form is produced inside NewScopedFqdn.
			result[NewScopedFqdn(scope, r.Metadata.FullName.Namespace, r.Metadata.FullName.Name.String())] = se
		}
		return true
	})
	return result
}
// getVisibleNamespacesFromExportToAnno parses a comma-separated exportTo
// annotation into the list of namespace scopes a resource is visible to.
// An empty annotation means all namespaces; "." maps to the resource's own
// namespace.
func getVisibleNamespacesFromExportToAnno(anno, resourceNamespace string) []string {
	if anno == "" {
		return []string{ExportToAllNamespaces}
	}
	parts := strings.Split(anno, ",")
	scopes := make([]string, 0, len(parts))
	for _, ns := range parts {
		if ns == ExportToNamespaceLocal {
			ns = resourceNamespace
		}
		scopes = append(scopes, ns)
	}
	return scopes
}
// GetDestinationHost resolves a destination host referenced from sourceNs
// (honoring the source resource's exportTo) against the precomputed
// ServiceEntry host map, returning nil when no visible entry matches.
// Exact-host lookups are attempted before the more expensive wildcard scan.
func GetDestinationHost(sourceNs resource.Namespace, exportTo []string, host string,
	serviceEntryHosts map[ScopedFqdn]*v1alpha3.ServiceEntry,
) *v1alpha3.ServiceEntry {
	// Check explicitly defined ServiceEntries as well as services discovered from the platform
	// Check ServiceEntries which are exposed to all namespaces
	allNsScopedFqdn := NewScopedFqdn(ExportToAllNamespaces, sourceNs, host)
	if s, ok := serviceEntryHosts[allNsScopedFqdn]; ok {
		return s
	}
	// ServiceEntries can be either namespace scoped or exposed to different/all namespaces
	if len(exportTo) == 0 {
		// No exportTo on the source resource: only its own namespace scope applies.
		nsScopedFqdn := NewScopedFqdn(string(sourceNs), sourceNs, host)
		if s, ok := serviceEntryHosts[nsScopedFqdn]; ok {
			return s
		}
	} else {
		for _, e := range exportTo {
			// "." is shorthand for the source resource's own namespace.
			if e == ExportToNamespaceLocal {
				e = sourceNs.String()
			}
			nsScopedFqdn := NewScopedFqdn(e, sourceNs, host)
			if s, ok := serviceEntryHosts[nsScopedFqdn]; ok {
				return s
			}
		}
	}
	// Now check wildcard matches, namespace scoped or all namespaces
	// (This more expensive checking left for last)
	// Assumes the wildcard entries are correctly formatted ("*<dns suffix>")
	for seHostScopedFqdn, s := range serviceEntryHosts {
		scope, seHost := seHostScopedFqdn.GetScopeAndFqdn()
		// Skip over non-wildcard entries
		if !strings.HasPrefix(seHost, Wildcard) {
			continue
		}
		// Skip over entries not visible to the current virtual service namespace
		if scope != ExportToAllNamespaces && scope != string(sourceNs) {
			continue
		}
		// A wildcard entry matches when its suffix covers the (de-wildcarded) host.
		seHostWithoutWildcard := strings.TrimPrefix(seHost, Wildcard)
		hostWithoutWildCard := strings.TrimPrefix(host, Wildcard)
		if strings.HasSuffix(hostWithoutWildCard, seHostWithoutWildcard) {
			return s
		}
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"strings"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/sets"
)
// ConflictingMeshGatewayHostsAnalyzer checks if multiple virtual services
// associated with the mesh gateway have conflicting hosts. The behavior is
// undefined if conflicts exist.
type ConflictingMeshGatewayHostsAnalyzer struct{}

// Compile-time assertion that the analysis.Analyzer interface is implemented.
var _ analysis.Analyzer = &ConflictingMeshGatewayHostsAnalyzer{}
// Metadata implements Analyzer
func (c *ConflictingMeshGatewayHostsAnalyzer) Metadata() analysis.Metadata {
	meta := analysis.Metadata{
		Name:        "virtualservice.ConflictingMeshGatewayHostsAnalyzer",
		Description: "Checks if multiple virtual services associated with the mesh gateway have conflicting hosts",
		Inputs:      []config.GroupVersionKind{gvk.VirtualService},
	}
	return meta
}
// Analyze implements Analyzer. For every mesh-gateway host it merges in the
// "*"-scoped VirtualServices that can also conflict, then reports each
// conflicting VirtualService exactly once.
func (c *ConflictingMeshGatewayHostsAnalyzer) Analyze(ctx analysis.Context) {
	hs := initMeshGatewayHosts(ctx)
	// reported tracks VirtualServices already flagged so each is reported once
	// even when it appears under several conflicting hosts.
	reported := make(map[resource.FullName]bool)
	for scopedFqdn, vsList := range hs {
		scope, _ := scopedFqdn.GetScopeAndFqdn()
		if scope != util.ExportToAllNamespaces {
			// Hosts exported to all namespaces can conflict with this
			// namespace-scoped host too, so merge them in before counting.
			noScopedVSList := getExportToAllNamespacesVSListForScopedHost(scopedFqdn, hs)
			vsList = append(vsList, noScopedVSList...)
		}
		if len(vsList) > 1 {
			vsNames := combineResourceEntryNames(vsList)
			for i := range vsList {
				if reported[vsList[i].Metadata.FullName] {
					continue
				}
				reported[vsList[i].Metadata.FullName] = true
				m := msg.NewConflictingMeshGatewayVirtualServiceHosts(vsList[i], vsNames, string(scopedFqdn))
				// NOTE(review): util.MetadataName contains no format verbs, so
				// this Sprintf is a no-op wrapper that `go vet`'s printf check
				// flags; consider passing the constant directly.
				if line, ok := util.ErrorLine(vsList[i], fmt.Sprintf(util.MetadataName)); ok {
					m.Line = line
				}
				ctx.Report(gvk.VirtualService, m)
			}
		}
	}
}
// getExportToAllNamespacesVSListForScopedHost collects the VirtualServices
// registered under a "*"-scoped host that matches the host of the given
// scoped FQDN.
func getExportToAllNamespacesVSListForScopedHost(sh util.ScopedFqdn, meshGatewayHosts map[util.ScopedFqdn][]*resource.Instance) []*resource.Instance {
	_, rawHost := sh.GetScopeAndFqdn()
	target := host.Name(rawHost)
	out := make([]*resource.Instance, 0)
	for scoped, resources := range meshGatewayHosts {
		scope, candidate := scoped.GetScopeAndFqdn()
		if scope != util.ExportToAllNamespaces {
			continue
		}
		if !target.Matches(host.Name(candidate)) {
			continue
		}
		out = append(out, resources...)
	}
	return out
}
// combineResourceEntryNames joins the full names of the resources with commas.
func combineResourceEntryNames(rList []*resource.Instance) string {
	var b strings.Builder
	for i, r := range rList {
		if i > 0 {
			b.WriteString(",")
		}
		b.WriteString(r.Metadata.FullName.String())
	}
	return b.String()
}
// initMeshGatewayHosts indexes, for every VirtualService attached to the mesh
// gateway, each of its hosts under every namespace scope it is exported to.
func initMeshGatewayHosts(ctx analysis.Context) map[util.ScopedFqdn][]*resource.Instance {
	hostsVirtualServices := map[util.ScopedFqdn][]*resource.Instance{}
	ctx.ForEach(gvk.VirtualService, func(r *resource.Instance) bool {
		vs := r.Message.(*v1alpha3.VirtualService)
		vsNamespace := r.Metadata.FullName.Namespace

		// No entry in gateways imply "mesh" by default
		vsAttachedToMeshGateway := len(vs.Gateways) == 0
		for _, g := range vs.Gateways {
			if g == util.MeshGateway {
				vsAttachedToMeshGateway = true
				break // one match is sufficient
			}
		}
		if !vsAttachedToMeshGateway {
			return true
		}

		// determine the scope of hosts i.e. local to VirtualService namespace or
		// all namespaces
		var hostsNamespaceScope []string
		if util.IsExportToAllNamespaces(vs.ExportTo) {
			hostsNamespaceScope = []string{util.ExportToAllNamespaces}
		} else {
			// Deduplicate explicit scopes, mapping "." to the VS namespace.
			nss := sets.New[string]()
			for _, et := range vs.ExportTo {
				if et == util.ExportToNamespaceLocal {
					nss.Insert(vsNamespace.String())
				} else {
					nss.Insert(et)
				}
			}
			hostsNamespaceScope = nss.UnsortedList()
		}

		for _, nsScope := range hostsNamespaceScope {
			for _, h := range vs.Hosts {
				scopedFqdn := util.NewScopedFqdn(nsScope, vsNamespace, h)
				// append handles the first-seen (nil slice) case, so the
				// previous explicit length check was redundant.
				hostsVirtualServices[scopedFqdn] = append(hostsVirtualServices[scopedFqdn], r)
			}
		}
		return true
	})
	return hostsVirtualServices
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// DestinationHostAnalyzer checks the destination hosts associated with each virtual service
type DestinationHostAnalyzer struct{}

// Compile-time assertion that the analysis.Analyzer interface is implemented.
var _ analysis.Analyzer = &DestinationHostAnalyzer{}

// hostAndSubset pairs a destination host's full name with a subset name.
type hostAndSubset struct {
	host resource.FullName
	subset string
}
// Metadata implements Analyzer
func (a *DestinationHostAnalyzer) Metadata() analysis.Metadata {
	meta := analysis.Metadata{
		Name:        "virtualservice.DestinationHostAnalyzer",
		Description: "Checks the destination hosts associated with each virtual service",
		Inputs: []config.GroupVersionKind{
			gvk.ServiceEntry,
			gvk.VirtualService,
			gvk.Service,
		},
	}
	return meta
}
// Analyze implements Analyzer
func (a *DestinationHostAnalyzer) Analyze(ctx analysis.Context) {
	// Precompute the set of service entry hosts that exist (there can be more than one defined per ServiceEntry CRD)
	serviceEntryHosts := util.InitServiceEntryHostMap(ctx)
	subsetDestinations := initVirtualServiceDestinations(ctx)
	ctx.ForEach(gvk.VirtualService, func(r *resource.Instance) bool {
		a.analyzeVirtualService(r, ctx, serviceEntryHosts)
		a.analyzeSubset(r, ctx, subsetDestinations)
		return true
	})
}
// analyzeSubset reports HTTP routes on gateway-attached VirtualServices whose
// destination omits a subset while some VirtualService (per vsDestinations)
// defines a subset-qualified destination for the same host — i.e. the ingress
// route rules would not be affected by that subset.
func (a *DestinationHostAnalyzer) analyzeSubset(r *resource.Instance, ctx analysis.Context, vsDestinations map[resource.FullName][]*v1alpha3.Destination) {
	vs := r.Message.(*v1alpha3.VirtualService)
	// if there's no gateway specified, we're done
	if len(vs.Gateways) == 0 {
		return
	}
	for ruleIndex, http := range vs.Http {
		for routeIndex, route := range http.Route {
			if route.Destination.Subset == "" {
				for virtualservice, destinations := range vsDestinations {
					for _, destination := range destinations {
						if destination.Host == route.Destination.Host {
							m := msg.NewIngressRouteRulesNotAffected(r, virtualservice.String(), r.Metadata.FullName.String())
							// NOTE(review): util.DestinationHost's first verb is the
							// route-rule segment; http.Name here is the route's
							// user-assigned name (often empty) — confirm this matches
							// the field-map keys rather than a literal like "http".
							key := fmt.Sprintf(util.DestinationHost, http.Name, ruleIndex, routeIndex)
							if line, ok := util.ErrorLine(r, key); ok {
								m.Line = line
							}
							ctx.Report(gvk.VirtualService, m)
						}
					}
				}
			}
		}
	}
}
// initVirtualServiceDestinations maps each VirtualService to the HTTP route
// destinations that both name a subset and target one of its own hosts.
func initVirtualServiceDestinations(ctx analysis.Context) map[resource.FullName][]*v1alpha3.Destination {
	out := make(map[resource.FullName][]*v1alpha3.Destination)
	ctx.ForEach(gvk.VirtualService, func(r *resource.Instance) bool {
		vs := r.Message.(*v1alpha3.VirtualService)
		for _, httpRoute := range vs.Http {
			for _, weighted := range httpRoute.Route {
				// Only destinations that name a subset are of interest here.
				if weighted.Destination.Subset == "" {
					continue
				}
				for _, h := range vs.Hosts {
					if weighted.Destination.Host == h {
						out[r.Metadata.FullName] = append(out[r.Metadata.FullName], weighted.Destination)
					}
				}
			}
		}
		return true
	})
	return out
}
// analyzeVirtualService verifies that every route and mirror destination of the
// VirtualService resolves to a known host, and that any referenced port exists
// on the resolved ServiceEntry.
func (a *DestinationHostAnalyzer) analyzeVirtualService(r *resource.Instance, ctx analysis.Context,
	serviceEntryHosts map[util.ScopedFqdn]*v1alpha3.ServiceEntry,
) {
	vs := r.Message.(*v1alpha3.VirtualService)

	// Ordinary route destinations (tcp/tls/http).
	for _, dest := range getRouteDestinations(vs) {
		entry := util.GetDestinationHost(r.Metadata.FullName.Namespace, vs.ExportTo, dest.Destination.GetHost(), serviceEntryHosts)
		if entry == nil {
			m := msg.NewReferencedResourceNotFound(r, "host", dest.Destination.GetHost())
			key := fmt.Sprintf(util.DestinationHost, dest.RouteRule, dest.ServiceIndex, dest.DestinationIndex)
			if line, ok := util.ErrorLine(r, key); ok {
				m.Line = line
			}
			ctx.Report(gvk.VirtualService, m)
			continue
		}
		checkServiceEntryPorts(ctx, r, dest, entry)
	}

	// HTTP mirror destinations (legacy single mirror and the mirrors list).
	for _, dest := range getHTTPMirrorDestinations(vs) {
		entry := util.GetDestinationHost(r.Metadata.FullName.Namespace, vs.ExportTo, dest.Destination.GetHost(), serviceEntryHosts)
		if entry == nil {
			m := msg.NewReferencedResourceNotFound(r, "mirror host", dest.Destination.GetHost())
			var key string
			switch dest.RouteRule {
			case "http.mirror":
				key = fmt.Sprintf(util.MirrorHost, dest.ServiceIndex)
			default:
				key = fmt.Sprintf(util.MirrorsHost, dest.ServiceIndex, dest.DestinationIndex)
			}
			if line, ok := util.ErrorLine(r, key); ok {
				m.Line = line
			}
			ctx.Report(gvk.VirtualService, m)
			continue
		}
		checkServiceEntryPorts(ctx, r, dest, entry)
	}
}
func checkServiceEntryPorts(ctx analysis.Context, r *resource.Instance, d *AnnotatedDestination, s *v1alpha3.ServiceEntry) {
if d.Destination.GetPort() == nil {
// If destination port isn't specified, it's only a problem if the service being referenced exposes multiple ports.
if len(s.GetPorts()) > 1 {
var portNumbers []int
for _, p := range s.GetPorts() {
portNumbers = append(portNumbers, int(p.GetNumber()))
}
m := msg.NewVirtualServiceDestinationPortSelectorRequired(r, d.Destination.GetHost(), portNumbers)
var key string
if d.RouteRule == "http.mirror" {
key = fmt.Sprintf(util.MirrorHost, d.ServiceIndex)
} else if d.RouteRule == "http.mirrors" {
key = fmt.Sprintf(util.MirrorsHost, d.ServiceIndex, d.DestinationIndex)
} else {
key = fmt.Sprintf(util.DestinationHost, d.RouteRule, d.ServiceIndex, d.DestinationIndex)
}
if line, ok := util.ErrorLine(r, key); ok {
m.Line = line
}
ctx.Report(gvk.VirtualService, m)
return
}
// Otherwise, it's not needed and we're done here.
return
}
foundPort := false
for _, p := range s.GetPorts() {
if d.Destination.GetPort().GetNumber() == p.GetNumber() {
foundPort = true
break
}
}
if !foundPort {
m := msg.NewReferencedResourceNotFound(r, "host:port",
fmt.Sprintf("%s:%d", d.Destination.GetHost(), d.Destination.GetPort().GetNumber()))
var key string
if d.RouteRule == "http.mirror" {
key = fmt.Sprintf(util.MirrorHost, d.ServiceIndex)
} else if d.RouteRule == "http.mirrors" {
key = fmt.Sprintf(util.MirrorsHost, d.ServiceIndex, d.DestinationIndex)
} else {
key = fmt.Sprintf(util.DestinationHost, d.RouteRule, d.ServiceIndex, d.DestinationIndex)
}
if line, ok := util.ErrorLine(r, key); ok {
m.Line = line
}
ctx.Report(gvk.VirtualService, m)
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// DestinationRuleAnalyzer checks the destination rules associated with each virtual service
type DestinationRuleAnalyzer struct{}

// Compile-time check that DestinationRuleAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &DestinationRuleAnalyzer{}
// Metadata implements Analyzer
func (d *DestinationRuleAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name:        "virtualservice.DestinationRuleAnalyzer",
		Description: "Checks the destination rules associated with each virtual service",
		// Inputs declares the resource kinds this analyzer reads; the framework
		// uses it to scope which resources are made available in the context.
		Inputs: []config.GroupVersionKind{
			gvk.VirtualService,
			gvk.DestinationRule,
		},
	}
}
// Analyze implements Analyzer
func (d *DestinationRuleAnalyzer) Analyze(ctx analysis.Context) {
	// Build the host+subset index once up front so each VirtualService check
	// is a constant-time map lookup instead of a re-scan of DestinationRules.
	existingCombinations := initDestHostsAndSubsets(ctx)

	ctx.ForEach(gvk.VirtualService, func(r *resource.Instance) bool {
		d.analyzeVirtualService(r, ctx, existingCombinations)
		return true
	})
}
// analyzeVirtualService reports route and mirror destinations that name a
// subset for which no DestinationRule defines the host+subset pair.
func (d *DestinationRuleAnalyzer) analyzeVirtualService(r *resource.Instance, ctx analysis.Context,
	destHostsAndSubsets map[hostAndSubset]bool,
) {
	vs := r.Message.(*v1alpha3.VirtualService)
	ns := r.Metadata.FullName.Namespace

	for _, dest := range getRouteDestinations(vs) {
		if d.checkDestinationSubset(ns, dest.Destination, destHostsAndSubsets) {
			continue
		}
		m := msg.NewReferencedResourceNotFound(r, "host+subset in destinationrule",
			fmt.Sprintf("%s+%s", dest.Destination.GetHost(), dest.Destination.GetSubset()))
		key := fmt.Sprintf(util.DestinationHost, dest.RouteRule, dest.ServiceIndex, dest.DestinationIndex)
		if line, ok := util.ErrorLine(r, key); ok {
			m.Line = line
		}
		ctx.Report(gvk.VirtualService, m)
	}

	for _, dest := range getHTTPMirrorDestinations(vs) {
		if d.checkDestinationSubset(ns, dest.Destination, destHostsAndSubsets) {
			continue
		}
		m := msg.NewReferencedResourceNotFound(r, "mirror+subset in destinationrule",
			fmt.Sprintf("%s+%s", dest.Destination.GetHost(), dest.Destination.GetSubset()))
		// The key format differs between the legacy single-mirror field and the
		// indexed mirrors list.
		var key string
		switch dest.RouteRule {
		case "http.mirror":
			key = fmt.Sprintf(util.MirrorHost, dest.ServiceIndex)
		default:
			key = fmt.Sprintf(util.MirrorsHost, dest.ServiceIndex, dest.DestinationIndex)
		}
		if line, ok := util.ErrorLine(r, key); ok {
			m.Line = line
		}
		ctx.Report(gvk.VirtualService, m)
	}
}
// checkDestinationSubset reports whether the destination's host+subset pair is
// backed by some DestinationRule. A destination that names no subset needs no
// backing and is always considered valid.
func (d *DestinationRuleAnalyzer) checkDestinationSubset(vsNamespace resource.Namespace, destination *v1alpha3.Destination,
	destHostsAndSubsets map[hostAndSubset]bool,
) bool {
	// if there's no subset specified, we're done
	subset := destination.GetSubset()
	if subset == "" {
		return true
	}

	key := hostAndSubset{
		host:   util.GetResourceNameFromHost(vsNamespace, destination.GetHost()),
		subset: subset,
	}
	_, ok := destHostsAndSubsets[key]
	return ok
}
// initDestHostsAndSubsets walks every DestinationRule and records each
// (host, subset) pair it defines, for O(1) membership checks during analysis.
func initDestHostsAndSubsets(ctx analysis.Context) map[hostAndSubset]bool {
	combinations := make(map[hostAndSubset]bool)
	ctx.ForEach(gvk.DestinationRule, func(r *resource.Instance) bool {
		rule := r.Message.(*v1alpha3.DestinationRule)
		ruleNamespace := r.Metadata.FullName.Namespace
		for _, subset := range rule.GetSubsets() {
			combinations[hostAndSubset{
				host:   util.GetResourceNameFromHost(ruleNamespace, rule.GetHost()),
				subset: subset.GetName(),
			}] = true
		}
		return true
	})
	return combinations
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"strings"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
kubeconfig "istio.io/istio/pkg/config/gateway/kube"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// GatewayAnalyzer checks the gateways associated with each virtual service
type GatewayAnalyzer struct{}

// Compile-time check that GatewayAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &GatewayAnalyzer{}
// Metadata implements Analyzer
func (s *GatewayAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name:        "virtualservice.GatewayAnalyzer",
		Description: "Checks the gateways associated with each virtual service",
		// Both kinds are needed: gateways are looked up while walking virtual services.
		Inputs: []config.GroupVersionKind{
			gvk.Gateway,
			gvk.VirtualService,
		},
	}
}
// Analyze implements Analyzer
func (s *GatewayAnalyzer) Analyze(c analysis.Context) {
	// Validate the gateway references of every VirtualService in the context.
	c.ForEach(gvk.VirtualService, func(instance *resource.Instance) bool {
		s.analyzeVirtualService(instance, c)
		return true
	})
}
// analyzeVirtualService validates each gateway reference of a VirtualService:
// it must not point at an internal (managed) gateway, the named Gateway must
// exist, and the VirtualService's hosts must be exposed by that Gateway.
func (s *GatewayAnalyzer) analyzeVirtualService(r *resource.Instance, c analysis.Context) {
	vs := r.Message.(*v1alpha3.VirtualService)
	vsNs := r.Metadata.FullName.Namespace
	vsName := r.Metadata.FullName
	for i, gwName := range vs.Gateways {
		// This is a special-case accepted value
		if gwName == util.MeshGateway {
			continue
		}
		// Gateways auto-generated for the Kubernetes Gateway API are an
		// implementation detail and must not be referenced directly.
		if kubeconfig.IsInternalGatewayReference(gwName) {
			m := msg.NewReferencedInternalGateway(r, vsName.String(), gwName)
			if line, ok := util.ErrorLine(r, fmt.Sprintf(util.VSGateway, i)); ok {
				m.Line = line
			}
			c.Report(gvk.VirtualService, m)
			continue
		}
		// A short gateway name is resolved in the VirtualService's own namespace.
		gwFullName := resource.NewShortOrFullName(vsNs, gwName)
		if !c.Exists(gvk.Gateway, gwFullName) {
			m := msg.NewReferencedResourceNotFound(r, "gateway", gwName)
			if line, ok := util.ErrorLine(r, fmt.Sprintf(util.VSGateway, i)); ok {
				m.Line = line
			}
			c.Report(gvk.VirtualService, m)
		}
		// NOTE(review): when the gateway above does not exist, this host check also
		// fails (no gateway hosts are collected), producing a second message for the
		// same root cause — confirm whether a `continue` after the not-found report
		// is intended.
		if !vsHostInGateway(c, gwFullName, vs.Hosts, vsNs.String()) {
			m := msg.NewVirtualServiceHostNotFoundInGateway(r, vs.Hosts, vsName.String(), gwFullName.String())
			if line, ok := util.ErrorLine(r, fmt.Sprintf(util.VSGateway, i)); ok {
				m.Line = line
			}
			c.Report(gvk.VirtualService, m)
		}
	}
}
// vsHostInGateway reports whether any of the VirtualService hosts is exposed by
// the given gateway. Server hosts are normalized ("./h" -> "<ns>/h", "*/h" ->
// "h", "*/*" -> "*") into a copy, WITHOUT mutating the stored Gateway resource:
// the previous implementation rewrote server.Hosts in place via
// sanitizeServerHostNamespace, leaking the normalization into the shared
// analysis context for every later consumer of that Gateway.
func vsHostInGateway(c analysis.Context, gateway resource.FullName, vsHosts []string, vsNamespace string) bool {
	var gatewayHosts []string

	c.ForEach(gvk.Gateway, func(r *resource.Instance) bool {
		if r.Metadata.FullName != gateway {
			return true
		}
		gw := r.Message.(*v1alpha3.Gateway)
		gatewayNs := r.Metadata.FullName.Namespace.String()
		for _, server := range gw.Servers {
			gatewayHosts = append(gatewayHosts, sanitizedServerHosts(server.Hosts, gatewayNs)...)
		}
		return true
	})

	gatewayHostNames := host.NamesForNamespace(gatewayHosts, vsNamespace)
	for _, gatewayHost := range gatewayHostNames {
		for _, vsHost := range vsHosts {
			if gatewayHost.Matches(host.Name(vsHost)) {
				return true
			}
		}
	}
	return false
}

// sanitizedServerHosts returns a normalized copy of a server's hosts:
// "./host" becomes "<namespace>/host", "*/host" becomes "host", and "*/*"
// collapses the whole server's host list to just "*" (matching the semantics
// of the original in-place sanitization, which replaced the entire slice).
func sanitizedServerHosts(hosts []string, namespace string) []string {
	out := make([]string, 0, len(hosts))
	for _, h := range hosts {
		if !strings.Contains(h, "/") {
			out = append(out, h)
			continue
		}
		parts := strings.Split(h, "/")
		switch {
		case parts[0] == ".":
			out = append(out, fmt.Sprintf("%s/%s", namespace, parts[1]))
		case parts[0] == "*" && parts[1] == "*":
			// The server exposes everything; all other entries are subsumed.
			return []string{"*"}
		case parts[0] == "*":
			out = append(out, parts[1])
		default:
			out = append(out, h)
		}
	}
	return out
}
// convert ./host to currentNamespace/Host
// */host to just host
// */* to just *
// The server's Hosts slice is rewritten in place; on "*/*" the whole list is
// collapsed to a single "*" entry and processing stops.
func sanitizeServerHostNamespace(server *v1alpha3.Server, namespace string) {
	for idx, hostName := range server.Hosts {
		if !strings.Contains(hostName, "/") {
			continue
		}
		parts := strings.Split(hostName, "/")
		switch parts[0] {
		case ".":
			server.Hosts[idx] = fmt.Sprintf("%s/%s", namespace, parts[1])
		case "*":
			if parts[1] == "*" {
				server.Hosts = []string{"*"}
				return
			}
			server.Hosts[idx] = parts[1]
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/networking/v1alpha3"
"istio.io/api/security/v1beta1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/jwt"
)
// JWTClaimRouteAnalyzer checks that a VirtualService routing on JWT claims is
// backed by a RequestAuthentication on the gateway workloads it targets.
type JWTClaimRouteAnalyzer struct{}

// Compile-time check that JWTClaimRouteAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &JWTClaimRouteAnalyzer{}
// Metadata implements Analyzer
func (s *JWTClaimRouteAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name:        "virtualservice.JWTClaimRouteAnalyzer",
		Description: "Checks the VirtualService using JWT claim based routing has corresponding RequestAuthentication",
		// Pods are needed to resolve which workloads a gateway selector targets.
		Inputs: []config.GroupVersionKind{
			gvk.VirtualService,
			gvk.RequestAuthentication,
			gvk.Gateway,
			gvk.Pod,
		},
	}
}
// Analyze implements Analyzer. It indexes RequestAuthentication selectors by
// namespace, then checks every VirtualService that routes on JWT claims.
func (s *JWTClaimRouteAnalyzer) Analyze(c analysis.Context) {
	requestAuthNByNamespace := map[string][]klabels.Selector{}
	c.ForEach(gvk.RequestAuthentication, func(r *resource.Instance) bool {
		ns := r.Metadata.FullName.Namespace.String()
		ra := r.Message.(*v1beta1.RequestAuthentication)
		// GetSelector()/GetMatchLabels() are nil-safe; an empty set selects everything.
		raSelector := klabels.SelectorFromSet(ra.GetSelector().GetMatchLabels())
		// append creates the slice on first use, so the previous explicit
		// empty-slice initialization for new namespaces was redundant.
		requestAuthNByNamespace[ns] = append(requestAuthNByNamespace[ns], raSelector)
		return true
	})

	c.ForEach(gvk.VirtualService, func(r *resource.Instance) bool {
		s.analyze(r, c, requestAuthNByNamespace)
		return true
	})
}
// analyze checks a single VirtualService: if it routes based on a JWT claim,
// every gateway pod it is bound to must match some RequestAuthentication
// selector (from the pod's namespace or from istio-system), otherwise a
// JwtClaimBasedRoutingWithoutRequestAuthN message is reported.
func (s *JWTClaimRouteAnalyzer) analyze(r *resource.Instance, c analysis.Context, requestAuthNByNamespace map[string][]klabels.Selector) {
	// Check if the virtual service is using JWT claim based routing.
	vs := r.Message.(*v1alpha3.VirtualService)
	var vsRouteKey string
	if vsRouteKey = routeBasedOnJWTClaimKey(vs); vsRouteKey == "" {
		return
	}
	vsNs := r.Metadata.FullName.Namespace
	// Check if the virtual service is applied to gateway.
	for _, gwName := range vs.Gateways {
		// "mesh" is the implicit in-mesh gateway; it is not a Gateway resource.
		if gwName == util.MeshGateway {
			continue
		}
		gwFullName := resource.NewShortOrFullName(vsNs, gwName)
		gwRes := c.Find(gvk.Gateway, gwFullName)
		if gwRes == nil {
			// The gateway does not exist, this should already be covered by the gateway analyzer.
			continue
		}
		gw := gwRes.Message.(*v1alpha3.Gateway)
		gwSelector := klabels.SelectorFromSet(gw.Selector)
		// Check each pod selected by the gateway.
		c.ForEach(gvk.Pod, func(rPod *resource.Instance) bool {
			podLabels := klabels.Set(rPod.Metadata.Labels)
			if !gwSelector.Matches(podLabels) {
				return true
			}
			// Check if there is request authentication applied to the pod.
			// Policies in istio-system apply mesh-wide, so they are considered
			// alongside the pod's own namespace.
			var hasRequestAuthNForPod bool
			raSelectors := requestAuthNByNamespace[constants.IstioSystemNamespace]
			// NOTE(review): this append may write into spare capacity of the
			// istio-system slice's backing array; safe today because that slice is
			// never appended to elsewhere, but worth confirming.
			raSelectors = append(raSelectors, requestAuthNByNamespace[rPod.Metadata.FullName.Namespace.String()]...)
			for _, raSelector := range raSelectors {
				if raSelector.Matches(podLabels) {
					hasRequestAuthNForPod = true
					break
				}
			}
			if !hasRequestAuthNForPod {
				m := msg.NewJwtClaimBasedRoutingWithoutRequestAuthN(r, vsRouteKey, gwFullName.String(), rPod.Metadata.FullName.Name.String())
				c.Report(gvk.VirtualService, m)
			}
			return true
		})
	}
}
// routeBasedOnJWTClaimKey returns the first header (or without-header) match
// key that is a JWT-claim routing expression, or "" when the VirtualService
// does not route on JWT claims.
func routeBasedOnJWTClaimKey(vs *v1alpha3.VirtualService) string {
	// claimKey scans one header map for a JWT-claim routing key.
	claimKey := func(headers map[string]*v1alpha3.StringMatch) string {
		for key := range headers {
			if jwt.ToRoutingClaim(key).Match {
				return key
			}
		}
		return ""
	}
	for _, httpRoute := range vs.GetHttp() {
		for _, match := range httpRoute.GetMatch() {
			if key := claimKey(match.GetHeaders()); key != "" {
				return key
			}
			if key := claimKey(match.GetWithoutHeaders()); key != "" {
				return key
			}
		}
	}
	return ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"fmt"
"regexp"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers/util"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// RegexAnalyzer checks all regexes in a virtual service
type RegexAnalyzer struct{}

// Compile-time check that RegexAnalyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &RegexAnalyzer{}
// Metadata implements Analyzer
func (a *RegexAnalyzer) Metadata() analysis.Metadata {
	return analysis.Metadata{
		Name:        "virtualservice.RegexAnalyzer",
		Description: "Checks regex syntax",
		Inputs: []config.GroupVersionKind{
			gvk.VirtualService,
		},
	}
}
// Analyze implements Analyzer
func (a *RegexAnalyzer) Analyze(ctx analysis.Context) {
	// Validate every regex used by every VirtualService in the context.
	ctx.ForEach(gvk.VirtualService, func(instance *resource.Instance) bool {
		a.analyzeVirtualService(instance, ctx)
		return true
	})
}
// analyzeVirtualService validates every regex that can appear in the HTTP match
// and CORS sections of a VirtualService.
func (a *RegexAnalyzer) analyzeVirtualService(r *resource.Instance, ctx analysis.Context) {
	vs := r.Message.(*v1alpha3.VirtualService)

	for i, route := range vs.GetHttp() {
		for j, m := range route.GetMatch() {
			// The four scalar match fields share the same error-line key format.
			scalarFields := []struct {
				where string
				sm    *v1alpha3.StringMatch
			}{
				{"uri", m.GetUri()},
				{"scheme", m.GetScheme()},
				{"method", m.GetMethod()},
				{"authority", m.GetAuthority()},
			}
			for _, field := range scalarFields {
				analyzeStringMatch(r, field.sm, ctx, field.where,
					fmt.Sprintf(util.URISchemeMethodAuthorityRegexMatch, i, j, field.where))
			}
			for key, h := range m.GetHeaders() {
				analyzeStringMatch(r, h, ctx, "headers",
					fmt.Sprintf(util.HeaderAndQueryParamsRegexMatch, i, j, "headers", key))
			}
			for key, qp := range m.GetQueryParams() {
				analyzeStringMatch(r, qp, ctx, "queryParams",
					fmt.Sprintf(util.HeaderAndQueryParamsRegexMatch, i, j, "queryParams", key))
			}
			// We don't validate withoutHeaders, because they are undocumented
		}
		for j, origin := range route.GetCorsPolicy().GetAllowOrigins() {
			analyzeStringMatch(r, origin, ctx, "corsPolicy.allowOrigins",
				fmt.Sprintf(util.AllowOriginsRegexMatch, i, j))
		}
	}
}
// analyzeStringMatch reports an InvalidRegexp message when the StringMatch
// carries a regex that does not compile.
func analyzeStringMatch(r *resource.Instance, sm *v1alpha3.StringMatch, ctx analysis.Context, where string, key string) {
	pattern := sm.GetRegex()
	if pattern == "" {
		// Not a regex match (exact/prefix, or no match at all): nothing to check.
		return
	}
	if _, err := regexp.Compile(pattern); err != nil {
		m := msg.NewInvalidRegexp(r, where, pattern, err.Error())
		// Get line number for different match field
		if line, ok := util.ErrorLine(r, key); ok {
			m.Line = line
		}
		ctx.Report(gvk.VirtualService, m)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package virtualservice
import (
"istio.io/api/networking/v1alpha3"
)
// AnnotatedDestination holds metadata about a Destination object that is used for analyzing
type AnnotatedDestination struct {
	// RouteRule identifies where the destination came from: "tcp", "tls",
	// "http", "http.mirror" or "http.mirrors".
	RouteRule string
	// ServiceIndex is the index of the route rule within the VirtualService.
	ServiceIndex int
	// DestinationIndex is the index of the destination within the rule's
	// route/mirrors list (unused for the single http.mirror field).
	DestinationIndex int
	// Destination is the referenced destination itself.
	Destination *v1alpha3.Destination
}
// getRouteDestinations collects every route destination of the VirtualService's
// tcp, tls and http rules, annotated with the rule kind and the indices needed
// to locate the field in the source document.
func getRouteDestinations(vs *v1alpha3.VirtualService) []*AnnotatedDestination {
	destinations := make([]*AnnotatedDestination, 0)
	// add records one destination with its provenance.
	add := func(rule string, serviceIndex, destinationIndex int, d *v1alpha3.Destination) {
		destinations = append(destinations, &AnnotatedDestination{
			RouteRule:        rule,
			ServiceIndex:     serviceIndex,
			DestinationIndex: destinationIndex,
			Destination:      d,
		})
	}
	for i, rule := range vs.GetTcp() {
		for j, rd := range rule.GetRoute() {
			add("tcp", i, j, rd.GetDestination())
		}
	}
	for i, rule := range vs.GetTls() {
		for j, rd := range rule.GetRoute() {
			add("tls", i, j, rd.GetDestination())
		}
	}
	for i, rule := range vs.GetHttp() {
		for j, rd := range rule.GetRoute() {
			add("http", i, j, rd.GetDestination())
		}
	}
	return destinations
}
// getHTTPMirrorDestinations collects the mirror destinations of every HTTP
// route: the legacy single `mirror` field and the indexed `mirrors` list.
func getHTTPMirrorDestinations(vs *v1alpha3.VirtualService) []*AnnotatedDestination {
	var destinations []*AnnotatedDestination
	for i, httpRoute := range vs.GetHttp() {
		// Legacy single-mirror field: carries no destination index.
		if mirror := httpRoute.GetMirror(); mirror != nil {
			destinations = append(destinations, &AnnotatedDestination{
				RouteRule:    "http.mirror",
				ServiceIndex: i,
				Destination:  mirror,
			})
		}
		for j, mirror := range httpRoute.GetMirrors() {
			destinations = append(destinations, &AnnotatedDestination{
				RouteRule:        "http.mirrors",
				ServiceIndex:     i,
				DestinationIndex: j,
				Destination:      mirror.GetDestination(),
			})
		}
	}
	return destinations
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhook
import (
"fmt"
"strings"
v1 "k8s.io/api/admissionregistration/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/api/label"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/msg"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/util/sets"
)
// Analyzer checks the validity of Istio mutating webhook configurations.
type Analyzer struct {
	// SkipServiceCheck disables verification that each webhook's
	// client-config service exists in the cluster.
	SkipServiceCheck bool
}

// Compile-time check that Analyzer implements analysis.Analyzer.
var _ analysis.Analyzer = &Analyzer{}
// Metadata implements analysis.Analyzer.
func (a *Analyzer) Metadata() analysis.Metadata {
	meta := analysis.Metadata{
		Name:        "webhook.Analyzer",
		Description: "Checks the validity of Istio webhooks",
		Inputs: []config.GroupVersionKind{
			gvk.MutatingWebhookConfiguration,
		},
	}
	// Services are only an input when the service-existence check is enabled.
	if !a.SkipServiceCheck {
		meta.Inputs = append(meta.Inputs, gvk.Service)
	}
	return meta
}
// getNamespaceLabels returns the namespace-label permutations relevant to
// sidecar injection: unlabeled, and istio-injection enabled/disabled.
// A fresh slice of fresh sets is built on each call, so callers may mutate
// the returned entries (Analyze relies on this when adding revision labels).
func getNamespaceLabels() []klabels.Set {
	return []klabels.Set{
		{},
		{"istio-injection": "enabled"},
		{"istio-injection": "disabled"},
	}
}

// getObjectLabels returns the pod-label permutations relevant to sidecar
// injection: unlabeled, and sidecar.istio.io/inject true/false. Like
// getNamespaceLabels, the returned sets are fresh on each call.
func getObjectLabels() []klabels.Set {
	return []klabels.Set{
		{},
		{"sidecar.istio.io/inject": "true"},
		{"sidecar.istio.io/inject": "false"},
	}
}
// Analyze validates Istio mutating webhook configurations: it reports webhooks
// whose namespace/object selectors overlap for some label permutation (which
// would cause a pod to be injected twice) and, unless SkipServiceCheck is set,
// webhooks whose client config points at a non-existent service.
func (a *Analyzer) Analyze(context analysis.Context) {
	// First, extract and index all webhooks we found
	webhooks := map[string][]v1.MutatingWebhook{}
	resources := map[string]*resource.Instance{}
	revisions := sets.New[string]()
	context.ForEach(gvk.MutatingWebhookConfiguration, func(resource *resource.Instance) bool {
		wh := resource.Message.(*v1.MutatingWebhookConfiguration)
		revs := extractRevisions(wh)
		// Skip configurations that neither reference a revision nor look Istio-owned.
		if len(revs) == 0 && !isIstioWebhook(wh) {
			return true
		}
		webhooks[resource.Metadata.FullName.String()] = wh.Webhooks
		for _, h := range wh.Webhooks {
			// Index by "configName/webhookName" so overlap reports can recover the resource.
			resources[fmt.Sprintf("%v/%v", resource.Metadata.FullName.String(), h.Name)] = resource
		}
		revisions.InsertAll(revs...)
		return true
	})
	// Set up all relevant namespace and object selector permutations
	namespaceLabels := getNamespaceLabels()
	for rev := range revisions {
		// getNamespaceLabels() returns fresh sets each call, so adding the
		// revision label here does not alias entries already collected above.
		for _, base := range getNamespaceLabels() {
			base[label.IoIstioRev.Name] = rev
			namespaceLabels = append(namespaceLabels, base)
		}
	}
	objectLabels := getObjectLabels()
	for rev := range revisions {
		for _, base := range getObjectLabels() {
			base[label.IoIstioRev.Name] = rev
			objectLabels = append(objectLabels, base)
		}
	}
	// For each permutation, we check which webhooks it matches. It must match exactly 0 or 1!
	for _, nl := range namespaceLabels {
		for _, ol := range objectLabels {
			matches := sets.New[string]()
			for name, whs := range webhooks {
				for _, wh := range whs {
					if selectorMatches(wh.NamespaceSelector, nl) && selectorMatches(wh.ObjectSelector, ol) {
						matches.Insert(fmt.Sprintf("%v/%v", name, wh.Name))
					}
				}
			}
			if len(matches) > 1 {
				// Report every member of the overlapping set, naming the others.
				for match := range matches {
					others := matches.Difference(sets.New(match))
					context.Report(gvk.MutatingWebhookConfiguration, msg.NewInvalidWebhook(resources[match],
						fmt.Sprintf("Webhook overlaps with others: %v. This may cause injection to occur twice.", others.UnsortedList())))
				}
			}
		}
	}
	// Next, check service references
	if a.SkipServiceCheck {
		return
	}
	for name, whs := range webhooks {
		for _, wh := range whs {
			if wh.ClientConfig.Service == nil {
				// it is an url, skip it
				continue
			}
			fname := resource.NewFullName(
				resource.Namespace(wh.ClientConfig.Service.Namespace),
				resource.LocalName(wh.ClientConfig.Service.Name))
			if !context.Exists(gvk.Service, fname) {
				context.Report(gvk.MutatingWebhookConfiguration, msg.NewInvalidWebhook(resources[fmt.Sprintf("%v/%v", name, wh.Name)],
					fmt.Sprintf("Injector refers to a control plane service that does not exist: %v.", fname)))
			}
		}
	}
}
// isIstioWebhook reports whether any webhook in the configuration carries an
// "istio.io"-suffixed name, marking the configuration as Istio-owned.
func isIstioWebhook(wh *v1.MutatingWebhookConfiguration) bool {
	for _, hook := range wh.Webhooks {
		if strings.HasSuffix(hook.Name, "istio.io") {
			return true
		}
	}
	return false
}
// extractRevisions returns every Istio revision referenced by the webhook
// configuration: the config's own istio.io/rev label plus any revisions
// referenced by the namespace/object selectors of its webhooks.
func extractRevisions(wh *v1.MutatingWebhookConfiguration) []string {
	revs := sets.New[string]()
	if r, ok := wh.Labels[label.IoIstioRev.Name]; ok {
		revs.Insert(r)
	}
	// collect pulls revision references out of a label selector (nil-safe).
	collect := func(selector *metav1.LabelSelector) {
		if selector == nil {
			return
		}
		if r, ok := selector.MatchLabels[label.IoIstioRev.Name]; ok {
			revs.Insert(r)
		}
		for _, expr := range selector.MatchExpressions {
			if expr.Key == label.IoIstioRev.Name {
				revs.InsertAll(expr.Values...)
			}
		}
	}
	for _, webhook := range wh.Webhooks {
		collect(webhook.NamespaceSelector)
		collect(webhook.ObjectSelector)
	}
	return revs.UnsortedList()
}
// selectorMatches reports whether the label set satisfies the given selector.
// An unparseable selector never matches.
func selectorMatches(selector *metav1.LabelSelector, labels klabels.Set) bool {
	// From webhook spec: "Default to the empty LabelSelector, which matches everything."
	if selector == nil {
		return true
	}
	if s, err := metav1.LabelSelectorAsSelector(selector); err == nil {
		return s.Matches(labels)
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package diag
import (
"istio.io/istio/pkg/config/resource"
)
// Compile-time checks that the test doubles satisfy the resource interfaces.
var (
	_ resource.Origin    = &testOrigin{}
	_ resource.Reference = &testReference{}
)
// testOrigin is a minimal resource.Origin implementation used to build mock
// resources for diag tests.
type testOrigin struct {
	name     string             // used as both the friendly name and the comparator key
	ref      resource.Reference // returned by Reference; may be nil
	fieldMap map[string]int     // field-path -> line-number map returned by FieldMap
}
// FriendlyName implements resource.Origin.
func (o testOrigin) FriendlyName() string {
	return o.name
}

// Comparator implements resource.Origin; mock origins compare by name.
func (o testOrigin) Comparator() string {
	return o.name
}

// Namespace implements resource.Origin; mock origins are namespace-less.
func (o testOrigin) Namespace() resource.Namespace {
	return ""
}

// Reference implements resource.Origin.
func (o testOrigin) Reference() resource.Reference {
	return o.ref
}

// FieldMap implements resource.Origin.
func (o testOrigin) FieldMap() map[string]int {
	return o.fieldMap
}
// testReference is a trivial resource.Reference whose String is a fixed name.
type testReference struct {
	name string
}

// String implements resource.Reference.
func (r testReference) String() string {
	return r.name
}
// MockResource returns a resource.Instance with the given name (in the
// "default" namespace unless the name is already namespace-qualified) and a
// testOrigin, for use in tests.
func MockResource(name string) *resource.Instance {
	return &resource.Instance{
		Metadata: resource.Metadata{
			FullName: resource.NewShortOrFullName("default", name),
		},
		Origin: testOrigin{name: name},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package diag
import (
"strings"
)
// Level is the severity level of a message.
type Level struct {
	// sortOrder orders levels by severity: LOWER values are MORE severe
	// (Error=0, Warning=1, Info=2).
	sortOrder int
	name      string
}

// String returns the level's display name.
func (l Level) String() string {
	return l.name
}

// IsWorseThanOrEqualTo reports whether l is at least as severe as target.
// Severity increases as sortOrder decreases, hence the <= comparison.
func (l Level) IsWorseThanOrEqualTo(target Level) bool {
	return l.sortOrder <= target.sortOrder
}
var (
	// Info level is for informational messages
	Info = Level{2, "Info"}

	// Warning level is for warning messages
	Warning = Level{1, "Warning"}

	// Error level is for error messages (most severe: sortOrder 0)
	Error = Level{0, "Error"}
)

// GetAllLevels returns an arbitrarily ordered slice of all Levels defined.
func GetAllLevels() []Level {
	return []Level{Info, Warning, Error}
}
// GetAllLevelStrings returns a list of strings representing the names of all Levels defined. The order is arbitrary but
// should be the same as GetAllLevels.
func GetAllLevelStrings() []string {
	levels := GetAllLevels()
	names := make([]string, 0, len(levels))
	for _, level := range levels {
		names = append(names, level.name)
	}
	return names
}
// GetUppercaseStringToLevelMap returns a mapping of uppercase strings to Level structs. This function is intended to be
// used to convert user input to structs.
func GetUppercaseStringToLevelMap() map[string]Level {
	levels := GetAllLevels()
	byName := make(map[string]Level, len(levels))
	for _, level := range levels {
		byName[strings.ToUpper(level.name)] = level
	}
	return byName
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package diag
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"istio.io/api/analysis/v1alpha1"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/url"
)
// MessageType is a type of diagnostic message
type MessageType struct {
	// The level of the message.
	level Level

	// The error code of the message (e.g. "IST0101"); also used to build the
	// documentation URL.
	code string

	// TODO: Make this localizable
	// template is a fmt format string rendered with Message.Parameters.
	template string
}

// Level returns the level of the MessageType
func (m *MessageType) Level() Level { return m.level }

// Code returns the code of the MessageType
func (m *MessageType) Code() string { return m.code }

// Template returns the message template used by the MessageType
func (m *MessageType) Template() string { return m.template }
// Message is a specific diagnostic message
// TODO: Implement using Analysis message API
type Message struct {
	Type *MessageType

	// The Parameters to the message (substituted into Type's template)
	Parameters []any

	// Resource is the underlying resource instance associated with the
	// message, or nil if no resource is associated with it.
	Resource *resource.Instance

	// DocRef is an optional reference tracker for the documentation URL
	DocRef string

	// Line is the line number of the error place in the message;
	// 0 means no line information is available.
	Line int
}
// Unstructured returns this message as a JSON-style unstructured map
func (m *Message) Unstructured(includeOrigin bool) map[string]any {
	result := map[string]any{
		"code":  m.Type.Code(),
		"level": m.Type.Level().String(),
	}
	if includeOrigin && m.Resource != nil {
		result["origin"] = m.Resource.Origin.FriendlyName()
		if ref := m.Resource.Origin.Reference(); ref != nil {
			loc := ref.String()
			// Substitute the message's own line number when one is known.
			if m.Line != 0 {
				loc = m.ReplaceLine(loc)
			}
			result["reference"] = loc
		}
	}
	result["message"] = fmt.Sprintf(m.Type.Template(), m.Parameters...)

	docQueryString := ""
	if m.DocRef != "" {
		docQueryString = fmt.Sprintf("?ref=%s", m.DocRef)
	}
	result["documentationUrl"] = fmt.Sprintf("%s/%s/%s", url.ConfigAnalysis, strings.ToLower(m.Type.Code()), docQueryString)
	return result
}
// AnalysisMessageBase returns this message as a v1alpha1.AnalysisMessageBase,
// carrying the documentation URL, level and type code (but not the rendered
// message text or resource origin).
func (m *Message) AnalysisMessageBase() *v1alpha1.AnalysisMessageBase {
	docQueryString := ""
	if m.DocRef != "" {
		docQueryString = fmt.Sprintf("?ref=%s", m.DocRef)
	}
	docURL := fmt.Sprintf("%s/%s/%s", url.ConfigAnalysis, strings.ToLower(m.Type.Code()), docQueryString)
	return &v1alpha1.AnalysisMessageBase{
		DocumentationUrl: docURL,
		// Map the level's name (e.g. "Error") onto the proto enum by its
		// uppercase value name.
		Level: v1alpha1.AnalysisMessageBase_Level(v1alpha1.AnalysisMessageBase_Level_value[strings.ToUpper(m.Type.Level().String())]),
		Type: &v1alpha1.AnalysisMessageBase_Type{
			Code: m.Type.Code(),
		},
	}
}
// UnstructuredAnalysisMessageBase returns this message as a JSON-style unstructured map in AnalaysisMessageBase
// TODO(jasonwzm): Remove once message implements AnalysisMessageBase
func (m *Message) UnstructuredAnalysisMessageBase() map[string]any {
	var out map[string]any
	raw, err := json.Marshal(m.AnalysisMessageBase())
	if err != nil {
		// On marshal failure return the nil map, mirroring best-effort behavior.
		return out
	}
	_ = json.Unmarshal(raw, &out)
	return out
}
// Origin returns a human-readable " (friendly-name location)" suffix describing
// the resource this message is attached to, or "" when there is no resource.
func (m *Message) Origin() string {
	if m.Resource == nil {
		return ""
	}
	loc := ""
	if ref := m.Resource.Origin.Reference(); ref != nil {
		loc = " " + ref.String()
		if m.Line != 0 {
			// Substitute the message's own line number into the location.
			loc = m.ReplaceLine(loc)
		}
	}
	return " (" + m.Resource.Origin.FriendlyName() + loc + ")"
}
// String implements io.Stringer, rendering "LEVEL [CODE]ORIGIN detail".
func (m *Message) String() string {
	detail := fmt.Sprintf(m.Type.Template(), m.Parameters...)
	return fmt.Sprintf("%v [%v]%s %s", m.Type.Level(), m.Type.Code(), m.Origin(), detail)
}
// MarshalJSON satisfies the Marshaler interface by serializing the
// unstructured form of the message, with origin information included.
func (m *Message) MarshalJSON() ([]byte, error) {
	return json.Marshal(m.Unstructured(true))
}
// NewMessageType returns a new MessageType instance with the given
// level, code and template.
func NewMessageType(level Level, code, template string) *MessageType {
	mt := &MessageType{level: level, code: code, template: template}
	return mt
}
// NewMessage returns a new Message instance from an existing type,
// attaching the given resource and template parameters.
func NewMessage(mt *MessageType, r *resource.Instance, p ...any) Message {
	msg := Message{Type: mt, Resource: r, Parameters: p}
	return msg
}
// ReplaceLine replaces the trailing line number in a "file:line"-style
// reference string with the line number carried on the Message. The input is
// returned unchanged when it has no colon or when the final segment is not
// numeric.
func (m Message) ReplaceLine(l string) string {
	idx := strings.LastIndex(l, ":")
	if idx < 0 {
		return l
	}
	// Only substitute when the existing tail is a parseable integer.
	if _, err := strconv.Atoi(strings.TrimSpace(l[idx+1:])); err != nil {
		return l
	}
	return l[:idx+1] + strconv.Itoa(m.Line)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package diag
import (
"sort"
)
// Messages is a slice of Message items.
type Messages []Message
// Add appends one or more messages to the list.
func (ms *Messages) Add(m ...Message) {
	for _, msg := range m {
		*ms = append(*ms, msg)
	}
}
// Sort the message lexicographically by level, code, resource origin name, then string.
func (ms *Messages) Sort() {
	sort.Slice(*ms, func(i, j int) bool {
		a, b := (*ms)[i], (*ms)[j]
		switch {
		case a.Type.Level() != b.Type.Level():
			// Levels compare by their explicit sortOrder field, not alphabetically.
			return a.Type.Level().sortOrder < b.Type.Level().sortOrder
		case a.Type.Code() != b.Type.Code():
			return a.Type.Code() < b.Type.Code()
		// Messages with no attached resource sort ahead of those with one.
		case a.Resource == nil && b.Resource != nil:
			return true
		case a.Resource != nil && b.Resource == nil:
			return false
		case a.Resource != nil && b.Resource != nil && a.Resource.Origin.Comparator() != b.Resource.Origin.Comparator():
			return a.Resource.Origin.Comparator() < b.Resource.Origin.Comparator()
		default:
			// Final tiebreak: the fully rendered message string.
			return a.String() < b.String()
		}
	})
}
// SortedDedupedCopy returns a different sorted (and deduped) Messages struct.
// The receiver is left unmodified.
func (ms *Messages) SortedDedupedCopy() Messages {
	sorted := append((*ms)[:0:0], *ms...)
	sorted.Sort()
	// The slice is sorted, so duplicates (messages whose string
	// representations are equal) are adjacent; keep the first of each run.
	var out Messages
	for _, m := range sorted {
		if n := len(out); n > 0 && out[n-1].String() == m.String() {
			continue
		}
		out = append(out, m)
	}
	return out
}
// SetDocRef sets the doc URL reference tracker on every message in the list
// and returns the receiver for chaining.
func (ms *Messages) SetDocRef(docRef string) *Messages {
	for i := 0; i < len(*ms); i++ {
		(*ms)[i].DocRef = docRef
	}
	return ms
}
// FilterOutLowerThan only keeps messages at or above the specified output level.
func (ms *Messages) FilterOutLowerThan(outputLevel Level) Messages {
	kept := Messages{}
	for _, m := range *ms {
		if !m.Type.Level().IsWorseThanOrEqualTo(outputLevel) {
			continue
		}
		kept = append(kept, m)
	}
	return kept
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package incluster
import (
"fmt"
"strings"
"time"
v1alpha12 "istio.io/api/analysis/v1alpha1"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/config/kube/crdclient"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/status"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis/analyzers"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/legacy/util/kuberesource"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/concurrent"
"istio.io/istio/pkg/util/sets"
)
// Controller manages repeatedly running analyzers in istiod, and reporting results
// via istio status fields.
type Controller struct {
	// analyzer runs the combined analysis over the watched config sources.
	analyzer *local.IstiodAnalyzer
	// statusctl enqueues analysis messages as resource status updates.
	statusctl *status.Controller
}
// NewController builds the in-cluster analysis controller: it combines all
// registered analyzers, feeds them from both the shared writable config store
// and a dedicated CRD client (filtered so nothing is watched twice), and
// registers a status controller that writes analysis messages into resource
// status fields. It blocks until the kube client caches have synced.
func NewController(stop <-chan struct{}, rwConfigStore model.ConfigStoreController,
	kubeClient kube.Client, revision, namespace string, statusManager *status.Manager, domainSuffix string,
) (*Controller, error) {
	analyzer := analyzers.AllCombined()
	all := kuberesource.ConvertInputsToSchemas(analyzer.Metadata().Inputs)
	// Collection reporting is not needed in-cluster, hence the no-op hook.
	ia := local.NewIstiodAnalyzer(analyzer, "", resource.Namespace(namespace), func(name config.GroupVersionKind) {})
	ia.AddSource(rwConfigStore)
	// Filter out configs watched by rwConfigStore so we don't watch multiple times
	store := crdclient.NewForSchemas(kubeClient,
		crdclient.Option{
			Revision:     revision,
			DomainSuffix: domainSuffix,
			Identifier:   "analysis-controller",
			FiltersByGVK: ia.GetFiltersByGVK(),
		},
		all.Remove(rwConfigStore.Schemas().All()...))
	ia.AddSource(store)
	kubeClient.RunAndWait(stop)
	err := ia.Init(stop)
	if err != nil {
		return nil, fmt.Errorf("unable to initialize analysis controller, releasing lease: %s", err)
	}
	ctl := statusManager.CreateIstioStatusController(func(status *v1alpha1.IstioStatus, context any) *v1alpha1.IstioStatus {
		msgs := context.(diag.Messages)
		// zero out analysis messages, as this is the sole controller for those
		status.ValidationMessages = []*v1alpha12.AnalysisMessageBase{}
		for _, msg := range msgs {
			status.ValidationMessages = append(status.ValidationMessages, msg.AnalysisMessageBase())
		}
		return status
	})
	return &Controller{analyzer: ia, statusctl: ctl}, nil
}
// Run is blocking. It repeatedly re-analyzes the subset of analyzers relevant
// to recently-changed config kinds (debounced) and enqueues the resulting
// messages as status updates, until stop is closed.
func (c *Controller) Run(stop <-chan struct{}) {
	db := concurrent.Debouncer[config.GroupVersionKind]{}
	chKind := make(chan config.GroupVersionKind, 10)
	// Feed each config change into the debouncer keyed by the changed kind so
	// re-analysis can be restricted to the relevant analyzers.
	for _, k := range c.analyzer.Schemas().All() {
		c.analyzer.RegisterEventHandler(k.GroupVersionKind(), func(oldcfg config.Config, newcfg config.Config, ev model.Event) {
			gvk := oldcfg.GroupVersionKind
			if (gvk == config.GroupVersionKind{}) {
				// Add events carry an empty old config; fall back to the new one.
				gvk = newcfg.GroupVersionKind
			}
			chKind <- gvk
		})
	}
	// oldmsgs remembers the previous run's messages per analyzer so stale
	// statuses can be cleared on the next run.
	oldmsgs := map[string]diag.Messages{}
	pushFn := func(combinedKinds sets.Set[config.GroupVersionKind]) {
		res, err := c.analyzer.ReAnalyzeSubset(combinedKinds, stop)
		if err != nil {
			log.Errorf("In-cluster analysis has failed: %s", err)
			return
		}
		// reorganize messages to map
		index := map[status.Resource]diag.Messages{}
		for _, m := range res.Messages {
			key := status.ResourceFromMetadata(m.Resource.Metadata)
			index[key] = append(index[key], m)
		}
		// if we previously had a message that has been removed, ensure it is removed
		// TODO: this creates a state destruction problem when istiod crashes
		// in that old messages may not be removed. Not sure how to fix this
		// other than write every object's status every loop.
		for _, a := range res.ExecutedAnalyzers {
			for _, m := range oldmsgs[a] {
				key := status.ResourceFromMetadata(m.Resource.Metadata)
				if _, ok := index[key]; !ok {
					index[key] = diag.Messages{}
				}
			}
			oldmsgs[a] = res.MappedMessages[a]
		}
		// BUGFIX: enqueue each resource exactly once, after ALL analyzers'
		// stale messages have been folded into the index. Previously this loop
		// was nested inside the analyzer loop above, which enqueued every
		// resource once per executed analyzer and could enqueue before the
		// index was complete.
		for r, m := range index {
			// don't try to write status for non-istio types
			if strings.HasSuffix(r.Group, "istio.io") {
				log.Debugf("enqueueing update for %s/%s", r.Namespace, r.Name)
				c.statusctl.EnqueueStatusUpdateResource(m, r)
			}
		}
		log.Debugf("finished enqueueing all statuses")
	}
	db.Run(chKind, stop, 1*time.Second, features.AnalysisInterval, pushFn)
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kube
import (
"fmt"
"path/filepath"
"strings"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/gvk"
)
// Origin is a K8s specific implementation of resource.Origin
type Origin struct {
	// Type is the GroupVersionKind of the originating resource.
	Type config.GroupVersionKind
	// FullName is the namespace/name identifier of the resource.
	FullName resource.FullName
	// ResourceVersion is the Kubernetes resource version of the resource.
	ResourceVersion resource.Version
	// Ref points at the source location (e.g. file position), or nil.
	Ref resource.Reference
	// FieldsMap maps field paths to line numbers, when known.
	FieldsMap map[string]int
}
// Compile-time assertions that Origin and Position satisfy the resource interfaces.
var (
	_ resource.Origin    = &Origin{}
	_ resource.Reference = &Position{}
)
// FriendlyName implements resource.Origin.
func (o *Origin) FriendlyName() string {
	parts := strings.Split(o.FullName.String(), "/")
	if len(parts) != 2 {
		return fmt.Sprintf("%s %s", o.Type.Kind, o.FullName.String())
	}
	// The istioctl convention is <type> [<namespace>/]<name>.
	// This code has no notion of a default and always shows the namespace.
	return fmt.Sprintf("%s %s/%s", o.Type.Kind, parts[0], parts[1])
}
// Comparator returns a stable "kind/name/namespace" key used to order origins.
func (o *Origin) Comparator() string {
	return strings.Join([]string{o.Type.Kind, o.FullName.Name.String(), o.FullName.Namespace.String()}, "/")
}
// Namespace implements resource.Origin.
func (o *Origin) Namespace() resource.Namespace {
	// Special case: the namespace of a namespace resource is its own name.
	if o.Type != gvk.Namespace {
		return o.FullName.Namespace
	}
	return resource.Namespace(o.FullName.Name)
}
// Reference implements resource.Origin.
func (o *Origin) Reference() resource.Reference { return o.Ref }
// FieldMap implements resource.Origin.
func (o *Origin) FieldMap() map[string]int { return o.FieldsMap }
// Position is a representation of the location of a source.
type Position struct {
	Filename string // filename, if any
	Line     int    // line number, starting at 1
}
// String outputs the string representation of the position as "file:line",
// or just the filename when the position is invalid or refers to a JSON file.
func (p *Position) String() string {
	// TODO: support json file position.
	if !p.isValid() || filepath.Ext(p.Filename) == ".json" {
		return p.Filename
	}
	// isValid guarantees a non-empty filename, so always join with ":".
	return fmt.Sprintf("%s:%d", p.Filename, p.Line)
}
// isValid reports whether the position carries both a filename and a positive line.
func (p *Position) isValid() bool {
	return p.Filename != "" && p.Line > 0
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kuberesource
import (
"fmt"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/resource"
)
// ConvertInputsToSchemas maps a list of GroupVersionKinds to their registered
// schemas, silently skipping any kind not present in collections.All.
func ConvertInputsToSchemas(inputs []config.GroupVersionKind) collection.Schemas {
	b := collection.NewSchemasBuilder()
	for _, in := range inputs {
		if s, found := collections.All.FindByGroupVersionKind(in); found {
			_ = b.Add(s)
		}
	}
	return b.Build()
}
// DefaultExcludedSchemas returns the Kubernetes schemas that are excluded
// from file-based analysis by default (see knownTypes).
func DefaultExcludedSchemas() collection.Schemas {
	b := collection.NewSchemasBuilder()
	for _, s := range collections.Kube.All() {
		if !IsDefaultExcluded(s) {
			continue
		}
		_ = b.Add(s)
	}
	return b.Build()
}
// the following code minimally duplicates logic from galley/pkg/config/source/kube/rt/known.go
// without propagating the many dependencies it comes with.
// knownTypes is the set of core (empty-group) kinds treated as runtime-only
// and excluded from file-based analysis by default.
var knownTypes = map[string]struct{}{
	asTypesKey("", "Service"):   {},
	asTypesKey("", "Namespace"): {},
	asTypesKey("", "Node"):      {},
	asTypesKey("", "Pod"):       {},
	asTypesKey("", "Secret"):    {},
}
// asTypesKey builds the lookup key for knownTypes: "group/kind", or just
// "kind" for the core (empty) group.
func asTypesKey(group, kind string) string {
	if group != "" {
		return fmt.Sprintf("%s/%s", group, kind)
	}
	return kind
}
// IsDefaultExcluded reports whether the schema is in the default exclusion set.
func IsDefaultExcluded(res resource.Schema) bool {
	_, excluded := knownTypes[asTypesKey(res.Group(), res.Kind())]
	return excluded
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"encoding/json"
"fmt"
"istio.io/istio/pilot/pkg/config/file"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/legacy/source/kube"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collections"
sresource "istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/log"
)
// NewContext allows tests to use istiodContext without exporting it. returned context is not threadsafe.
func NewContext(store model.ConfigStore, cancelCh <-chan struct{}, collectionReporter CollectionReporterFn) analysis.Context {
	return &istiodContext{
		store:              store,
		cancelCh:           cancelCh,
		messages:           map[string]*diag.Messages{},
		collectionReporter: collectionReporter,
		// Per-resource and per-collection lookup caches start empty.
		found:            map[key]*resource.Instance{},
		foundCollections: map[config.GroupVersionKind]map[resource.FullName]*resource.Instance{},
	}
}
// istiodContext implements analysis.Context over a model.ConfigStore,
// caching resource lookups and collecting messages per analyzer.
// It is not safe for concurrent use.
type istiodContext struct {
	// store is the backing config store queried by Find/ForEach.
	store model.ConfigStore
	// cancelCh signals that analysis should stop (see Canceled).
	cancelCh <-chan struct{}
	// messages accumulates diagnostics keyed by analyzer name.
	messages map[string]*diag.Messages
	// collectionReporter is invoked whenever a collection is accessed.
	collectionReporter CollectionReporterFn
	// found caches single-resource lookups.
	found map[key]*resource.Instance
	// foundCollections caches whole-collection listings.
	foundCollections map[config.GroupVersionKind]map[resource.FullName]*resource.Instance
	// currentAnalyzer names the analyzer whose Report calls are being recorded.
	currentAnalyzer string
}
// key identifies a single resource lookup: collection plus full name.
type key struct {
	collectionName config.GroupVersionKind
	name           resource.FullName
}
// Report records a diagnostic message under the currently-set analyzer.
// The collection argument is unused but required by the analysis.Context interface.
func (i *istiodContext) Report(c config.GroupVersionKind, m diag.Message) {
	if i.messages[i.currentAnalyzer] == nil {
		i.messages[i.currentAnalyzer] = &diag.Messages{}
	}
	i.messages[i.currentAnalyzer].Add(m)
}
// SetAnalyzer selects the analyzer under which subsequent Report calls are filed.
func (i *istiodContext) SetAnalyzer(analyzerName string) { i.currentAnalyzer = analyzerName }
// GetMessages returns the diagnostics collected so far. With no arguments it
// returns messages from every analyzer; otherwise only from the named ones.
func (i *istiodContext) GetMessages(analyzerNames ...string) diag.Messages {
	result := diag.Messages{}
	if len(analyzerNames) == 0 {
		// No names acts as a wildcard: collect everything.
		for _, msgs := range i.messages {
			result.Add(*msgs...)
		}
		return result
	}
	for _, name := range analyzerNames {
		if msgs, ok := i.messages[name]; ok {
			result.Add(*msgs...)
		}
	}
	return result
}
// Find looks up a single resource by collection and full name, consulting the
// per-resource and per-collection caches before falling back to the store.
// It returns nil when the resource does not exist or cannot be converted.
func (i *istiodContext) Find(col config.GroupVersionKind, name resource.FullName) *resource.Instance {
	i.collectionReporter(col)
	if result, ok := i.found[key{col, name}]; ok {
		return result
	}
	if cache, ok := i.foundCollections[col]; ok {
		if result, ok2 := cache[name]; ok2 {
			return result
		}
	}
	colschema, ok := collections.All.FindByGroupVersionKind(col)
	if !ok {
		log.Warnf("collection %s could not be found", col.String())
		return nil
	}
	cfg := i.store.Get(colschema.GroupVersionKind(), name.Name.String(), name.Namespace.String())
	if cfg == nil {
		log.Debugf(" %s resource [%s/%s] could not be found", colschema.GroupVersionKind(), name.Namespace.String(), name.Name.String())
		return nil
	}
	result, err := cfgToInstance(*cfg, col, colschema)
	if err != nil {
		// BUGFIX: log namespace/name — previously the namespace was printed twice.
		log.Errorf("failed converting found config %s %s/%s to instance: %s, ",
			cfg.Meta.GroupVersionKind.Kind, cfg.Meta.Namespace, cfg.Meta.Name, err)
		return nil
	}
	// Cache the successful conversion for subsequent lookups.
	i.found[key{col, name}] = result
	return result
}
// Exists reports whether the named resource can be found in the given collection.
func (i *istiodContext) Exists(col config.GroupVersionKind, name resource.FullName) bool {
	i.collectionReporter(col)
	found := i.Find(col, name)
	return found != nil
}
// ForEach iterates over every resource in the collection, invoking fn until it
// returns false. Results are served from the per-collection cache when
// available; otherwise the store is listed and the cache is populated as a
// side effect. Note that even after fn returns false, iteration continues to
// finish filling the cache.
func (i *istiodContext) ForEach(col config.GroupVersionKind, fn analysis.IteratorFn) {
	i.collectionReporter(col)
	if cached, ok := i.foundCollections[col]; ok {
		for _, res := range cached {
			if !fn(res) {
				break
			}
		}
		return
	}
	colschema, ok := collections.All.FindByGroupVersionKind(col)
	if !ok {
		// TODO: demote this log before merging
		log.Errorf("collection %s could not be found", col.String())
		return
	}
	// TODO: this needs to include file source as well
	cfgs := i.store.List(colschema.GroupVersionKind(), "")
	// broken records that fn asked to stop; we keep converting to fill the cache.
	broken := false
	cache := map[resource.FullName]*resource.Instance{}
	for _, cfg := range cfgs {
		k := key{
			col, resource.FullName{
				Name:      resource.LocalName(cfg.Name),
				Namespace: resource.Namespace(cfg.Namespace),
			},
		}
		// Prefer an instance already converted by a prior Find call.
		if res, ok := i.found[k]; ok {
			if !broken && !fn(res) {
				broken = true
			}
			cache[res.Metadata.FullName] = res
			continue
		}
		res, err := cfgToInstance(cfg, col, colschema)
		if err != nil {
			// TODO: demote this log before merging
			log.Error(err)
			// TODO: is continuing the right thing here?
			continue
		}
		if !broken && !fn(res) {
			broken = true
		}
		cache[res.Metadata.FullName] = res
	}
	// Only install a non-empty cache, so an empty listing is retried next time.
	if len(cache) > 0 {
		i.foundCollections[col] = cache
	}
}
// Canceled reports (without blocking) whether analysis has been signaled to stop.
func (i *istiodContext) Canceled() bool {
	select {
	case <-i.cancelCh:
		return true
	default:
	}
	return false
}
// cfgToInstance converts a pilot config.Config into a resource.Instance,
// reconstructing the file-source origin (field map and file position) from the
// annotations that the file source attaches during parsing.
func cfgToInstance(cfg config.Config, col config.GroupVersionKind, colschema sresource.Schema) (*resource.Instance, error) {
	res := resource.PilotConfigToInstance(&cfg, colschema)
	fmstring := cfg.Meta.Annotations[file.FieldMapKey]
	var out map[string]int
	if fmstring != "" {
		err := json.Unmarshal([]byte(fmstring), &out)
		if err != nil {
			return nil, fmt.Errorf("error parsing fieldmap: %s", err)
		}
	}
	refstring := cfg.Meta.Annotations[file.ReferenceKey]
	// outref deliberately stays a nil interface when no reference annotation
	// is present, so Origin.Reference() returns nil.
	var outref resource.Reference
	if refstring != "" {
		outref = &kube.Position{}
		err := json.Unmarshal([]byte(refstring), outref)
		if err != nil {
			return nil, fmt.Errorf("error parsing reference: %s", err)
		}
	}
	res.Origin = &kube.Origin{
		Type:            col,
		FullName:        res.Metadata.FullName,
		ResourceVersion: resource.Version(cfg.ResourceVersion),
		Ref:             outref,
		FieldsMap:       out,
	}
	// MCP is not aware of generation, add that here.
	res.Metadata.Generation = cfg.Generation
	return res, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"bytes"
"text/template"
)
// defaultIstioIngressGateway is a synthetic Pod + Service manifest, rendered
// via text/template with {{.namespace}} and {{.ingressService}}, used to stand
// in for an ingress gateway workload during analysis.
const defaultIstioIngressGateway = `
apiVersion: v1
kind: Pod
metadata:
  labels:
    istio: ingressgateway
  name: {{.ingressService}}-dummypod
  namespace: {{.namespace}}
spec:
  containers:
  - args:
    name: istio-proxy
---
apiVersion: v1
kind: Service
metadata:
  name: {{.ingressService}}
  namespace: {{.namespace}}
spec:
  ports:
  - name: http2
    nodePort: 31380
    port: 80
    protocol: TCP
    targetPort: 80
  - name: https
    nodePort: 31390
    port: 443
    protocol: TCP
    targetPort: 443
  - name: tcp
    nodePort: 31400
    port: 31400
    protocol: TCP
    targetPort: 31400
  - name: tls
    nodePort: 31447
    port: 15443
    protocol: TCP
    targetPort: 15443
  selector:
    istio: ingressgateway
`
// getDefaultIstioIngressGateway renders the synthetic ingress gateway manifest
// for the given namespace and service name.
func getDefaultIstioIngressGateway(namespace, ingressService string) (string, error) {
	params := map[string]string{
		"namespace":      namespace,
		"ingressService": ingressService,
	}
	return generate(defaultIstioIngressGateway, params)
}
// generate parses tmpl as a text/template and executes it with params.
// It panics (via template.Must) if the template itself is malformed.
func generate(tmpl string, params map[string]string) (string, error) {
	var out bytes.Buffer
	parsed := template.Must(template.New("code").Parse(tmpl))
	if err := parsed.Execute(&out, params); err != nil {
		return "", err
	}
	return out.String(), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package local
import (
"context"
"fmt"
"io"
"os"
"strings"
"github.com/hashicorp/go-multierror"
io2 "github.com/AdamKorcz/bugdetectors/io"
"github.com/ryanuber/go-glob"
v1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"istio.io/api/annotation"
"istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/config/aggregate"
"istio.io/istio/pilot/pkg/config/file"
"istio.io/istio/pilot/pkg/config/kube/crdclient"
"istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/leaderelection/k8sleaderelection/k8sresourcelock"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/analysis/legacy/util/kuberesource"
"istio.io/istio/pkg/config/analysis/scope"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/resource"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/inject"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/util/sets"
)
// IstiodAnalyzer handles local analysis of k8s event sources, both live and file-based
type IstiodAnalyzer struct {
	// internalStore stores synthetic configs for analysis (mesh config, etc)
	internalStore model.ConfigStore
	// stores contains all the (non file) config sources to analyze
	stores []model.ConfigStoreController
	// fileSource contains all file bases sources
	fileSource *file.KubeSource
	// analyzer is the combined set of analyzers to execute.
	analyzer *analysis.CombinedAnalyzer
	// namespace restricts which namespace's messages are reported ("" = all).
	namespace resource.Namespace
	// istioNamespace is where istio control-plane config (meshconfig etc.) lives.
	istioNamespace resource.Namespace
	// initializedStore is the aggregated store built by Init.
	initializedStore model.ConfigStoreController
	// List of code and resource suppressions to exclude messages on
	suppressions []AnalysisSuppression
	// Mesh config for this analyzer. This can come from multiple sources, and the last added version will take precedence.
	meshCfg *v1alpha1.MeshConfig
	// Mesh networks config for this analyzer.
	meshNetworks *v1alpha1.MeshNetworks
	// Which kube resources are used by this analyzer
	// Derived from metadata and the specified analyzer and transformer providers
	kubeResources collection.Schemas
	// Hook function called when a collection is used in analysis
	collectionReporter CollectionReporterFn
	// clientsToRun are kube clients that Init must start before analysis.
	clientsToRun []kubelib.Client
}
// NewSourceAnalyzer is a drop-in replacement for the galley function, adapting to istiod analyzer.
// It simply delegates to NewIstiodAnalyzer.
func NewSourceAnalyzer(analyzer *analysis.CombinedAnalyzer, namespace, istioNamespace resource.Namespace, cr CollectionReporterFn) *IstiodAnalyzer {
	return NewIstiodAnalyzer(analyzer, namespace, istioNamespace, cr)
}
// NewIstiodAnalyzer creates a new IstiodAnalyzer with no sources. Use the Add*Source
// methods to add sources in ascending precedence order,
// then execute Analyze to perform the analysis
func NewIstiodAnalyzer(analyzer *analysis.CombinedAnalyzer, namespace,
	istioNamespace resource.Namespace, cr CollectionReporterFn,
) *IstiodAnalyzer {
	// collectionReporter hook function defaults to no-op
	if cr == nil {
		cr = func(config.GroupVersionKind) {}
	}
	// Get the closure of all input collections for our analyzer, paying attention to transforms
	kubeResources := kuberesource.ConvertInputsToSchemas(analyzer.Metadata().Inputs)
	// Start from the default mesh config / networks; AddSource calls may override later.
	mcfg := mesh.DefaultMeshConfig()
	sa := &IstiodAnalyzer{
		meshCfg:            mcfg,
		meshNetworks:       mesh.DefaultMeshNetworks(),
		analyzer:           analyzer,
		namespace:          namespace,
		internalStore:      memory.Make(collection.SchemasFor(collections.MeshNetworks, collections.MeshConfig)),
		istioNamespace:     istioNamespace,
		kubeResources:      kubeResources,
		collectionReporter: cr,
	}
	return sa
}
// ReAnalyzeSubset runs only the analyzers relevant to the given config kinds,
// assuming Init has already been called.
func (sa *IstiodAnalyzer) ReAnalyzeSubset(kinds sets.Set[config.GroupVersionKind], cancel <-chan struct{}) (AnalysisResult, error) {
	return sa.internalAnalyze(sa.analyzer.RelevantSubset(kinds), cancel)
}
// ReAnalyze loads the sources and executes the analysis, assuming init is already called
func (sa *IstiodAnalyzer) ReAnalyze(cancel <-chan struct{}) (AnalysisResult, error) {
	return sa.internalAnalyze(sa.analyzer, cancel)
}
// internalAnalyze waits for the initialized store to sync, runs the given
// analyzer set, then filters and sorts the resulting messages — both per
// analyzer (MappedMessages) and combined (Messages).
func (sa *IstiodAnalyzer) internalAnalyze(a *analysis.CombinedAnalyzer, cancel <-chan struct{}) (AnalysisResult, error) {
	store := sa.initializedStore
	var result AnalysisResult
	result.ExecutedAnalyzers = a.AnalyzerNames()
	// RemoveSkipped mutates a to drop analyzers whose inputs are unavailable;
	// it must run before Analyze.
	result.SkippedAnalyzers = a.RemoveSkipped(store.Schemas())
	result.MappedMessages = make(map[string]diag.Messages, len(result.ExecutedAnalyzers))
	kubelib.WaitForCacheSync("istiod analyzer", cancel, store.HasSynced)
	ctx := NewContext(store, cancel, sa.collectionReporter)
	a.Analyze(ctx)
	// TODO(hzxuzhonghu): we do not need set here
	namespaces := sets.New[resource.Namespace]()
	if sa.namespace != "" {
		namespaces.Insert(sa.namespace)
	}
	for _, analyzerName := range result.ExecutedAnalyzers {
		// TODO: analysis is run for all namespaces, even if they are requested to be filtered.
		msgs := filterMessages(ctx.(*istiodContext).GetMessages(analyzerName), namespaces, sa.suppressions)
		result.MappedMessages[analyzerName] = msgs.SortedDedupedCopy()
	}
	msgs := filterMessages(ctx.(*istiodContext).GetMessages(), namespaces, sa.suppressions)
	result.Messages = msgs.SortedDedupedCopy()
	return result, nil
}
// Analyze loads the sources and executes the analysis.
func (sa *IstiodAnalyzer) Analyze(cancel <-chan struct{}) (AnalysisResult, error) {
	if err := sa.Init(cancel); err != nil {
		return AnalysisResult{}, err
	}
	return sa.ReAnalyze(cancel)
}
// Init prepares the analyzer for execution: it seeds the internal store with
// the synthetic meshconfig/meshnetworks resources, starts any pending kube
// clients, and aggregates all sources (file source first, so it takes
// precedence) into a single writeable cache.
func (sa *IstiodAnalyzer) Init(cancel <-chan struct{}) error {
	// We need at least one non-meshcfg source
	if len(sa.stores) == 0 && sa.fileSource == nil {
		return fmt.Errorf("at least one file and/or Kubernetes source must be provided")
	}
	// TODO: there's gotta be a better way to convert v1meshconfig to config.Config...
	// Create a store containing mesh config. There should be exactly one.
	_, err := sa.internalStore.Create(config.Config{
		Meta: config.Meta{
			Name:             "meshconfig",
			Namespace:        sa.istioNamespace.String(),
			GroupVersionKind: gvk.MeshConfig,
		},
		Spec: sa.meshCfg,
	})
	if err != nil {
		return fmt.Errorf("something unexpected happened while creating the meshconfig: %s", err)
	}
	// Create a store containing meshnetworks. There should be exactly one.
	_, err = sa.internalStore.Create(config.Config{
		Meta: config.Meta{
			Name:             "meshnetworks",
			Namespace:        sa.istioNamespace.String(),
			GroupVersionKind: gvk.MeshNetworks,
		},
		Spec: sa.meshNetworks,
	})
	if err != nil {
		return fmt.Errorf("something unexpected happened while creating the meshnetworks: %s", err)
	}
	allstores := append(sa.stores, dfCache{ConfigStore: sa.internalStore})
	if sa.fileSource != nil {
		// File source takes the highest precedence, since files are resources to be configured to in-cluster resources.
		// The order here does matter - aggregated store takes the first available resource.
		allstores = append([]model.ConfigStoreController{sa.fileSource}, allstores...)
	}
	for _, c := range sa.clientsToRun {
		// TODO: this could be parallel
		c.RunAndWait(cancel)
	}
	store, err := aggregate.MakeWriteableCache(allstores, nil)
	if err != nil {
		return err
	}
	go store.Run(cancel)
	sa.initializedStore = store
	return nil
}
// dfCache wraps a plain ConfigStore so it can be used where a
// ConfigStoreController is expected (always-synced, no event handling).
type dfCache struct {
	model.ConfigStore
}
// RegisterEventHandler is not supported for the static internal store.
func (d dfCache) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
	panic("implement me")
}
// Run intentionally left empty
func (d dfCache) Run(_ <-chan struct{}) {
}
// HasSynced always reports true: the wrapped store is static.
func (d dfCache) HasSynced() bool { return true }
// SetSuppressions will set the list of suppressions for the analyzer. Any
// resource that matches the provided suppression will not be included in the
// final message output.
func (sa *IstiodAnalyzer) SetSuppressions(suppressions []AnalysisSuppression) {
	sa.suppressions = suppressions
}
// AddTestReaderKubeSource adds a yaml source to the analyzer, which will analyze
// runtime resources like pods and namespaces for use in tests.
func (sa *IstiodAnalyzer) AddTestReaderKubeSource(readers []ReaderSource) error {
	return sa.addReaderKubeSourceInternal(readers, true)
}
// AddReaderKubeSource adds a source based on the specified k8s yaml files to the current IstiodAnalyzer.
// Unlike AddTestReaderKubeSource, runtime-only resources (pods, namespaces, ...) are excluded.
func (sa *IstiodAnalyzer) AddReaderKubeSource(readers []ReaderSource) error {
	return sa.addReaderKubeSourceInternal(readers, false)
}
// addReaderKubeSourceInternal parses the given readers into the (lazily
// created) file source. When includeRuntimeResources is true (test mode),
// runtime-only resources such as pods and namespaces are parsed as well;
// otherwise the default-excluded schemas are stripped. Read/parse errors are
// accumulated and returned together; remaining readers are still processed.
func (sa *IstiodAnalyzer) addReaderKubeSourceInternal(readers []ReaderSource, includeRuntimeResources bool) error {
	var src *file.KubeSource
	if sa.fileSource != nil {
		src = sa.fileSource
	} else {
		var readerResources collection.Schemas
		if includeRuntimeResources {
			readerResources = sa.kubeResources
		} else {
			readerResources = sa.kubeResources.Remove(kuberesource.DefaultExcludedSchemas().All()...)
		}
		src = file.NewKubeSource(readerResources)
		sa.fileSource = src
	}
	src.SetDefaultNamespace(sa.namespace)
	// Skip resources living in (or namespaces named after) ignored namespaces.
	src.SetNamespacesFilter(func(obj interface{}) bool {
		cfg, ok := obj.(config.Config)
		if !ok {
			return false
		}
		meta := cfg.GetNamespace()
		if cfg.Meta.GroupVersionKind.Kind == gvk.Namespace.Kind {
			meta = cfg.GetName()
		}
		return !inject.IgnoredNamespaces.Contains(meta)
	})
	var errs error
	// If we encounter any errors reading or applying files, track them but attempt to continue
	for _, r := range readers {
		// BUGFIX: read via the standard library. The previous call went through
		// a leftover fuzzing-instrumentation shim
		// (github.com/AdamKorcz/bugdetectors/io) with a hard-coded source path.
		by, err := io.ReadAll(r.Reader)
		if err != nil {
			errs = multierror.Append(errs, err)
			continue
		}
		if err = src.ApplyContent(r.Name, string(by)); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return errs
}
// AddRunningKubeSource adds a source based on a running k8s cluster to the current IstiodAnalyzer
// Also tries to get mesh config from the running cluster, if it can
func (sa *IstiodAnalyzer) AddRunningKubeSource(c kubelib.Client) {
	// Delegates with the "default" revision.
	sa.AddRunningKubeSourceWithRevision(c, "default")
}
// isIstioConfigMap reports whether obj is an istio-prefixed ConfigMap that is
// not a leader-election record.
func isIstioConfigMap(obj any) bool {
	cm, ok := obj.(*v1.ConfigMap)
	if !ok {
		return false
	}
	if _, isLeaderElection := cm.GetAnnotations()[k8sresourcelock.LeaderElectionRecordAnnotationKey]; isLeaderElection {
		return false
	}
	return strings.HasPrefix(cm.GetName(), "istio")
}
// secretFieldSelector excludes Helm release secrets and service-account token
// secrets — two commonly large secret types — from secret watches.
var secretFieldSelector = fields.AndSelectors(
	fields.OneTermNotEqualSelector("type", "helm.sh/release.v1"),
	fields.OneTermNotEqualSelector("type", string(v1.SecretTypeServiceAccountToken))).String()
// GetFiltersByGVK returns the informer filters used by the analyzer:
// ConfigMaps are limited to Istio-owned maps in the Istio namespace, and
// Secrets are limited by secretFieldSelector (skipping Helm release and
// service-account token secrets).
func (sa *IstiodAnalyzer) GetFiltersByGVK() map[config.GroupVersionKind]kubetypes.Filter {
	filters := make(map[config.GroupVersionKind]kubetypes.Filter, 2)
	filters[gvk.ConfigMap] = kubetypes.Filter{
		Namespace:    sa.istioNamespace.String(),
		ObjectFilter: isIstioConfigMap,
	}
	filters[gvk.Secret] = kubetypes.Filter{
		FieldSelector: secretFieldSelector,
	}
	return filters
}
// AddRunningKubeSourceWithRevision adds config stores backed by the running cluster c
// to the analyzer: one revision-scoped store for Istio CRD resources and one
// revision-agnostic store for service-discovery resources. It also attempts to load
// mesh config and mesh networks from the cluster's Istio configmap, logging (rather
// than failing) if that is not possible.
func (sa *IstiodAnalyzer) AddRunningKubeSourceWithRevision(c kubelib.Client, revision string) {
	// This makes the assumption we don't care about Helm secrets or SA token secrets - two common
	// large secrets in clusters.
	// This is a best effort optimization only; the code would behave correctly if we watched all secrets.
	ignoredNamespacesSelectorForField := func(field string) string {
		selectors := make([]fields.Selector, 0, len(inject.IgnoredNamespaces))
		for _, ns := range inject.IgnoredNamespaces.UnsortedList() {
			selectors = append(selectors, fields.OneTermNotEqualSelector(field, ns))
		}
		return fields.AndSelectors(selectors...).String()
	}
	// Namespace objects are filtered by their own name; everything else by the namespace it lives in.
	namespaceFieldSelector := ignoredNamespacesSelectorForField("metadata.name")
	generalSelectors := ignoredNamespacesSelectorForField("metadata.namespace")
	// TODO: are either of these string constants intended to vary?
	// Fetch Istio CRD resources scoped to the requested revision.
	store := crdclient.NewForSchemas(c, crdclient.Option{
		Revision:     revision,
		DomainSuffix: "cluster.local",
		Identifier:   "analysis-controller",
		FiltersByGVK: map[config.GroupVersionKind]kubetypes.Filter{
			gvk.ConfigMap: {
				Namespace:    sa.istioNamespace.String(),
				ObjectFilter: isIstioConfigMap,
			},
		},
	}, sa.kubeResources.Remove(kuberesource.DefaultExcludedSchemas().All()...))
	sa.stores = append(sa.stores, store)
	// Fetch service-discovery resources without a specific revision.
	store = crdclient.NewForSchemas(c, crdclient.Option{
		DomainSuffix: "cluster.local",
		Identifier:   "analysis-controller",
		FiltersByGVK: map[config.GroupVersionKind]kubetypes.Filter{
			gvk.Secret: {
				FieldSelector: secretFieldSelector,
			},
			gvk.Namespace: {
				FieldSelector: namespaceFieldSelector,
			},
			gvk.Service: {
				FieldSelector: generalSelectors,
			},
			gvk.Pod: {
				FieldSelector: generalSelectors,
			},
			gvk.Deployment: {
				FieldSelector: generalSelectors,
			},
		},
	}, sa.kubeResources.Intersect(kuberesource.DefaultExcludedSchemas()))
	sa.stores = append(sa.stores, store)
	// RunAndWait must be called after NewForSchema so that the informers are all created and started.
	sa.clientsToRun = append(sa.clientsToRun, c)
	// Since we're using a running k8s source, try to get meshconfig and meshnetworks from the configmap.
	if err := sa.addRunningKubeIstioConfigMapSource(c); err != nil {
		_, err := c.Kube().CoreV1().Namespaces().Get(context.TODO(), sa.istioNamespace.String(), metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			// An AnalysisMessage already shows up to warn about the absence of the istio-system namespace, so keep this at debug level.
			scope.Analysis.Debugf("%v namespace not found. Istio may not be installed in the target cluster. "+
				"Using default mesh configuration values for analysis", sa.istioNamespace.String())
		} else if err != nil {
			scope.Analysis.Errorf("error getting mesh config from running kube source: %v", err)
		}
	}
}
// AddSource adds a source based on a user-supplied configstore to the current IstiodAnalyzer.
// Assumes that the source has the same or a subset of the resource types that this analyzer is configured with.
// This can be used by external users who import the analyzer as a module within their own controllers.
func (sa *IstiodAnalyzer) AddSource(src model.ConfigStoreController) {
	sa.stores = append(sa.stores, src)
}
// AddFileKubeMeshConfig reads mesh config from the specified yaml file, applies
// the standard defaults to it, and installs the result on the analyzer.
func (sa *IstiodAnalyzer) AddFileKubeMeshConfig(file string) error {
	yamlBytes, err := os.ReadFile(file)
	if err != nil {
		return err
	}
	meshCfg, err := mesh.ApplyMeshConfigDefaults(string(yamlBytes))
	if err != nil {
		return err
	}
	sa.meshCfg = meshCfg
	return nil
}
// AddFileKubeMeshNetworks reads a meshnetworks file and installs it on the analyzer.
func (sa *IstiodAnalyzer) AddFileKubeMeshNetworks(file string) error {
	networks, err := mesh.ReadMeshNetworks(file)
	if err != nil {
		return err
	}
	sa.meshNetworks = networks
	return nil
}
// AddDefaultResources adds some basic dummy Istio resources, based on mesh configuration.
// This is useful for files-only analysis cases where we don't expect the user to be including
// istio system resources and don't want to generate false positives because they aren't there.
// Respects mesh config when deciding which default resources should be generated.
func (sa *IstiodAnalyzer) AddDefaultResources() error {
	// Nothing to synthesize when the ingress controller is disabled.
	if sa.meshCfg.GetIngressControllerMode() == v1alpha1.MeshConfig_OFF {
		return nil
	}
	ingressResources, err := getDefaultIstioIngressGateway(sa.istioNamespace.String(), sa.meshCfg.GetIngressService())
	if err != nil {
		return err
	}
	readers := []ReaderSource{{Reader: strings.NewReader(ingressResources), Name: "internal-ingress"}}
	return sa.AddReaderKubeSource(readers)
}
// RegisterEventHandler fans the handler registration out to every underlying config store.
func (sa *IstiodAnalyzer) RegisterEventHandler(kind config.GroupVersionKind, handler model.EventHandler) {
	for i := range sa.stores {
		sa.stores[i].RegisterEventHandler(kind, handler)
	}
}
// Schemas returns the union of the schemas served by all underlying config stores.
func (sa *IstiodAnalyzer) Schemas() collection.Schemas {
	builder := collection.NewSchemasBuilder()
	for _, st := range sa.stores {
		for _, sc := range st.Schemas().All() {
			builder.MustAdd(sc)
		}
	}
	return builder.Build()
}
// addRunningKubeIstioConfigMapSource loads mesh config and mesh networks from the
// Istio configmap in the Istio namespace of the running cluster and installs them
// on the analyzer. Mesh config is applied even if the mesh networks key turns out
// to be missing afterwards.
func (sa *IstiodAnalyzer) addRunningKubeIstioConfigMapSource(client kubelib.Client) error {
	cm, err := client.Kube().CoreV1().ConfigMaps(string(sa.istioNamespace)).Get(context.TODO(), meshConfigMapName, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("could not read configmap %q from namespace %q: %v", meshConfigMapName, sa.istioNamespace, err)
	}
	meshYaml, found := cm.Data[meshConfigMapKey]
	if !found {
		return fmt.Errorf("missing config map key %q", meshConfigMapKey)
	}
	parsedCfg, err := mesh.ApplyMeshConfigDefaults(meshYaml)
	if err != nil {
		return fmt.Errorf("error parsing mesh config: %v", err)
	}
	sa.meshCfg = parsedCfg
	networksYaml, found := cm.Data[meshNetworksMapKey]
	if !found {
		return fmt.Errorf("missing config map key %q", meshNetworksMapKey)
	}
	networks, err := mesh.ParseMeshNetworks(networksYaml)
	if err != nil {
		return fmt.Errorf("error parsing mesh networks: %v", err)
	}
	sa.meshNetworks = networks
	return nil
}
// CollectionReporterFn is a hook function called whenever a collection is accessed
// through the AnalyzingDistributor's context. It receives the GroupVersionKind of
// the accessed collection.
type CollectionReporterFn func(config.GroupVersionKind)
// filterMessages drops analysis messages that are outside the requested namespaces,
// suppressed by a resource annotation, or matched by an entry in the suppressions
// list, returning the messages that remain.
// copied from processing/snapshotter/analyzingdistributor.go
func filterMessages(messages diag.Messages, namespaces sets.Set[resource.Namespace], suppressions []AnalysisSuppression) diag.Messages {
	// Pre-compute the namespace names as plain strings for membership checks below.
	nsNames := sets.New[string]()
	for k := range namespaces {
		nsNames.Insert(k.String())
	}
	var msgs diag.Messages
FilterMessages:
	for _, m := range messages {
		// Only keep messages for resources in namespaces we want to analyze if the
		// message doesn't have an origin (meaning we can't determine the
		// namespace). Also kept are cluster-level resources where the namespace is
		// the empty string. If no such limit is specified, keep them all.
		if len(namespaces) > 0 && m.Resource != nil && m.Resource.Origin.Namespace() != "" {
			if !nsNames.Contains(m.Resource.Origin.Namespace().String()) {
				continue FilterMessages
			}
		}
		// Filter out any messages on resources with suppression annotations.
		// The annotation holds a comma-separated list of codes; "*" suppresses all.
		if m.Resource != nil && m.Resource.Metadata.Annotations[annotation.GalleyAnalyzeSuppress.Name] != "" {
			for _, code := range strings.Split(m.Resource.Metadata.Annotations[annotation.GalleyAnalyzeSuppress.Name], ",") {
				if code == "*" || m.Type.Code() == code {
					scope.Analysis.Debugf("Suppressing code %s on resource %s due to resource annotation", m.Type.Code(), m.Resource.Origin.FriendlyName())
					continue FilterMessages
				}
			}
		}
		// Filter out any messages that match our suppressions.
		// A suppression applies when both its code matches and its (globbed)
		// resource name matches the message's origin.
		for _, s := range suppressions {
			if m.Resource == nil || s.Code != m.Type.Code() {
				continue
			}
			if !glob.Glob(s.ResourceName, m.Resource.Origin.FriendlyName()) {
				continue
			}
			scope.Analysis.Debugf("Suppressing code %s on resource %s due to suppressions list", m.Type.Code(), m.Resource.Origin.FriendlyName())
			continue FilterMessages
		}
		msgs = append(msgs, m)
	}
	return msgs
}
// AnalysisSuppression describes a resource and analysis code to be suppressed
// (e.g. ignored) during analysis. Used when a particular message code is to be
// ignored for a specific resource. See filterMessages for how suppressions are applied.
type AnalysisSuppression struct {
	// Code is the analysis code to suppress (e.g. "IST0104").
	Code string
	// ResourceName is the name of the resource to suppress the message for. For
	// K8s resources it has the same form as used by istioctl (e.g.
	// "DestinationRule default.istio-system"). Note that globbing wildcards are
	// supported (e.g. "DestinationRule *.istio-system").
	ResourceName string
}
// ReaderSource is a tuple of an io.Reader and the name (commonly a filepath) it came from.
type ReaderSource struct {
	// Name is the name of the source (commonly the path to a file, but can be "-" for sources read from stdin or "" if completely synthetic).
	Name string
	// Reader is the reader instance to use.
	Reader io.Reader
}
// Code generated by generate.main.go. DO NOT EDIT.
package msg
import (
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/resource"
)
var (
	// InternalError defines a diag.MessageType for message "InternalError".
	// Description: There was an internal error in the toolchain. This is almost always a bug in the implementation.
	InternalError = diag.NewMessageType(diag.Error, "IST0001", "Internal error: %v")
	// Deprecated defines a diag.MessageType for message "Deprecated".
	// Description: A feature that the configuration is depending on is now deprecated.
	Deprecated = diag.NewMessageType(diag.Warning, "IST0002", "Deprecated: %s")
	// ReferencedResourceNotFound defines a diag.MessageType for message "ReferencedResourceNotFound".
	// Description: A resource being referenced does not exist.
	ReferencedResourceNotFound = diag.NewMessageType(diag.Error, "IST0101", "Referenced %s not found: %q")
	// NamespaceNotInjected defines a diag.MessageType for message "NamespaceNotInjected".
	// Description: A namespace is not enabled for Istio injection.
	NamespaceNotInjected = diag.NewMessageType(diag.Info, "IST0102", "The namespace is not enabled for Istio injection. Run 'kubectl label namespace %s istio-injection=enabled' to enable it, or 'kubectl label namespace %s istio-injection=disabled' to explicitly mark it as not needing injection.")
	// PodMissingProxy defines a diag.MessageType for message "PodMissingProxy".
	// Description: A pod is missing the Istio proxy.
	PodMissingProxy = diag.NewMessageType(diag.Warning, "IST0103", "The pod %s is missing the Istio proxy. This can often be resolved by restarting or redeploying the workload.")
	// SchemaValidationError defines a diag.MessageType for message "SchemaValidationError".
	// Description: The resource has a schema validation error.
	SchemaValidationError = diag.NewMessageType(diag.Error, "IST0106", "Schema validation error: %v")
	// MisplacedAnnotation defines a diag.MessageType for message "MisplacedAnnotation".
	// Description: An Istio annotation is applied to the wrong kind of resource.
	MisplacedAnnotation = diag.NewMessageType(diag.Warning, "IST0107", "Misplaced annotation: %s can only be applied to %s")
	// UnknownAnnotation defines a diag.MessageType for message "UnknownAnnotation".
	// Description: An Istio annotation is not recognized for any kind of resource
	UnknownAnnotation = diag.NewMessageType(diag.Warning, "IST0108", "Unknown annotation: %s")
	// ConflictingMeshGatewayVirtualServiceHosts defines a diag.MessageType for message "ConflictingMeshGatewayVirtualServiceHosts".
	// Description: Conflicting hosts on VirtualServices associated with mesh gateway
	ConflictingMeshGatewayVirtualServiceHosts = diag.NewMessageType(diag.Error, "IST0109", "The VirtualServices %s associated with mesh gateway define the same host %s which can lead to undefined behavior. This can be fixed by merging the conflicting VirtualServices into a single resource.")
	// ConflictingSidecarWorkloadSelectors defines a diag.MessageType for message "ConflictingSidecarWorkloadSelectors".
	// Description: A Sidecar resource selects the same workloads as another Sidecar resource
	ConflictingSidecarWorkloadSelectors = diag.NewMessageType(diag.Error, "IST0110", "The Sidecars %v in namespace %q select the same workload pod %q, which can lead to undefined behavior.")
	// MultipleSidecarsWithoutWorkloadSelectors defines a diag.MessageType for message "MultipleSidecarsWithoutWorkloadSelectors".
	// Description: More than one sidecar resource in a namespace has no workload selector
	MultipleSidecarsWithoutWorkloadSelectors = diag.NewMessageType(diag.Error, "IST0111", "The Sidecars %v in namespace %q have no workload selector, which can lead to undefined behavior.")
	// VirtualServiceDestinationPortSelectorRequired defines a diag.MessageType for message "VirtualServiceDestinationPortSelectorRequired".
	// Description: A VirtualService routes to a service with more than one port exposed, but does not specify which to use.
	VirtualServiceDestinationPortSelectorRequired = diag.NewMessageType(diag.Error, "IST0112", "This VirtualService routes to a service %q that exposes multiple ports %v. Specifying a port in the destination is required to disambiguate.")
	// DeploymentAssociatedToMultipleServices defines a diag.MessageType for message "DeploymentAssociatedToMultipleServices".
	// Description: The resulting pods of a service mesh deployment can't be associated with multiple services using the same port but different protocols.
	DeploymentAssociatedToMultipleServices = diag.NewMessageType(diag.Warning, "IST0116", "This deployment %s is associated with multiple services using port %d but different protocols: %v")
	// PortNameIsNotUnderNamingConvention defines a diag.MessageType for message "PortNameIsNotUnderNamingConvention".
	// Description: Port name is not under naming convention. Protocol detection is applied to the port.
	PortNameIsNotUnderNamingConvention = diag.NewMessageType(diag.Info, "IST0118", "Port name %s (port: %d, targetPort: %s) doesn't follow the naming convention of Istio port.")
	// InvalidRegexp defines a diag.MessageType for message "InvalidRegexp".
	// Description: Invalid Regex
	InvalidRegexp = diag.NewMessageType(diag.Warning, "IST0122", "Field %q regular expression invalid: %q (%s)")
	// NamespaceMultipleInjectionLabels defines a diag.MessageType for message "NamespaceMultipleInjectionLabels".
	// Description: A namespace has more than one type of injection labels
	NamespaceMultipleInjectionLabels = diag.NewMessageType(diag.Warning, "IST0123", "The namespace has more than one type of injection labels %v, which may lead to undefined behavior. Make sure only one injection label exists.")
	// InvalidAnnotation defines a diag.MessageType for message "InvalidAnnotation".
	// Description: An Istio annotation that is not valid
	InvalidAnnotation = diag.NewMessageType(diag.Warning, "IST0125", "Invalid annotation %s: %s")
	// UnknownMeshNetworksServiceRegistry defines a diag.MessageType for message "UnknownMeshNetworksServiceRegistry".
	// Description: A service registry in Mesh Networks is unknown
	UnknownMeshNetworksServiceRegistry = diag.NewMessageType(diag.Error, "IST0126", "Unknown service registry %s in network %s")
	// NoMatchingWorkloadsFound defines a diag.MessageType for message "NoMatchingWorkloadsFound".
	// Description: There aren't workloads matching the resource labels
	NoMatchingWorkloadsFound = diag.NewMessageType(diag.Warning, "IST0127", "No matching workloads for this resource with the following labels: %s")
	// NoServerCertificateVerificationDestinationLevel defines a diag.MessageType for message "NoServerCertificateVerificationDestinationLevel".
	// Description: No caCertificates are set in DestinationRule, this results in no verification of presented server certificate.
	NoServerCertificateVerificationDestinationLevel = diag.NewMessageType(diag.Warning, "IST0128", "DestinationRule %s in namespace %s has TLS mode set to %s but no caCertificates are set to validate server identity for host: %s")
	// NoServerCertificateVerificationPortLevel defines a diag.MessageType for message "NoServerCertificateVerificationPortLevel".
	// Description: No caCertificates are set in DestinationRule, this results in no verification of presented server certificate for traffic to a given port.
	NoServerCertificateVerificationPortLevel = diag.NewMessageType(diag.Warning, "IST0129", "DestinationRule %s in namespace %s has TLS mode set to %s but no caCertificates are set to validate server identity for host: %s at port %s")
	// VirtualServiceUnreachableRule defines a diag.MessageType for message "VirtualServiceUnreachableRule".
	// Description: A VirtualService rule will never be used because a previous rule uses the same match.
	VirtualServiceUnreachableRule = diag.NewMessageType(diag.Warning, "IST0130", "VirtualService rule %v not used (%s).")
	// VirtualServiceIneffectiveMatch defines a diag.MessageType for message "VirtualServiceIneffectiveMatch".
	// Description: A VirtualService rule match duplicates a match in a previous rule.
	VirtualServiceIneffectiveMatch = diag.NewMessageType(diag.Info, "IST0131", "VirtualService rule %v match %v is not used (duplicate/overlapping match in rule %v).")
	// VirtualServiceHostNotFoundInGateway defines a diag.MessageType for message "VirtualServiceHostNotFoundInGateway".
	// Description: Host defined in VirtualService not found in Gateway.
	VirtualServiceHostNotFoundInGateway = diag.NewMessageType(diag.Warning, "IST0132", "one or more host %v defined in VirtualService %s not found in Gateway %s.")
	// SchemaWarning defines a diag.MessageType for message "SchemaWarning".
	// Description: The resource has a schema validation warning.
	SchemaWarning = diag.NewMessageType(diag.Warning, "IST0133", "Schema validation warning: %v")
	// ServiceEntryAddressesRequired defines a diag.MessageType for message "ServiceEntryAddressesRequired".
	// Description: Virtual IP addresses are required for ports serving TCP (or unset) protocol
	ServiceEntryAddressesRequired = diag.NewMessageType(diag.Warning, "IST0134", "ServiceEntry addresses are required for this protocol.")
	// DeprecatedAnnotation defines a diag.MessageType for message "DeprecatedAnnotation".
	// Description: A resource is using a deprecated Istio annotation.
	DeprecatedAnnotation = diag.NewMessageType(diag.Info, "IST0135", "Annotation %q has been deprecated%s and may not work in future Istio versions.")
	// AlphaAnnotation defines a diag.MessageType for message "AlphaAnnotation".
	// Description: An Istio annotation may not be suitable for production.
	AlphaAnnotation = diag.NewMessageType(diag.Info, "IST0136", "Annotation %q is part of an alpha-phase feature and may be incompletely supported.")
	// DeploymentConflictingPorts defines a diag.MessageType for message "DeploymentConflictingPorts".
	// Description: Two services selecting the same workload with the same targetPort MUST refer to the same port.
	DeploymentConflictingPorts = diag.NewMessageType(diag.Warning, "IST0137", "This deployment %s is associated with multiple services %v using targetPort %q but different ports: %v.")
	// GatewayDuplicateCertificate defines a diag.MessageType for message "GatewayDuplicateCertificate".
	// Description: Duplicate certificate in multiple gateways may cause 404s if clients re-use HTTP2 connections.
	GatewayDuplicateCertificate = diag.NewMessageType(diag.Warning, "IST0138", "Duplicate certificate in multiple gateways %v may cause 404s if clients re-use HTTP2 connections.")
	// InvalidWebhook defines a diag.MessageType for message "InvalidWebhook".
	// Description: Webhook is invalid or references a control plane service that does not exist.
	InvalidWebhook = diag.NewMessageType(diag.Error, "IST0139", "%v")
	// IngressRouteRulesNotAffected defines a diag.MessageType for message "IngressRouteRulesNotAffected".
	// Description: Route rules have no effect on ingress gateway requests
	IngressRouteRulesNotAffected = diag.NewMessageType(diag.Warning, "IST0140", "Subset in virtual service %s has no effect on ingress gateway %s requests")
	// InsufficientPermissions defines a diag.MessageType for message "InsufficientPermissions".
	// Description: Required permissions to install Istio are missing.
	InsufficientPermissions = diag.NewMessageType(diag.Error, "IST0141", "Missing required permission to create resource %v (%v)")
	// UnsupportedKubernetesVersion defines a diag.MessageType for message "UnsupportedKubernetesVersion".
	// Description: The Kubernetes version is not supported
	UnsupportedKubernetesVersion = diag.NewMessageType(diag.Error, "IST0142", "The Kubernetes Version %q is lower than the minimum version: %v")
	// LocalhostListener defines a diag.MessageType for message "LocalhostListener".
	// Description: A port exposed in a Service is bound to a localhost address
	LocalhostListener = diag.NewMessageType(diag.Error, "IST0143", "Port %v is exposed in a Service but listens on localhost. It will not be exposed to other pods.")
	// InvalidApplicationUID defines a diag.MessageType for message "InvalidApplicationUID".
	// Description: Application pods should not run as user ID (UID) 1337
	InvalidApplicationUID = diag.NewMessageType(diag.Warning, "IST0144", "User ID (UID) 1337 is reserved for the sidecar proxy.")
	// ConflictingGateways defines a diag.MessageType for message "ConflictingGateways".
	// Description: Gateway should not have the same selector, port and matched hosts of server
	ConflictingGateways = diag.NewMessageType(diag.Error, "IST0145", "Conflict with gateways %s (workload selector %s, port %s, hosts %v).")
	// ImageAutoWithoutInjectionWarning defines a diag.MessageType for message "ImageAutoWithoutInjectionWarning".
	// Description: Deployments with `image: auto` should be targeted for injection.
	ImageAutoWithoutInjectionWarning = diag.NewMessageType(diag.Warning, "IST0146", "%s %s contains `image: auto` but does not match any Istio injection webhook selectors.")
	// ImageAutoWithoutInjectionError defines a diag.MessageType for message "ImageAutoWithoutInjectionError".
	// Description: Pods with `image: auto` should be targeted for injection.
	ImageAutoWithoutInjectionError = diag.NewMessageType(diag.Error, "IST0147", "%s %s contains `image: auto` but does not match any Istio injection webhook selectors.")
	// NamespaceInjectionEnabledByDefault defines a diag.MessageType for message "NamespaceInjectionEnabledByDefault".
	// Description: user namespace should be injectable if Istio is installed with enableNamespacesByDefault enabled and neither injection label is set.
	NamespaceInjectionEnabledByDefault = diag.NewMessageType(diag.Info, "IST0148", "is enabled for Istio injection, as Istio is installed with enableNamespacesByDefault as true.")
	// JwtClaimBasedRoutingWithoutRequestAuthN defines a diag.MessageType for message "JwtClaimBasedRoutingWithoutRequestAuthN".
	// Description: Virtual service using JWT claim based routing without request authentication.
	JwtClaimBasedRoutingWithoutRequestAuthN = diag.NewMessageType(diag.Error, "IST0149", "The virtual service uses the JWT claim based routing (key: %s) but found no request authentication for the gateway (%s) pod (%s). The request authentication must first be applied for the gateway pods to validate the JWT token and make the claims available for routing.")
	// ExternalNameServiceTypeInvalidPortName defines a diag.MessageType for message "ExternalNameServiceTypeInvalidPortName".
	// Description: Proxy may prevent tcp named ports and unmatched traffic for ports serving TCP protocol from being forwarded correctly for ExternalName services.
	ExternalNameServiceTypeInvalidPortName = diag.NewMessageType(diag.Warning, "IST0150", "Port name for ExternalName service is invalid. Proxy may prevent tcp named ports and unmatched traffic for ports serving TCP protocol from being forwarded correctly")
	// EnvoyFilterUsesRelativeOperation defines a diag.MessageType for message "EnvoyFilterUsesRelativeOperation".
	// Description: This EnvoyFilter does not have a priority and has a relative patch operation set which can cause the EnvoyFilter not to be applied. Using the INSERT_FIRST or ADD option or setting the priority may help in ensuring the EnvoyFilter is applied correctly.
	EnvoyFilterUsesRelativeOperation = diag.NewMessageType(diag.Warning, "IST0151", "This EnvoyFilter does not have a priority and has a relative patch operation set which can cause the EnvoyFilter not to be applied. Using the INSERT_FIRST or ADD option or setting the priority may help in ensuring the EnvoyFilter is applied correctly.")
	// EnvoyFilterUsesReplaceOperationIncorrectly defines a diag.MessageType for message "EnvoyFilterUsesReplaceOperationIncorrectly".
	// Description: The REPLACE operation is only valid for HTTP_FILTER and NETWORK_FILTER.
	EnvoyFilterUsesReplaceOperationIncorrectly = diag.NewMessageType(diag.Error, "IST0152", "The REPLACE operation is only valid for HTTP_FILTER and NETWORK_FILTER.")
	// EnvoyFilterUsesAddOperationIncorrectly defines a diag.MessageType for message "EnvoyFilterUsesAddOperationIncorrectly".
	// Description: The ADD operation will be ignored when applyTo is set to ROUTE_CONFIGURATION, or HTTP_ROUTE.
	EnvoyFilterUsesAddOperationIncorrectly = diag.NewMessageType(diag.Error, "IST0153", "The ADD operation will be ignored when applyTo is set to ROUTE_CONFIGURATION, or HTTP_ROUTE.")
	// EnvoyFilterUsesRemoveOperationIncorrectly defines a diag.MessageType for message "EnvoyFilterUsesRemoveOperationIncorrectly".
	// Description: The REMOVE operation will be ignored when applyTo is set to ROUTE_CONFIGURATION, or HTTP_ROUTE.
	EnvoyFilterUsesRemoveOperationIncorrectly = diag.NewMessageType(diag.Error, "IST0154", "The REMOVE operation will be ignored when applyTo is set to ROUTE_CONFIGURATION, or HTTP_ROUTE.")
	// EnvoyFilterUsesRelativeOperationWithProxyVersion defines a diag.MessageType for message "EnvoyFilterUsesRelativeOperationWithProxyVersion".
	// Description: This EnvoyFilter does not have a priority and has a relative patch operation (INSERT_BEFORE/AFTER, REPLACE, MERGE, DELETE) and proxyVersion set which can cause the EnvoyFilter not to be applied during an upgrade. Using the INSERT_FIRST or ADD option or setting the priority may help in ensuring the EnvoyFilter is applied correctly.
	EnvoyFilterUsesRelativeOperationWithProxyVersion = diag.NewMessageType(diag.Warning, "IST0155", "This EnvoyFilter does not have a priority and has a relative patch operation (INSERT_BEFORE/AFTER, REPLACE, MERGE, DELETE) and proxyVersion set which can cause the EnvoyFilter not to be applied during an upgrade. Using the INSERT_FIRST or ADD option or setting the priority may help in ensuring the EnvoyFilter is applied correctly.")
	// UnsupportedGatewayAPIVersion defines a diag.MessageType for message "UnsupportedGatewayAPIVersion".
	// Description: The Gateway API CRD version is not supported
	UnsupportedGatewayAPIVersion = diag.NewMessageType(diag.Error, "IST0156", "The Gateway API CRD version %v is lower than the minimum version: %v")
	// InvalidTelemetryProvider defines a diag.MessageType for message "InvalidTelemetryProvider".
	// Description: The Telemetry with empty providers will be ignored
	InvalidTelemetryProvider = diag.NewMessageType(diag.Warning, "IST0157", "The Telemetry %v in namespace %q with empty providers will be ignored.")
	// PodsIstioProxyImageMismatchInNamespace defines a diag.MessageType for message "PodsIstioProxyImageMismatchInNamespace".
	// Description: The Istio proxy image of the pods running in the namespace do not match the image defined in the injection configuration.
	PodsIstioProxyImageMismatchInNamespace = diag.NewMessageType(diag.Warning, "IST0158", "The Istio proxy images of the pods running in the namespace do not match the image defined in the injection configuration (pod names: %v). This often happens after upgrading the Istio control-plane and can be fixed by redeploying the pods.")
	// ConflictingTelemetryWorkloadSelectors defines a diag.MessageType for message "ConflictingTelemetryWorkloadSelectors".
	// Description: A Telemetry resource selects the same workloads as another Telemetry resource
	ConflictingTelemetryWorkloadSelectors = diag.NewMessageType(diag.Error, "IST0159", "The Telemetries %v in namespace %q select the same workload pod %q, which can lead to undefined behavior.")
	// MultipleTelemetriesWithoutWorkloadSelectors defines a diag.MessageType for message "MultipleTelemetriesWithoutWorkloadSelectors".
	// Description: More than one telemetry resource in a namespace has no workload selector
	MultipleTelemetriesWithoutWorkloadSelectors = diag.NewMessageType(diag.Error, "IST0160", "The Telemetries %v in namespace %q have no workload selector, which can lead to undefined behavior.")
	// InvalidGatewayCredential defines a diag.MessageType for message "InvalidGatewayCredential".
	// Description: The credential provided for the Gateway resource is invalid
	InvalidGatewayCredential = diag.NewMessageType(diag.Error, "IST0161", "The credential referenced by the Gateway %s in namespace %s is invalid, which can cause the traffic not to work as expected.")
	// GatewayPortNotDefinedOnService defines a diag.MessageType for message "GatewayPortNotDefinedOnService".
	// Description: Gateway port not exposed by service
	GatewayPortNotDefinedOnService = diag.NewMessageType(diag.Warning, "IST0162", "The gateway is listening on a target port (port %d) that is not defined in the Service associated with its workload instances (Pod selector %s). If you need to access the gateway port through the gateway Service, it will not be available.")
	// InvalidExternalControlPlaneConfig defines a diag.MessageType for message "InvalidExternalControlPlaneConfig".
	// Description: Address for the ingress gateway on the external control plane is not valid
	InvalidExternalControlPlaneConfig = diag.NewMessageType(diag.Warning, "IST0163", "The hostname (%s) that was provided for the webhook (%s) to reach the ingress gateway on the external control plane cluster %s. Traffic may not flow properly.")
	// ExternalControlPlaneAddressIsNotAHostname defines a diag.MessageType for message "ExternalControlPlaneAddressIsNotAHostname".
	// Description: Address for the ingress gateway on the external control plane is an IP address and not a hostname
	ExternalControlPlaneAddressIsNotAHostname = diag.NewMessageType(diag.Info, "IST0164", "The address (%s) that was provided for the webhook (%s) to reach the ingress gateway on the external control plane cluster is an IP address. This is not recommended for a production environment.")
	// ReferencedInternalGateway defines a diag.MessageType for message "ReferencedInternalGateway".
	// Description: VirtualServices should not reference internal Gateways.
	ReferencedInternalGateway = diag.NewMessageType(diag.Warning, "IST0165", "Gateway reference in VirtualService %s is to an implementation-generated internal Gateway: %s.")
	// IneffectiveSelector defines a diag.MessageType for message "IneffectiveSelector".
	// Description: Selector has no effect when applied to Kubernetes Gateways.
	IneffectiveSelector = diag.NewMessageType(diag.Warning, "IST0166", "Ineffective selector on Kubernetes Gateway %s. Use the TargetRef field instead.")
	// IneffectivePolicy defines a diag.MessageType for message "IneffectivePolicy".
	// Description: The policy applied has no impact.
	IneffectivePolicy = diag.NewMessageType(diag.Warning, "IST0167", "The policy has no impact: %s.")
	// UnknownUpgradeCompatibility defines a diag.MessageType for message "UnknownUpgradeCompatibility".
	// Description: We cannot automatically detect whether a change is fully compatible or not
	UnknownUpgradeCompatibility = diag.NewMessageType(diag.Warning, "IST0168", "The configuration %q changed in release %s, but compatibility cannot be automatically detected: %s. Or, install with `--set compatibilityVersion=%s` to retain the old default.")
	// UpdateIncompatibility defines a diag.MessageType for message "UpdateIncompatibility".
	// Description: The provided configuration object may be incompatible due to an upgrade
	UpdateIncompatibility = diag.NewMessageType(diag.Warning, "IST0169", "The configuration %q changed in release %s: %s. Or, install with `--set compatibilityVersion=%s` to retain the old default.")
)
// All returns a list of all known message types.
// The list is ordered by message code (IST0001 upward) and must be kept in
// sync with the message-type definitions above; generated code, do not
// reorder by hand.
func All() []*diag.MessageType {
	return []*diag.MessageType{
		InternalError,
		Deprecated,
		ReferencedResourceNotFound,
		NamespaceNotInjected,
		PodMissingProxy,
		SchemaValidationError,
		MisplacedAnnotation,
		UnknownAnnotation,
		ConflictingMeshGatewayVirtualServiceHosts,
		ConflictingSidecarWorkloadSelectors,
		MultipleSidecarsWithoutWorkloadSelectors,
		VirtualServiceDestinationPortSelectorRequired,
		DeploymentAssociatedToMultipleServices,
		PortNameIsNotUnderNamingConvention,
		InvalidRegexp,
		NamespaceMultipleInjectionLabels,
		InvalidAnnotation,
		UnknownMeshNetworksServiceRegistry,
		NoMatchingWorkloadsFound,
		NoServerCertificateVerificationDestinationLevel,
		NoServerCertificateVerificationPortLevel,
		VirtualServiceUnreachableRule,
		VirtualServiceIneffectiveMatch,
		VirtualServiceHostNotFoundInGateway,
		SchemaWarning,
		ServiceEntryAddressesRequired,
		DeprecatedAnnotation,
		AlphaAnnotation,
		DeploymentConflictingPorts,
		GatewayDuplicateCertificate,
		InvalidWebhook,
		IngressRouteRulesNotAffected,
		InsufficientPermissions,
		UnsupportedKubernetesVersion,
		LocalhostListener,
		InvalidApplicationUID,
		ConflictingGateways,
		ImageAutoWithoutInjectionWarning,
		ImageAutoWithoutInjectionError,
		NamespaceInjectionEnabledByDefault,
		JwtClaimBasedRoutingWithoutRequestAuthN,
		ExternalNameServiceTypeInvalidPortName,
		EnvoyFilterUsesRelativeOperation,
		EnvoyFilterUsesReplaceOperationIncorrectly,
		EnvoyFilterUsesAddOperationIncorrectly,
		EnvoyFilterUsesRemoveOperationIncorrectly,
		EnvoyFilterUsesRelativeOperationWithProxyVersion,
		UnsupportedGatewayAPIVersion,
		InvalidTelemetryProvider,
		PodsIstioProxyImageMismatchInNamespace,
		ConflictingTelemetryWorkloadSelectors,
		MultipleTelemetriesWithoutWorkloadSelectors,
		InvalidGatewayCredential,
		GatewayPortNotDefinedOnService,
		InvalidExternalControlPlaneConfig,
		ExternalControlPlaneAddressIsNotAHostname,
		ReferencedInternalGateway,
		IneffectiveSelector,
		IneffectivePolicy,
		UnknownUpgradeCompatibility,
		UpdateIncompatibility,
	}
}
// NewInternalError constructs a diag.Message for the InternalError message type.
func NewInternalError(r *resource.Instance, detail string) diag.Message {
	return diag.NewMessage(InternalError, r, detail)
}
// NewDeprecated constructs a diag.Message for the Deprecated message type.
func NewDeprecated(r *resource.Instance, detail string) diag.Message {
	return diag.NewMessage(Deprecated, r, detail)
}
// NewReferencedResourceNotFound constructs a diag.Message for the ReferencedResourceNotFound message type.
func NewReferencedResourceNotFound(r *resource.Instance, reftype string, refval string) diag.Message {
	return diag.NewMessage(ReferencedResourceNotFound, r, reftype, refval)
}
// NewNamespaceNotInjected constructs a diag.Message for the NamespaceNotInjected message type.
func NewNamespaceNotInjected(r *resource.Instance, namespace string, namespace2 string) diag.Message {
	return diag.NewMessage(NamespaceNotInjected, r, namespace, namespace2)
}
// NewPodMissingProxy constructs a diag.Message for the PodMissingProxy message type.
func NewPodMissingProxy(r *resource.Instance, podName string) diag.Message {
	return diag.NewMessage(PodMissingProxy, r, podName)
}
// NewSchemaValidationError constructs a diag.Message for the SchemaValidationError message type.
func NewSchemaValidationError(r *resource.Instance, err error) diag.Message {
	return diag.NewMessage(SchemaValidationError, r, err)
}
// NewMisplacedAnnotation constructs a diag.Message for the MisplacedAnnotation message type.
func NewMisplacedAnnotation(r *resource.Instance, annotation string, kind string) diag.Message {
	return diag.NewMessage(MisplacedAnnotation, r, annotation, kind)
}
// NewUnknownAnnotation constructs a diag.Message for the UnknownAnnotation message type.
func NewUnknownAnnotation(r *resource.Instance, annotation string) diag.Message {
	return diag.NewMessage(UnknownAnnotation, r, annotation)
}
// NewConflictingMeshGatewayVirtualServiceHosts constructs a diag.Message for the ConflictingMeshGatewayVirtualServiceHosts message type.
func NewConflictingMeshGatewayVirtualServiceHosts(r *resource.Instance, virtualServices string, host string) diag.Message {
	return diag.NewMessage(ConflictingMeshGatewayVirtualServiceHosts, r, virtualServices, host)
}
// NewConflictingSidecarWorkloadSelectors constructs a diag.Message for the ConflictingSidecarWorkloadSelectors message type.
func NewConflictingSidecarWorkloadSelectors(r *resource.Instance, conflictingSidecars []string, namespace string, workloadPod string) diag.Message {
	return diag.NewMessage(ConflictingSidecarWorkloadSelectors, r, conflictingSidecars, namespace, workloadPod)
}
// NewMultipleSidecarsWithoutWorkloadSelectors constructs a diag.Message for the MultipleSidecarsWithoutWorkloadSelectors message type.
func NewMultipleSidecarsWithoutWorkloadSelectors(r *resource.Instance, conflictingSidecars []string, namespace string) diag.Message {
	return diag.NewMessage(MultipleSidecarsWithoutWorkloadSelectors, r, conflictingSidecars, namespace)
}
// NewVirtualServiceDestinationPortSelectorRequired constructs a diag.Message for the VirtualServiceDestinationPortSelectorRequired message type.
func NewVirtualServiceDestinationPortSelectorRequired(r *resource.Instance, destHost string, destPorts []int) diag.Message {
	return diag.NewMessage(VirtualServiceDestinationPortSelectorRequired, r, destHost, destPorts)
}
// NewDeploymentAssociatedToMultipleServices constructs a diag.Message for the DeploymentAssociatedToMultipleServices message type.
func NewDeploymentAssociatedToMultipleServices(r *resource.Instance, deployment string, port int32, services []string) diag.Message {
	return diag.NewMessage(DeploymentAssociatedToMultipleServices, r, deployment, port, services)
}
// NewPortNameIsNotUnderNamingConvention constructs a diag.Message for the PortNameIsNotUnderNamingConvention message type.
func NewPortNameIsNotUnderNamingConvention(r *resource.Instance, portName string, port int, targetPort string) diag.Message {
	return diag.NewMessage(PortNameIsNotUnderNamingConvention, r, portName, port, targetPort)
}
// NewInvalidRegexp constructs a diag.Message for the InvalidRegexp message type.
func NewInvalidRegexp(r *resource.Instance, where string, re string, problem string) diag.Message {
	return diag.NewMessage(InvalidRegexp, r, where, re, problem)
}
// NewNamespaceMultipleInjectionLabels constructs a diag.Message for the NamespaceMultipleInjectionLabels message type.
func NewNamespaceMultipleInjectionLabels(r *resource.Instance, labels []string) diag.Message {
	return diag.NewMessage(NamespaceMultipleInjectionLabels, r, labels)
}
// NewInvalidAnnotation constructs a diag.Message for the InvalidAnnotation message type.
func NewInvalidAnnotation(r *resource.Instance, annotation string, problem string) diag.Message {
	return diag.NewMessage(InvalidAnnotation, r, annotation, problem)
}
// NewUnknownMeshNetworksServiceRegistry constructs a diag.Message for the UnknownMeshNetworksServiceRegistry message type.
func NewUnknownMeshNetworksServiceRegistry(r *resource.Instance, serviceregistry string, network string) diag.Message {
	return diag.NewMessage(UnknownMeshNetworksServiceRegistry, r, serviceregistry, network)
}
// NewNoMatchingWorkloadsFound constructs a diag.Message for the NoMatchingWorkloadsFound message type.
func NewNoMatchingWorkloadsFound(r *resource.Instance, labels string) diag.Message {
	return diag.NewMessage(NoMatchingWorkloadsFound, r, labels)
}
// NewNoServerCertificateVerificationDestinationLevel constructs a diag.Message for the NoServerCertificateVerificationDestinationLevel message type.
func NewNoServerCertificateVerificationDestinationLevel(r *resource.Instance, destinationrule string, namespace string, mode string, host string) diag.Message {
	return diag.NewMessage(NoServerCertificateVerificationDestinationLevel, r, destinationrule, namespace, mode, host)
}
// NewNoServerCertificateVerificationPortLevel constructs a diag.Message for the NoServerCertificateVerificationPortLevel message type.
func NewNoServerCertificateVerificationPortLevel(r *resource.Instance, destinationrule string, namespace string, mode string, host string, port string) diag.Message {
	return diag.NewMessage(NoServerCertificateVerificationPortLevel, r, destinationrule, namespace, mode, host, port)
}
// NewVirtualServiceUnreachableRule constructs a diag.Message for the VirtualServiceUnreachableRule message type.
func NewVirtualServiceUnreachableRule(r *resource.Instance, ruleno string, reason string) diag.Message {
	return diag.NewMessage(VirtualServiceUnreachableRule, r, ruleno, reason)
}
// NewVirtualServiceIneffectiveMatch constructs a diag.Message for the VirtualServiceIneffectiveMatch message type.
func NewVirtualServiceIneffectiveMatch(r *resource.Instance, ruleno string, matchno string, dupno string) diag.Message {
	return diag.NewMessage(VirtualServiceIneffectiveMatch, r, ruleno, matchno, dupno)
}
// NewVirtualServiceHostNotFoundInGateway constructs a diag.Message for the VirtualServiceHostNotFoundInGateway message type.
func NewVirtualServiceHostNotFoundInGateway(r *resource.Instance, host []string, virtualservice string, gateway string) diag.Message {
	return diag.NewMessage(VirtualServiceHostNotFoundInGateway, r, host, virtualservice, gateway)
}
// NewSchemaWarning constructs a diag.Message for the SchemaWarning message type.
func NewSchemaWarning(r *resource.Instance, err error) diag.Message {
	return diag.NewMessage(SchemaWarning, r, err)
}
// NewServiceEntryAddressesRequired constructs a diag.Message for the ServiceEntryAddressesRequired message type.
func NewServiceEntryAddressesRequired(r *resource.Instance) diag.Message {
	return diag.NewMessage(ServiceEntryAddressesRequired, r)
}
// NewDeprecatedAnnotation constructs a diag.Message for the DeprecatedAnnotation message type.
func NewDeprecatedAnnotation(r *resource.Instance, annotation string, extra string) diag.Message {
	return diag.NewMessage(DeprecatedAnnotation, r, annotation, extra)
}
// NewAlphaAnnotation constructs a diag.Message for the AlphaAnnotation message type.
func NewAlphaAnnotation(r *resource.Instance, annotation string) diag.Message {
	return diag.NewMessage(AlphaAnnotation, r, annotation)
}
// NewDeploymentConflictingPorts constructs a diag.Message for the DeploymentConflictingPorts message type.
func NewDeploymentConflictingPorts(r *resource.Instance, deployment string, services []string, targetPort string, ports []int32) diag.Message {
	return diag.NewMessage(DeploymentConflictingPorts, r, deployment, services, targetPort, ports)
}
// NewGatewayDuplicateCertificate constructs a diag.Message for the GatewayDuplicateCertificate message type.
func NewGatewayDuplicateCertificate(r *resource.Instance, gateways []string) diag.Message {
	return diag.NewMessage(GatewayDuplicateCertificate, r, gateways)
}
// NewInvalidWebhook constructs a diag.Message for the InvalidWebhook message type.
func NewInvalidWebhook(r *resource.Instance, error string) diag.Message {
	return diag.NewMessage(InvalidWebhook, r, error)
}
// NewIngressRouteRulesNotAffected constructs a diag.Message for the IngressRouteRulesNotAffected message type.
func NewIngressRouteRulesNotAffected(r *resource.Instance, virtualservicesubset string, virtualservice string) diag.Message {
	return diag.NewMessage(IngressRouteRulesNotAffected, r, virtualservicesubset, virtualservice)
}
// NewInsufficientPermissions constructs a diag.Message for the InsufficientPermissions message type.
func NewInsufficientPermissions(r *resource.Instance, resource string, error string) diag.Message {
	return diag.NewMessage(InsufficientPermissions, r, resource, error)
}
// NewUnsupportedKubernetesVersion constructs a diag.Message for the UnsupportedKubernetesVersion message type.
func NewUnsupportedKubernetesVersion(r *resource.Instance, version string, minimumVersion string) diag.Message {
	return diag.NewMessage(UnsupportedKubernetesVersion, r, version, minimumVersion)
}
// NewLocalhostListener constructs a diag.Message for the LocalhostListener message type.
func NewLocalhostListener(r *resource.Instance, port string) diag.Message {
	return diag.NewMessage(LocalhostListener, r, port)
}
// NewInvalidApplicationUID constructs a diag.Message for the InvalidApplicationUID message type.
func NewInvalidApplicationUID(r *resource.Instance) diag.Message {
	return diag.NewMessage(InvalidApplicationUID, r)
}
// NewConflictingGateways constructs a diag.Message for the ConflictingGateways message type.
func NewConflictingGateways(r *resource.Instance, gateway string, selector string, portnumber string, hosts string) diag.Message {
	return diag.NewMessage(ConflictingGateways, r, gateway, selector, portnumber, hosts)
}
// NewImageAutoWithoutInjectionWarning constructs a diag.Message for the ImageAutoWithoutInjectionWarning message type.
func NewImageAutoWithoutInjectionWarning(r *resource.Instance, resourceType string, resourceName string) diag.Message {
	return diag.NewMessage(ImageAutoWithoutInjectionWarning, r, resourceType, resourceName)
}
// NewImageAutoWithoutInjectionError constructs a diag.Message for the ImageAutoWithoutInjectionError message type.
func NewImageAutoWithoutInjectionError(r *resource.Instance, resourceType string, resourceName string) diag.Message {
	return diag.NewMessage(ImageAutoWithoutInjectionError, r, resourceType, resourceName)
}
// NewNamespaceInjectionEnabledByDefault constructs a diag.Message for the NamespaceInjectionEnabledByDefault message type.
func NewNamespaceInjectionEnabledByDefault(r *resource.Instance) diag.Message {
	return diag.NewMessage(NamespaceInjectionEnabledByDefault, r)
}
// NewJwtClaimBasedRoutingWithoutRequestAuthN constructs a diag.Message for the JwtClaimBasedRoutingWithoutRequestAuthN message type.
func NewJwtClaimBasedRoutingWithoutRequestAuthN(r *resource.Instance, key string, gateway string, pod string) diag.Message {
	return diag.NewMessage(JwtClaimBasedRoutingWithoutRequestAuthN, r, key, gateway, pod)
}
// NewExternalNameServiceTypeInvalidPortName constructs a diag.Message for the ExternalNameServiceTypeInvalidPortName message type.
func NewExternalNameServiceTypeInvalidPortName(r *resource.Instance) diag.Message {
	return diag.NewMessage(ExternalNameServiceTypeInvalidPortName, r)
}
// NewEnvoyFilterUsesRelativeOperation constructs a diag.Message for the EnvoyFilterUsesRelativeOperation message type.
func NewEnvoyFilterUsesRelativeOperation(r *resource.Instance) diag.Message {
	return diag.NewMessage(EnvoyFilterUsesRelativeOperation, r)
}
// NewEnvoyFilterUsesReplaceOperationIncorrectly constructs a diag.Message for the EnvoyFilterUsesReplaceOperationIncorrectly message type.
func NewEnvoyFilterUsesReplaceOperationIncorrectly(r *resource.Instance) diag.Message {
	return diag.NewMessage(EnvoyFilterUsesReplaceOperationIncorrectly, r)
}
// NewEnvoyFilterUsesAddOperationIncorrectly constructs a diag.Message for the EnvoyFilterUsesAddOperationIncorrectly message type.
func NewEnvoyFilterUsesAddOperationIncorrectly(r *resource.Instance) diag.Message {
	return diag.NewMessage(EnvoyFilterUsesAddOperationIncorrectly, r)
}
// NewEnvoyFilterUsesRemoveOperationIncorrectly constructs a diag.Message for the EnvoyFilterUsesRemoveOperationIncorrectly message type.
func NewEnvoyFilterUsesRemoveOperationIncorrectly(r *resource.Instance) diag.Message {
	return diag.NewMessage(EnvoyFilterUsesRemoveOperationIncorrectly, r)
}
// NewEnvoyFilterUsesRelativeOperationWithProxyVersion constructs a diag.Message for the EnvoyFilterUsesRelativeOperationWithProxyVersion message type.
func NewEnvoyFilterUsesRelativeOperationWithProxyVersion(r *resource.Instance) diag.Message {
	return diag.NewMessage(EnvoyFilterUsesRelativeOperationWithProxyVersion, r)
}
// NewUnsupportedGatewayAPIVersion constructs a diag.Message for the UnsupportedGatewayAPIVersion message type.
func NewUnsupportedGatewayAPIVersion(r *resource.Instance, version string, minimumVersion string) diag.Message {
	return diag.NewMessage(UnsupportedGatewayAPIVersion, r, version, minimumVersion)
}
// NewInvalidTelemetryProvider constructs a diag.Message for the InvalidTelemetryProvider message type.
func NewInvalidTelemetryProvider(r *resource.Instance, name string, namespace string) diag.Message {
	return diag.NewMessage(InvalidTelemetryProvider, r, name, namespace)
}
// NewPodsIstioProxyImageMismatchInNamespace constructs a diag.Message for the PodsIstioProxyImageMismatchInNamespace message type.
func NewPodsIstioProxyImageMismatchInNamespace(r *resource.Instance, podNames []string) diag.Message {
	return diag.NewMessage(PodsIstioProxyImageMismatchInNamespace, r, podNames)
}
// NewConflictingTelemetryWorkloadSelectors constructs a diag.Message for the ConflictingTelemetryWorkloadSelectors message type.
func NewConflictingTelemetryWorkloadSelectors(r *resource.Instance, conflictingTelemetries []string, namespace string, workloadPod string) diag.Message {
	return diag.NewMessage(ConflictingTelemetryWorkloadSelectors, r, conflictingTelemetries, namespace, workloadPod)
}
// NewMultipleTelemetriesWithoutWorkloadSelectors constructs a diag.Message for the MultipleTelemetriesWithoutWorkloadSelectors message type.
func NewMultipleTelemetriesWithoutWorkloadSelectors(r *resource.Instance, conflictingTelemetries []string, namespace string) diag.Message {
	return diag.NewMessage(MultipleTelemetriesWithoutWorkloadSelectors, r, conflictingTelemetries, namespace)
}
// NewInvalidGatewayCredential constructs a diag.Message for the InvalidGatewayCredential message type.
func NewInvalidGatewayCredential(r *resource.Instance, gatewayName string, gatewayNamespace string) diag.Message {
	return diag.NewMessage(InvalidGatewayCredential, r, gatewayName, gatewayNamespace)
}
// NewGatewayPortNotDefinedOnService constructs a diag.Message for the GatewayPortNotDefinedOnService message type.
func NewGatewayPortNotDefinedOnService(r *resource.Instance, port int, selector string) diag.Message {
	return diag.NewMessage(GatewayPortNotDefinedOnService, r, port, selector)
}
// NewInvalidExternalControlPlaneConfig constructs a diag.Message for the InvalidExternalControlPlaneConfig message type.
func NewInvalidExternalControlPlaneConfig(r *resource.Instance, hostname string, webhook string, msg string) diag.Message {
	return diag.NewMessage(InvalidExternalControlPlaneConfig, r, hostname, webhook, msg)
}
// NewExternalControlPlaneAddressIsNotAHostname constructs a diag.Message for the ExternalControlPlaneAddressIsNotAHostname message type.
func NewExternalControlPlaneAddressIsNotAHostname(r *resource.Instance, hostname string, webhook string) diag.Message {
	return diag.NewMessage(ExternalControlPlaneAddressIsNotAHostname, r, hostname, webhook)
}
// NewReferencedInternalGateway constructs a diag.Message for the ReferencedInternalGateway message type.
func NewReferencedInternalGateway(r *resource.Instance, virtualservice string, gateway string) diag.Message {
	return diag.NewMessage(ReferencedInternalGateway, r, virtualservice, gateway)
}
// NewIneffectiveSelector constructs a diag.Message for the IneffectiveSelector message type.
func NewIneffectiveSelector(r *resource.Instance, gateway string) diag.Message {
	return diag.NewMessage(IneffectiveSelector, r, gateway)
}
// NewIneffectivePolicy constructs a diag.Message for the IneffectivePolicy message type.
func NewIneffectivePolicy(r *resource.Instance, reason string) diag.Message {
	return diag.NewMessage(IneffectivePolicy, r, reason)
}
// NewUnknownUpgradeCompatibility constructs a diag.Message for the UnknownUpgradeCompatibility message type.
func NewUnknownUpgradeCompatibility(r *resource.Instance, field string, release string, info string, compatVersion string) diag.Message {
	return diag.NewMessage(UnknownUpgradeCompatibility, r, field, release, info, compatVersion)
}
// NewUpdateIncompatibility constructs a diag.Message for the UpdateIncompatibility message type.
func NewUpdateIncompatibility(r *resource.Instance, field string, release string, info string, compatVersion string) diag.Message {
	return diag.NewMessage(UpdateIncompatibility, r, field, release, info, compatVersion)
}
/*
Copyright Istio Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"google.golang.org/protobuf/types/known/timestamppb"
mcp "istio.io/api/mcp/v1alpha1"
)
// PilotConfigToResource converts a model Config (which carries no dedicated
// proto of its own) into an MCP Resource proto. The metadata name is the
// namespace-qualified "<namespace>/<name>" form.
// TODO: define a proto matching Config - to avoid useless superficial conversions.
func PilotConfigToResource(c *Config) (*mcp.Resource, error) {
	body, err := ToProto(c.Spec)
	if err != nil {
		return nil, err
	}
	return &mcp.Resource{
		Body: body,
		Metadata: &mcp.Metadata{
			Name:        c.Namespace + "/" + c.Name,
			CreateTime:  timestamppb.New(c.CreationTimestamp),
			Version:     c.ResourceVersion,
			Labels:      c.Labels,
			Annotations: c.Annotations,
		},
	}, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gateway
import (
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/protocol"
)
// IsTLSServer returns true if this server is non HTTP, but with some TLS
// settings for termination/passthrough. The protocol check filters out
// plain HTTPS-redirect servers that carry a Tls block but speak HTTP.
func IsTLSServer(server *v1alpha3.Server) bool {
	return server.Tls != nil && !protocol.Parse(server.Port.Protocol).IsHTTP()
}
// IsHTTPSServerWithTLSTermination returns true if the server is HTTPS with
// TLS termination (i.e. has TLS settings and is not passthrough).
func IsHTTPSServerWithTLSTermination(server *v1alpha3.Server) bool {
	if server.Tls == nil {
		return false
	}
	return protocol.Parse(server.Port.Protocol) == protocol.HTTPS && !IsPassThroughServer(server)
}
// IsHTTPServer returns true if this server is using HTTP, or HTTPS with
// TLS termination at the gateway.
func IsHTTPServer(server *v1alpha3.Server) bool {
	p := protocol.Parse(server.Port.Protocol)
	return p.IsHTTP() || (p == protocol.HTTPS && !IsPassThroughServer(server))
}
// IsEligibleForHTTP3Upgrade returns true if we can create an HTTP/3 server
// listening on QUIC for the given server. The server must terminate TLS
// (non-passthrough HTTPS), since TLS is mandatory for QUIC, and QUIC
// listeners must be enabled via the feature flag.
func IsEligibleForHTTP3Upgrade(server *v1alpha3.Server) bool {
	if !features.EnableQUICListeners {
		return false
	}
	if IsPassThroughServer(server) {
		return false
	}
	return protocol.Parse(server.Port.Protocol) == protocol.HTTPS
}
// IsPassThroughServer returns true if this server does TLS passthrough
// (manual PASSTHROUGH or AUTO_PASSTHROUGH mode).
func IsPassThroughServer(server *v1alpha3.Server) bool {
	tls := server.Tls
	if tls == nil {
		return false
	}
	switch tls.Mode {
	case v1alpha3.ServerTLSSettings_PASSTHROUGH, v1alpha3.ServerTLSSettings_AUTO_PASSTHROUGH:
		return true
	default:
		return false
	}
}
// IsTCPServerWithTLSTermination returns true if this server is a TCP
// (non-HTTP, non-HTTPS) server that terminates TLS at the gateway.
//
// The contract requires "some TLS settings for termination": a server with
// no Tls block at all is plain TCP, not TLS termination. The previous
// implementation skipped the Tls-presence check and therefore returned
// true for plain TCP servers as well, contradicting its own doc comment.
func IsTCPServerWithTLSTermination(server *v1alpha3.Server) bool {
	// No TLS settings at all -> plain TCP, not termination.
	if server.Tls == nil {
		return false
	}
	// Passthrough modes do not terminate TLS at the gateway.
	if IsPassThroughServer(server) {
		return false
	}
	p := protocol.Parse(server.Port.Protocol)
	return !p.IsHTTP() && !p.IsHTTPS()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
"strings"
"istio.io/istio/pkg/config/constants"
)
// IsInternalGatewayReference returns true if gatewayName is referencing the
// internal Istio Gateway corresponding to a Kubernetes Gateway API gateway.
// A "namespace/name" qualifier, if present, is stripped before the check.
func IsInternalGatewayReference(gatewayName string) bool {
	if _, name, ok := strings.Cut(gatewayName, "/"); ok {
		gatewayName = name
	}
	return strings.Contains(gatewayName, "-"+constants.KubernetesGatewayName+"-")
}
// InternalGatewayName returns the name of the internal Istio Gateway corresponding to the
// specified gateway-api gateway and listener.
// The format is "<gateway>-<KubernetesGatewayName>-<listener>", which is what
// IsInternalGatewayReference matches against.
func InternalGatewayName(gwName, lName string) string {
	return fmt.Sprintf("%s-%s-%s", gwName, constants.KubernetesGatewayName, lName)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package host
import (
"strings"
)
// Name describes a (possibly wildcarded) hostname, e.g. "foo.com" or
// "*.foo.com". A leading '*' marks the name as a wildcard (see IsWildCarded).
type Name string
// Matches returns true if this hostname overlaps with the other hostname. Names overlap if:
// - they're fully resolved (i.e. not wildcarded) and match exactly (i.e. an exact string match)
// - one or both are wildcarded (e.g. "*.foo.com"), in which case we use wildcard resolution rules
// to determine if n is covered by o or o is covered by n.
// e.g.:
//
// Name("foo.com").Matches("foo.com") = true
// Name("foo.com").Matches("bar.com") = false
// Name("*.com").Matches("foo.com") = true
// Name("bar.com").Matches("*.com") = true
// Name("*.foo.com").Matches("foo.com") = false
// Name("*").Matches("foo.com") = true
// Name("*").Matches("*.com") = true
func (n Name) Matches(o Name) bool {
	nWild, oWild := n.IsWildCarded(), o.IsWildCarded()
	switch {
	case nWild && oWild:
		// Both wildcards: the longer name's suffix must contain the
		// shorter name's suffix for the two patterns to overlap.
		longer, shorter := n, o
		if len(n) < len(o) {
			longer, shorter = o, n
		}
		return strings.HasSuffix(string(longer[1:]), string(shorter[1:]))
	case nWild:
		// Only n is a wildcard: o must carry n's suffix.
		return strings.HasSuffix(string(o), string(n[1:]))
	case oWild:
		// Only o is a wildcard: n must carry o's suffix.
		return strings.HasSuffix(string(n), string(o[1:]))
	default:
		// Neither is a wildcard: exact match only.
		return n == o
	}
}
// SubsetOf returns true if this hostname is a valid subset of the other hostname. The semantics are
// the same as "Matches", but only in one direction (i.e., n is covered by o).
func (n Name) SubsetOf(o Name) bool {
	nWild, oWild := n.IsWildCarded(), o.IsWildCarded()
	switch {
	case nWild && oWild:
		// A shorter wildcard can never be covered by a longer one.
		if len(n) < len(o) {
			return false
		}
		return strings.HasSuffix(string(n[1:]), string(o[1:]))
	case nWild:
		// A wildcard is never a subset of a concrete name.
		return false
	case oWild:
		// Concrete n is covered by wildcard o when it carries o's suffix.
		return strings.HasSuffix(string(n), string(o[1:]))
	default:
		// Two concrete names: subset only when identical.
		return n == o
	}
}
// IsWildCarded reports whether the name begins with a '*' wildcard marker.
func (n Name) IsWildCarded() bool {
	if len(n) == 0 {
		return false
	}
	return n[0] == '*'
}
// String implements fmt.Stringer.
func (n Name) String() string {
	return string(n)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package host
import (
"sort"
"strings"
)
// Names is a collection of Name; it exists so it's easy to sort hostnames consistently across Istio.
// In a few locations we care about the order hostnames appear in Envoy config: primarily HTTP routes, but also in
// gateways, and for SNI. In those locations, we sort hostnames longest to shortest with wildcards last.
// See MoreSpecific for the exact ordering rule.
type Names []Name
// Compile-time assertion that Names implements sort.Interface.
var _ sort.Interface = Names{}
// Len implements sort.Interface.
func (h Names) Len() int {
	return len(h)
}
// Less implements sort.Interface, ordering by MoreSpecific: longest to
// shortest, alphabetically, with wildcards last.
func (h Names) Less(i, j int) bool {
	return MoreSpecific(h[i], h[j])
}
// MoreSpecific returns true if hostname a is more specific than b, i.e.
// a should sort before b: non-wildcard names before wildcards, then longest
// to shortest, then alphabetically.
//
// Fixes over the previous implementation:
//   - it indexed a[0]/b[0] without a length guard, so it panicked when
//     exactly one of the names was the empty string;
//   - it returned true when both names were empty, which makes Less(i, j)
//     and Less(j, i) both true for equal elements — a violation of the
//     strict weak ordering sort.Interface requires;
//   - the wildcard test allocated via string(a[0]) == "*"; a byte compare
//     is equivalent and allocation-free.
func MoreSpecific(a, b Name) bool {
	// An empty name is treated as an ordinary (non-wildcard) name and
	// falls through to the length/alphabetical comparison below.
	aWild := len(a) > 0 && a[0] == '*'
	bWild := len(b) > 0 && b[0] == '*'
	if aWild != bWild {
		// Non-wildcard names are more specific than wildcards.
		return bWild
	}
	// Both wildcards or both concrete: longest first, ties alphabetical.
	if len(a) != len(b) {
		return len(a) > len(b)
	}
	return a < b
}
// Swap implements sort.Interface.
func (h Names) Swap(i, j int) {
	h[i], h[j] = h[j], h[i]
}
// Contains reports whether host appears in the collection (exact equality;
// no wildcard matching is performed).
func (h Names) Contains(host Name) bool {
	for i := range h {
		if h[i] == host {
			return true
		}
	}
	return false
}
// Intersection returns the subset of host names that are covered by both h and other.
// e.g.:
//
// Names(["foo.com","bar.com"]).Intersection(Names(["*.com"])) = Names(["foo.com","bar.com"])
// Names(["foo.com","*.net"]).Intersection(Names(["*.com","bar.net"])) = Names(["foo.com","bar.net"])
// Names(["foo.com","*.net"]).Intersection(Names(["*.bar.net"])) = Names(["*.bar.net"])
// Names(["foo.com"]).Intersection(Names(["bar.com"])) = Names([])
// Names([]).Intersection(Names(["bar.com"]) = Names([])
func (h Names) Intersection(other Names) Names {
	out := make(Names, 0, len(h))
	// appendUnique adds n unless it is already present in the result.
	appendUnique := func(n Name) {
		if !out.Contains(n) {
			out = append(out, n)
		}
	}
	for _, a := range h {
		for _, b := range other {
			// For each pair, keep the more specific name when one
			// covers the other.
			switch {
			case a.SubsetOf(b):
				appendUnique(a)
			case b.SubsetOf(a):
				appendUnique(b)
			}
		}
	}
	return out
}
// NewNames converts a slice of host name strings to type Names.
func NewNames(hosts []string) Names {
	out := make(Names, len(hosts))
	for i, h := range hosts {
		out[i] = Name(h)
	}
	return out
}
// NamesForNamespace returns the subset of hosts that are in the specified namespace.
// The list of hosts contains host names optionally qualified with namespace/ or */.
// If not qualified or qualified with *, the host name is considered to be in every namespace.
// e.g.:
// NamesForNamespace(["ns1/foo.com","ns2/bar.com"], "ns1") = Names(["foo.com"])
// NamesForNamespace(["ns1/foo.com","ns2/bar.com"], "ns3") = Names([])
// NamesForNamespace(["ns1/foo.com","*/bar.com"], "ns1") = Names(["foo.com","bar.com"])
// NamesForNamespace(["ns1/foo.com","*/bar.com"], "ns3") = Names(["bar.com"])
// NamesForNamespace(["foo.com","ns2/bar.com"], "ns2") = Names(["foo.com","bar.com"])
// NamesForNamespace(["foo.com","ns2/bar.com"], "ns3") = Names(["foo.com"])
func NamesForNamespace(hosts []string, namespace string) Names {
	out := make(Names, 0, len(hosts))
	for _, h := range hosts {
		// NOTE: strings.Split (not Cut/SplitN) is kept deliberately: for a
		// host with multiple '/' separators, only the segment between the
		// first and second separator is used, matching the original behavior.
		parts := strings.Split(h, "/")
		if len(parts) > 1 {
			// Qualified name: keep only hosts for this namespace or "*".
			if parts[0] != namespace && parts[0] != "*" {
				continue
			}
			h = parts[1]
		}
		out = append(out, Name(h))
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"strings"
corev1 "k8s.io/api/core/v1"
"istio.io/istio/pkg/config/protocol"
)
// Well-known port numbers for protocols that protocol sniffing must skip
// (see wellKnownPorts below).
const (
	SMTP = 25
	DNS = 53
	MySQL = 3306
	MongoDB = 27017
)
// wellKnownPorts are ports to be skipped for protocol sniffing. Applications bound
// to these ports will be broken if protocol sniffing is enabled, so traffic on them
// defaults to TCP instead (see ConvertProtocol).
var wellKnownPorts = map[int32]struct{}{
	SMTP: {},
	DNS: {},
	MySQL: {},
	MongoDB: {},
}
var (
	// grpcWeb is the string form of the gRPC-Web protocol, pre-computed for
	// the prefix comparison in ConvertProtocol.
	grpcWeb = string(protocol.GRPCWeb)
	// grpcWebLen caches len(grpcWeb) to avoid recomputing it per call.
	grpcWebLen = len(grpcWeb)
)
// ConvertProtocol resolves an Istio protocol from the k8s port number, port name,
// transport protocol, and optional appProtocol.
// Resolution order: UDP transport wins outright; otherwise appProtocol (if set)
// takes precedence over the port name; finally the name (or its prefix before
// the first '-') is parsed, with well-known server-first ports falling back to TCP.
func ConvertProtocol(port int32, portName string, proto corev1.Protocol, appProto *string) protocol.Instance {
	if proto == corev1.ProtocolUDP {
		return protocol.UDP
	}
	// Prefer the application protocol when provided; fall back to the port name.
	name := portName
	if appProto != nil {
		name = *appProto
		// Kubernetes has a few AppProtocol specific standard names defined in the Service spec.
		// Handle these only for AppProtocol (name cannot have these values, anyways).
		// "kubernetes.io/h2c" is "http2 over cleartext", which is also what our HTTP2 port is.
		if name == "kubernetes.io/h2c" {
			return protocol.HTTP2
		}
	}
	// Check the "grpc-web" prefix before the general prefix handling below,
	// since it contains a hyphen and would otherwise be truncated to "grpc".
	if len(name) >= grpcWebLen && strings.EqualFold(name[:grpcWebLen], grpcWeb) {
		return protocol.GRPCWeb
	}
	// Keep only the portion before the first '-', if any ("http-foo" -> "http").
	prefix, _, _ := strings.Cut(name, "-")
	p := protocol.Parse(prefix)
	if p == protocol.Unsupported {
		// Default to TCP for well-known ports when no protocol was specified.
		if _, ok := wellKnownPorts[port]; ok {
			return protocol.TCP
		}
	}
	return p
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package labels
import (
"fmt"
"regexp"
"strings"
"github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
)
const (
	DNS1123LabelMaxLength = 63 // Public for testing only.
	// dns1123LabelFmt is a single DNS1123 label: alphanumerics and '-',
	// starting and ending with an alphanumeric.
	dns1123LabelFmt = "[a-zA-Z0-9](?:[-a-zA-Z0-9]*[a-zA-Z0-9])?"
	// a wild-card prefix is an '*', a normal DNS1123 label with a leading '*' or '*-', or a normal DNS1123 label
	wildcardPrefix = `(\*|(\*|\*-)?` + dns1123LabelFmt + `)`
	// Using kubernetes requirement, a valid key must be a non-empty string consist
	// of alphanumeric characters, '-', '_' or '.', and must start and end with an
	// alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345'
	qualifiedNameFmt = "(?:[A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]"
	// In Kubernetes, label names can start with a DNS name followed by a '/':
	dnsNamePrefixFmt = dns1123LabelFmt + `(?:\.` + dns1123LabelFmt + `)*/`
	// dnsNamePrefixMaxLength is the Kubernetes limit for the DNS-name prefix part of a label key.
	dnsNamePrefixMaxLength = 253
)
var (
	// tagRegexp matches a label key: an optional DNS-name prefix (captured with its
	// trailing '/') followed by a qualified name.
	tagRegexp = regexp.MustCompile("^(" + dnsNamePrefixFmt + ")?(" + qualifiedNameFmt + ")$")
	// labelValueRegexp matches a label value; label value can be an empty string.
	labelValueRegexp = regexp.MustCompile("^" + "(" + qualifiedNameFmt + ")?" + "$")
	// dns1123LabelRegexp matches exactly one DNS1123 label.
	dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$")
	// wildcardPrefixRegexp matches a DNS1123 label that may start with a wildcard ('*' or '*-').
	wildcardPrefixRegexp = regexp.MustCompile("^" + wildcardPrefix + "$")
)
// Instance is a non empty map of arbitrary strings. Each version of a service can
// be differentiated by a unique set of labels associated with the version. These
// labels are assigned to all instances of a particular service version. For
// example, lets say catalog.mystore.com has 2 versions v1 and v2. v1 instances
// could have labels gitCommit=aeiou234, region=us-east, while v2 instances could
// have labels name=kittyCat,region=us-east.
type Instance map[string]string
// SubsetOf reports whether every key/value pair in i is also present in that.
// An empty (or nil) i is a subset of anything.
func (i Instance) SubsetOf(that Instance) bool {
	if len(i) == 0 {
		return true
	}
	// A smaller (or empty) map cannot contain all of i's pairs.
	if len(that) < len(i) || len(that) == 0 {
		return false
	}
	for key, want := range i {
		got, found := that[key]
		if !found || got != want {
			return false
		}
	}
	return true
}
// Match is true if the label has same values for the keys.
// if len(i) == 0, will return false. It is mainly used for service -> workload
func (i Instance) Match(that Instance) bool {
	// Unlike SubsetOf, an empty selector matches nothing.
	return len(i) != 0 && i.SubsetOf(that)
}
// Equals returns true if the labels are equal: same size, same key/value pairs.
func (i Instance) Equals(that Instance) bool {
	if len(i) != len(that) {
		return false
	}
	for k, v := range i {
		if ov, ok := that[k]; !ok || ov != v {
			return false
		}
	}
	return true
}
// Validate ensures tag is well-formed: each key must be a valid Kubernetes
// label key and each value must match the label-value format. All violations
// are collected into a single multierror rather than failing fast.
func (i Instance) Validate() error {
	// nil is treated as valid (no labels to check).
	if i == nil {
		return nil
	}
	var errs error
	for k, v := range i {
		if err := validateTagKey(k); err != nil {
			errs = multierror.Append(errs, err)
		}
		if !labelValueRegexp.MatchString(v) {
			errs = multierror.Append(errs, fmt.Errorf("invalid tag value: %q", v))
		}
	}
	// nil when no violations were found.
	return errs
}
// IsDNS1123Label tests for a string that conforms to the definition of a label in
// DNS (RFC 1123): at most 63 characters, matching the DNS1123 label pattern.
func IsDNS1123Label(value string) bool {
	if len(value) > DNS1123LabelMaxLength {
		return false
	}
	return dns1123LabelRegexp.MatchString(value)
}
// IsWildcardDNS1123Label tests for a string that conforms to the definition of a label in DNS (RFC 1123), but allows
// the wildcard label (`*`), and typical labels with a leading asterisk instead of alphabetic character (e.g. "*-foo").
func IsWildcardDNS1123Label(value string) bool {
	if len(value) > DNS1123LabelMaxLength {
		return false
	}
	return wildcardPrefixRegexp.MatchString(value)
}
// validateTagKey checks that a string is valid as a Kubernetes label name:
// an optional DNS-name prefix ("example.com/") within its 253-char limit,
// followed by a name within the 63-char label limit.
func validateTagKey(k string) error {
	m := tagRegexp.FindStringSubmatch(k)
	if m == nil {
		return fmt.Errorf("invalid tag key: %q", k)
	}
	if prefix := m[1]; prefix != "" {
		// Exclude the trailing '/' captured with the prefix from the length check.
		if len(prefix)-1 > dnsNamePrefixMaxLength {
			return fmt.Errorf("invalid tag key: %q (DNS prefix is too long)", k)
		}
	}
	if len(m[2]) > DNS1123LabelMaxLength {
		return fmt.Errorf("invalid tag key: %q (name is too long)", k)
	}
	return nil
}
// String renders the labels as "k1=v1,k2=v2,..." with keys sorted for stable
// output; a key with an empty value is rendered as the bare key.
func (i Instance) String() string {
	keys := slices.Sort(maps.Keys(i))
	var sb strings.Builder
	// Assume each kv pair is roughly 25 characters. We could be under or over, this is just a guess to optimize
	sb.Grow(len(keys) * 25)
	for idx, k := range keys {
		if idx > 0 {
			sb.WriteString(",")
		}
		if v := i[k]; len(v) > 0 {
			sb.WriteString(k + "=" + v)
		} else {
			sb.WriteString(k)
		}
	}
	return sb.String()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubemesh
import (
"fmt"
v1 "k8s.io/api/core/v1"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/watcher/configmapwatcher"
"istio.io/istio/pkg/log"
)
// NewConfigMapWatcher creates a new Watcher for changes to the given ConfigMap.
// On every ConfigMap update it refreshes the mesh networks (from the
// "meshNetworks" key) and the mesh config (from the given key). When multiWatch
// is set, the raw mesh config YAML is handed to the watcher for merging with
// other sources instead of being parsed directly.
func NewConfigMapWatcher(client kube.Client, namespace, name, key string, multiWatch bool, stop <-chan struct{}) *mesh.MultiWatcher {
	w := mesh.NewMultiWatcher(mesh.DefaultMeshConfig())
	c := configmapwatcher.NewController(client, namespace, name, func(cm *v1.ConfigMap) {
		meshNetworks, err := ReadNetworksConfigMap(cm, "meshNetworks")
		if err != nil {
			// Keep the last known config in case there's a misconfiguration issue.
			log.Errorf("failed to read meshNetworks config from ConfigMap: %v", err)
			return
		}
		// ReadNetworksConfigMap returns nil (not an error) when the ConfigMap or
		// key is absent; in that case the existing networks are left untouched.
		if meshNetworks != nil {
			w.SetNetworks(meshNetworks)
		}
		if multiWatch {
			// Hand the raw YAML to the watcher; it merges it with the user config.
			meshConfig := meshConfigMapData(cm, key)
			w.HandleMeshConfigData(meshConfig)
			return
		}
		// Original behavior - just per-revision config
		meshConfig, err := ReadConfigMap(cm, key)
		if err != nil {
			// Keep the last known config in case there's a misconfiguration issue.
			log.Errorf("failed to read mesh config from ConfigMap: %v", err)
			return
		}
		w.HandleMeshConfig(meshConfig)
	})
	go c.Run(stop)
	// Ensure the ConfigMap is initially loaded if present.
	if !client.WaitForCacheSync("configmap watcher", stop, c.HasSynced) {
		log.Error("failed to wait for cache sync")
	}
	return w
}
// AddUserMeshConfig watches the user-supplied mesh config ConfigMap
// (userMeshConfig) and feeds its raw YAML (under key) into the watcher, which
// merges it with the revision config (revision config takes precedence).
func AddUserMeshConfig(client kube.Client, watcher mesh.Watcher, namespace, key, userMeshConfig string, stop <-chan struct{}) {
	c := configmapwatcher.NewController(client, namespace, userMeshConfig, func(cm *v1.ConfigMap) {
		meshConfig := meshConfigMapData(cm, key)
		watcher.HandleUserMeshConfig(meshConfig)
	})
	go c.Run(stop)
	// Block until the initial ConfigMap state (if any) has been observed.
	if !client.WaitForCacheSync("user mesh config", stop, c.HasSynced) {
		log.Error("failed to wait for cache sync")
	}
}
// meshConfigMapData extracts the raw YAML stored under key in the ConfigMap.
// Returns "" when the ConfigMap is nil or the key is absent.
func meshConfigMapData(cm *v1.ConfigMap, key string) string {
	if cm == nil {
		return ""
	}
	// A missing key yields the zero value "", matching the absent-ConfigMap case.
	return cm.Data[key]
}
// ReadConfigMap parses the mesh config stored under key in the ConfigMap,
// applying defaults for omitted fields. A nil ConfigMap yields the default
// mesh config; a missing key is an error.
func ReadConfigMap(cm *v1.ConfigMap, key string) (*meshconfig.MeshConfig, error) {
	if cm == nil {
		log.Info("no ConfigMap found, using default MeshConfig config")
		return mesh.DefaultMeshConfig(), nil
	}
	cfgYaml, ok := cm.Data[key]
	if !ok {
		return nil, fmt.Errorf("missing ConfigMap key %q", key)
	}
	mc, err := mesh.ApplyMeshConfigDefaults(cfgYaml)
	if err != nil {
		return nil, fmt.Errorf("failed reading MeshConfig config: %v. YAML:\n%s", err, cfgYaml)
	}
	log.Info("Loaded MeshConfig config from Kubernetes API server.")
	return mc, nil
}
// ReadNetworksConfigMap parses the mesh networks stored under key in the
// ConfigMap. Returns (nil, nil) when the ConfigMap or the key is absent,
// signaling the caller to keep its existing networks.
func ReadNetworksConfigMap(cm *v1.ConfigMap, key string) (*meshconfig.MeshNetworks, error) {
	if cm == nil {
		log.Info("no ConfigMap found, using existing MeshNetworks config")
		return nil, nil
	}
	cfgYaml, ok := cm.Data[key]
	if !ok {
		return nil, nil
	}
	networks, err := mesh.ParseMeshNetworks(cfgYaml)
	if err != nil {
		return nil, fmt.Errorf("failed reading MeshNetworks config: %v. YAML:\n%s", err, cfgYaml)
	}
	log.Info("Loaded MeshNetworks config from Kubernetes API server.")
	return networks, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"fmt"
"os"
"time"
"github.com/hashicorp/go-multierror"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
wrappers "google.golang.org/protobuf/types/known/wrapperspb"
"sigs.k8s.io/yaml"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
)
// DefaultProxyConfig returns the default ProxyConfig for individual proxies:
// standard sidecar ports (admin 15000, status 15020), mTLS to the control
// plane, and a Zipkin tracer pointed at istio-system.
func DefaultProxyConfig() *meshconfig.ProxyConfig {
	// TODO: include revision based on REVISION env
	// TODO: set default namespace based on POD_NAMESPACE env
	return &meshconfig.ProxyConfig{
		ConfigPath: constants.ConfigPathDir,
		ClusterName: &meshconfig.ProxyConfig_ServiceCluster{ServiceCluster: constants.ServiceClusterName},
		DrainDuration: durationpb.New(45 * time.Second),
		TerminationDrainDuration: durationpb.New(5 * time.Second),
		ProxyAdminPort: 15000,
		ControlPlaneAuthPolicy: meshconfig.AuthenticationPolicy_MUTUAL_TLS,
		DiscoveryAddress: "istiod.istio-system.svc:15012",
		Tracing: &meshconfig.Tracing{
			Tracer: &meshconfig.Tracing_Zipkin_{
				Zipkin: &meshconfig.Tracing_Zipkin{
					Address: "zipkin.istio-system:9411",
				},
			},
		},
		// Code defaults
		BinaryPath: constants.BinaryPathFilename,
		StatNameLength: 189,
		StatusPort: 15020,
	}
}
// DefaultMeshNetworks returns a default meshnetworks configuration.
// By default, it is empty.
func DefaultMeshNetworks() *meshconfig.MeshNetworks {
	mn := EmptyMeshNetworks()
	return &mn
}
// DefaultMeshConfig returns the default mesh config.
// This is merged with values from the mesh config map.
func DefaultMeshConfig() *meshconfig.MeshConfig {
	proxyConfig := DefaultProxyConfig()
	// Defaults matching the standard install
	// order matches the generated mesh config.
	return &meshconfig.MeshConfig{
		EnableTracing: true,
		AccessLogFile: "",
		AccessLogEncoding: meshconfig.MeshConfig_TEXT,
		AccessLogFormat: "",
		EnableEnvoyAccessLogService: false,
		ProtocolDetectionTimeout: durationpb.New(0),
		IngressService: "istio-ingressgateway",
		IngressControllerMode: meshconfig.MeshConfig_STRICT,
		IngressClass: "istio",
		TrustDomain: constants.DefaultClusterLocalDomain,
		TrustDomainAliases: []string{},
		EnableAutoMtls: wrappers.Bool(true),
		OutboundTrafficPolicy: &meshconfig.MeshConfig_OutboundTrafficPolicy{Mode: meshconfig.MeshConfig_OutboundTrafficPolicy_ALLOW_ANY},
		InboundTrafficPolicy: &meshconfig.MeshConfig_InboundTrafficPolicy{Mode: meshconfig.MeshConfig_InboundTrafficPolicy_PASSTHROUGH},
		LocalityLbSetting: &v1alpha3.LocalityLoadBalancerSetting{
			Enabled: wrappers.Bool(true),
		},
		Certificates: []*meshconfig.Certificate{},
		DefaultConfig: proxyConfig,
		RootNamespace: constants.IstioSystemNamespace,
		ProxyListenPort: 15001,
		ProxyInboundListenPort: 15006,
		ConnectTimeout: durationpb.New(10 * time.Second),
		DefaultServiceExportTo: []string{"*"},
		DefaultVirtualServiceExportTo: []string{"*"},
		DefaultDestinationRuleExportTo: []string{"*"},
		// DnsRefreshRate is only used when DNS requests fail (NXDOMAIN or SERVFAIL). For success, the TTL
		// will be used.
		// https://datatracker.ietf.org/doc/html/rfc2308#section-3 defines how negative DNS results should handle TTLs,
		// but Envoy does not respect this (https://github.com/envoyproxy/envoy/issues/20885).
		// To counter this, we bump up the default to 60s to avoid overloading DNS servers.
		DnsRefreshRate: durationpb.New(60 * time.Second),
		ServiceSettings: make([]*meshconfig.MeshConfig_ServiceSettings, 0),
		EnablePrometheusMerge: wrappers.Bool(true),
		DefaultProviders: &meshconfig.MeshConfig_DefaultProviders{},
		// Built-in extension providers; user-supplied providers with the same
		// name override these during ApplyMeshConfig.
		ExtensionProviders: []*meshconfig.MeshConfig_ExtensionProvider{
			{
				Name: "prometheus",
				Provider: &meshconfig.MeshConfig_ExtensionProvider_Prometheus{
					Prometheus: &meshconfig.MeshConfig_ExtensionProvider_PrometheusMetricsProvider{},
				},
			},
			{
				Name: "stackdriver",
				Provider: &meshconfig.MeshConfig_ExtensionProvider_Stackdriver{
					Stackdriver: &meshconfig.MeshConfig_ExtensionProvider_StackdriverProvider{},
				},
			},
			{
				Name: "envoy",
				Provider: &meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLog{
					EnvoyFileAccessLog: &meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider{
						Path: "/dev/stdout",
					},
				},
			},
		},
	}
}
// ApplyProxyConfig applies the give proxy config yaml to a mesh config object. The passed in mesh config
// will not be modified; a deep clone is returned with its DefaultConfig merged.
func ApplyProxyConfig(yaml string, meshConfig *meshconfig.MeshConfig) (*meshconfig.MeshConfig, error) {
	out := proto.Clone(meshConfig).(*meshconfig.MeshConfig)
	merged, err := MergeProxyConfig(yaml, out.DefaultConfig)
	if err != nil {
		return nil, err
	}
	out.DefaultConfig = merged
	return out, nil
}
// MergeProxyConfig merges the given proxy config yaml with the given proxy config object.
// ProxyMetadata maps are merged key-by-key (incoming keys win) and ProxyHeaders
// fields are preserved when the incoming YAML leaves them empty; all other
// fields are overwritten by the YAML. The passed-in proxyConfig is mutated and returned.
func MergeProxyConfig(yaml string, proxyConfig *meshconfig.ProxyConfig) (*meshconfig.ProxyConfig, error) {
	// Save fields that need merge semantics before ApplyYAML overwrites them.
	origMetadata := proxyConfig.ProxyMetadata
	origProxyHeaders := proxyConfig.ProxyHeaders
	if err := protomarshal.ApplyYAML(yaml, proxyConfig); err != nil {
		return nil, fmt.Errorf("could not parse proxy config: %v", err)
	}
	newMetadata := proxyConfig.ProxyMetadata
	proxyConfig.ProxyMetadata = mergeMap(origMetadata, newMetadata)
	correctProxyHeaders(proxyConfig, origProxyHeaders)
	return proxyConfig, nil
}
// correctProxyHeaders restores ProxyHeaders fields that the applied YAML left
// empty, falling back to the values held before the merge.
func correctProxyHeaders(proxyConfig *meshconfig.ProxyConfig, orig *meshconfig.ProxyConfig_ProxyHeaders) {
	ph := proxyConfig.ProxyHeaders
	if ph == nil || orig == nil {
		return
	}
	ph.ForwardedClientCert = ptr.NonEmptyOrDefault(ph.ForwardedClientCert, orig.ForwardedClientCert)
	ph.RequestId = ptr.NonEmptyOrDefault(ph.RequestId, orig.RequestId)
	ph.AttemptCount = ptr.NonEmptyOrDefault(ph.AttemptCount, orig.AttemptCount)
	ph.Server = ptr.NonEmptyOrDefault(ph.Server, orig.Server)
	ph.EnvoyDebugHeaders = ptr.NonEmptyOrDefault(ph.EnvoyDebugHeaders, orig.EnvoyDebugHeaders)
}
// extractYamlField re-serializes the value stored under key in mp back to YAML.
// Returns "" (no error) when the key is absent or nil.
func extractYamlField(key string, mp map[string]any) (string, error) {
	val := mp[key]
	if val == nil {
		return "", nil
	}
	out, err := yaml.Marshal(val)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// toMap parses arbitrary YAML text into a generic string-keyed map.
func toMap(yamlText string) (map[string]any, error) {
	out := make(map[string]any)
	if err := yaml.Unmarshal([]byte(yamlText), &out); err != nil {
		return nil, err
	}
	return out, nil
}
// ApplyMeshConfig returns a new MeshConfig decoded from the
// input YAML with the provided defaults applied to omitted configuration values.
// Note: defaultConfig is mutated and returned; callers wanting isolation must
// pass a fresh config (see ApplyMeshConfigDefaults).
func ApplyMeshConfig(yaml string, defaultConfig *meshconfig.MeshConfig) (*meshconfig.MeshConfig, error) {
	// We want to keep semantics that all fields are overrides, except proxy config is a merge. This allows
	// decent customization while also not requiring users to redefine the entire proxy config if they want to override
	// Note: if we want to add more structure in the future, we will likely need to revisit this idea.
	// Store the current set proxy config so we don't wipe it out, we will configure this later
	prevProxyConfig := defaultConfig.DefaultConfig
	prevDefaultProvider := defaultConfig.DefaultProviders
	prevExtensionProviders := defaultConfig.ExtensionProviders
	prevTrustDomainAliases := defaultConfig.TrustDomainAliases
	defaultConfig.DefaultConfig = DefaultProxyConfig()
	if err := protomarshal.ApplyYAML(yaml, defaultConfig); err != nil {
		return nil, multierror.Prefix(err, "failed to convert to proto.")
	}
	// Restore the saved proxy config; the YAML's defaultConfig is merged into it below.
	defaultConfig.DefaultConfig = prevProxyConfig
	raw, err := toMap(yaml)
	if err != nil {
		return nil, err
	}
	// Get just the proxy config yaml
	pc, err := extractYamlField("defaultConfig", raw)
	if err != nil {
		return nil, multierror.Prefix(err, "failed to extract proxy config")
	}
	if pc != "" {
		pc, err := MergeProxyConfig(pc, defaultConfig.DefaultConfig)
		if err != nil {
			return nil, err
		}
		defaultConfig.DefaultConfig = pc
	}
	// Default providers are likewise merged (via ApplyYAML) rather than replaced.
	defaultConfig.DefaultProviders = prevDefaultProvider
	dp, err := extractYamlField("defaultProviders", raw)
	if err != nil {
		return nil, multierror.Prefix(err, "failed to extract default providers")
	}
	if dp != "" {
		if err := protomarshal.ApplyYAML(dp, defaultConfig.DefaultProviders); err != nil {
			return nil, fmt.Errorf("could not parse default providers: %v", err)
		}
	}
	// Extension providers from the YAML override same-named existing providers
	// and are appended otherwise, preserving the built-in set.
	newExtensionProviders := defaultConfig.ExtensionProviders
	defaultConfig.ExtensionProviders = prevExtensionProviders
	for _, p := range newExtensionProviders {
		found := false
		for _, e := range defaultConfig.ExtensionProviders {
			if p.Name == e.Name {
				e.Provider = p.Provider
				found = true
				break
			}
		}
		if !found {
			defaultConfig.ExtensionProviders = append(defaultConfig.ExtensionProviders, p)
		}
	}
	// Trust domain aliases are unioned (deduplicated and sorted), not replaced.
	defaultConfig.TrustDomainAliases = sets.SortedList(sets.New(append(defaultConfig.TrustDomainAliases, prevTrustDomainAliases...)...))
	warn, err := validation.ValidateMeshConfig(defaultConfig)
	if err != nil {
		return nil, err
	}
	if warn != nil {
		log.Warnf("warnings occurred during mesh validation: %v", warn)
	}
	return defaultConfig, nil
}
// mergeMap overlays merger onto original (merger's values win on key clashes).
// Returns nil only when both inputs are nil. When original is non-nil it is
// mutated in place and returned.
func mergeMap(original map[string]string, merger map[string]string) map[string]string {
	if original == nil && merger == nil {
		return nil
	}
	out := original
	if out == nil {
		out = map[string]string{}
	}
	for key, value := range merger {
		out[key] = value
	}
	return out
}
// ApplyMeshConfigDefaults returns a new MeshConfig decoded from the
// input YAML with defaults applied to omitted configuration values.
// A fresh default config is used each call, so the shared defaults are never mutated.
func ApplyMeshConfigDefaults(yaml string) (*meshconfig.MeshConfig, error) {
	return ApplyMeshConfig(yaml, DefaultMeshConfig())
}
// DeepCopyMeshConfig returns an independent copy of mc by round-tripping it
// through its JSON representation.
func DeepCopyMeshConfig(mc *meshconfig.MeshConfig) (*meshconfig.MeshConfig, error) {
	j, err := protomarshal.ToJSON(mc)
	if err != nil {
		return nil, err
	}
	out := &meshconfig.MeshConfig{}
	if err := protomarshal.ApplyJSON(j, out); err != nil {
		return nil, err
	}
	return out, nil
}
// EmptyMeshNetworks returns a MeshNetworks configuration with no networks
// (an initialized but empty Networks map, not nil).
func EmptyMeshNetworks() meshconfig.MeshNetworks {
	return meshconfig.MeshNetworks{
		Networks: map[string]*meshconfig.Network{},
	}
}
// ParseMeshNetworks returns a new MeshNetworks decoded from the
// input YAML, validated before being returned.
func ParseMeshNetworks(yaml string) (*meshconfig.MeshNetworks, error) {
	networks := EmptyMeshNetworks()
	if err := protomarshal.ApplyYAML(yaml, &networks); err != nil {
		return nil, multierror.Prefix(err, "failed to convert to proto.")
	}
	if err := validation.ValidateMeshNetworks(&networks); err != nil {
		return nil, err
	}
	return &networks, nil
}
// ReadMeshNetworks gets mesh networks configuration from a config file.
func ReadMeshNetworks(filename string) (*meshconfig.MeshNetworks, error) {
	data, err := os.ReadFile(filename)
	if err != nil {
		return nil, multierror.Prefix(err, "cannot read networks config file")
	}
	return ParseMeshNetworks(string(data))
}
// ReadMeshConfig gets mesh configuration from a config file, applying defaults
// for omitted values.
func ReadMeshConfig(filename string) (*meshconfig.MeshConfig, error) {
	data, err := os.ReadFile(filename)
	if err != nil {
		return nil, multierror.Prefix(err, "cannot read mesh config file")
	}
	return ApplyMeshConfigDefaults(string(data))
}
// ReadMeshConfigData gets raw mesh configuration yaml from a config file
// without parsing it.
func ReadMeshConfigData(filename string) (string, error) {
	data, err := os.ReadFile(filename)
	if err != nil {
		return "", multierror.Prefix(err, "cannot read mesh config file")
	}
	return string(data), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"fmt"
"reflect"
"sync"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/filewatcher"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/protomarshal"
)
// NetworksHolder is a holder of a mesh networks configuration.
type NetworksHolder interface {
	// SetNetworks stores a new networks configuration.
	SetNetworks(*meshconfig.MeshNetworks)
	// Networks returns the current networks configuration.
	Networks() *meshconfig.MeshNetworks
	// PrevNetworks returns the networks configuration that was current before the last update.
	PrevNetworks() *meshconfig.MeshNetworks
}
// WatcherHandlerRegistration will be returned to caller to remove the handler later.
type WatcherHandlerRegistration struct {
	// handler is the registered callback; the registration's identity (pointer)
	// is what DeleteMeshHandler/DeleteNetworksHandler match on.
	handler func()
}
// NetworksWatcher watches changes to the mesh networks config.
type NetworksWatcher interface {
	NetworksHolder
	// AddNetworksHandler registers a callback handler for changes to the networks config.
	AddNetworksHandler(func()) *WatcherHandlerRegistration
	// DeleteNetworksHandler unregisters a callback handler when remote cluster is removed.
	DeleteNetworksHandler(registration *WatcherHandlerRegistration)
}
// Compile-time check that internalNetworkWatcher implements NetworksWatcher.
var _ NetworksWatcher = &internalNetworkWatcher{}
type internalNetworkWatcher struct {
	// mutex guards handlers, networks, and prevNetworks.
	mutex sync.RWMutex
	handlers []*WatcherHandlerRegistration
	// networks is the current config; prevNetworks holds the previous one (set on update).
	networks *meshconfig.MeshNetworks
	prevNetworks *meshconfig.MeshNetworks
}
// NewFixedNetworksWatcher creates a new NetworksWatcher that always returns the given config.
// It will never fire any events, since the config never changes.
func NewFixedNetworksWatcher(networks *meshconfig.MeshNetworks) NetworksWatcher {
	return &internalNetworkWatcher{
		networks: networks,
	}
}
// NewNetworksWatcher creates a new watcher for changes to the given networks config file.
// The file must exist and parse successfully at creation time; subsequent read
// failures are logged and the last good config is kept.
func NewNetworksWatcher(fileWatcher filewatcher.FileWatcher, filename string) (NetworksWatcher, error) {
	meshNetworks, err := ReadMeshNetworks(filename)
	if err != nil {
		return nil, fmt.Errorf("failed to read mesh networks configuration from %q: %v", filename, err)
	}
	// Dump the initial config for debuggability; marshal errors are ignored deliberately.
	networksdump, _ := protomarshal.ToJSONWithIndent(meshNetworks, " ")
	log.Infof("mesh networks configuration: %s", networksdump)
	w := &internalNetworkWatcher{
		networks: meshNetworks,
	}
	// Watch the networks config file for changes and reload if it got modified
	addFileWatcher(fileWatcher, filename, func() {
		// Reload the config file
		meshNetworks, err := ReadMeshNetworks(filename)
		if err != nil {
			// Keep the previous config on failure.
			log.Warnf("failed to read mesh networks configuration from %q: %v", filename, err)
			return
		}
		w.SetNetworks(meshNetworks)
	})
	return w, nil
}
// Networks returns the latest network configuration for the mesh.
// Safe to call on a nil receiver (returns nil).
func (w *internalNetworkWatcher) Networks() *meshconfig.MeshNetworks {
	if w == nil {
		return nil
	}
	w.mutex.RLock()
	defer w.mutex.RUnlock()
	return w.networks
}
// PrevNetworks returns the previous network configuration for the mesh
// (the value replaced by the most recent SetNetworks).
// Safe to call on a nil receiver (returns nil).
func (w *internalNetworkWatcher) PrevNetworks() *meshconfig.MeshNetworks {
	if w == nil {
		return nil
	}
	w.mutex.RLock()
	defer w.mutex.RUnlock()
	return w.prevNetworks
}
// SetNetworks will use the given value for mesh networks and notify all handlers of the change.
// Handlers are invoked only when the config actually changed, and are called
// outside the lock to avoid deadlocks if a handler re-enters the watcher.
func (w *internalNetworkWatcher) SetNetworks(meshNetworks *meshconfig.MeshNetworks) {
	var handlers []*WatcherHandlerRegistration
	w.mutex.Lock()
	if !reflect.DeepEqual(meshNetworks, w.networks) {
		networksdump, _ := protomarshal.ToJSONWithIndent(meshNetworks, " ")
		log.Infof("mesh networks configuration updated to: %s", networksdump)
		// Store the new config.
		w.prevNetworks = w.networks
		w.networks = meshNetworks
		// Snapshot the handler list under the lock; invoke after unlocking.
		handlers = append([]*WatcherHandlerRegistration{}, w.handlers...)
	}
	w.mutex.Unlock()
	// Notify the handlers of the change.
	for _, h := range handlers {
		h.handler()
	}
}
// AddNetworksHandler registers a callback handler for changes to the mesh
// network config, returning a registration token usable with DeleteNetworksHandler.
func (w *internalNetworkWatcher) AddNetworksHandler(h func()) *WatcherHandlerRegistration {
	reg := &WatcherHandlerRegistration{handler: h}
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.handlers = append(w.handlers, reg)
	return reg
}
// DeleteNetworksHandler deregisters a callback handler previously returned by
// AddNetworksHandler. A nil registration is ignored.
func (w *internalNetworkWatcher) DeleteNetworksHandler(registration *WatcherHandlerRegistration) {
	if registration == nil {
		return
	}
	w.mutex.Lock()
	defer w.mutex.Unlock()
	if len(w.handlers) == 0 {
		return
	}
	// Remove by registration identity, keeping all other handlers.
	w.handlers = slices.FilterInPlace(w.handlers, func(reg *WatcherHandlerRegistration) bool {
		return reg != registration
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"reflect"
"sync"
"sync/atomic"
"time"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/filewatcher"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/protomarshal"
)
// Holder is a holder of a mesh configuration.
type Holder interface {
	// Mesh returns the current mesh config.
	Mesh() *meshconfig.MeshConfig
}
// Watcher is a Holder whose mesh config can be updated asynchronously.
type Watcher interface {
	Holder
	// AddMeshHandler registers a callback handler for changes to the mesh config.
	AddMeshHandler(h func()) *WatcherHandlerRegistration
	// DeleteMeshHandler unregisters a callback handler when remote cluster is removed.
	DeleteMeshHandler(registration *WatcherHandlerRegistration)
	// HandleUserMeshConfig keeps track of user mesh config overrides. These are merged with the standard
	// mesh config, which takes precedence.
	HandleUserMeshConfig(string)
}
// MultiWatcher is a struct wrapping the internal watcher to let users watch both
// mesh config and mesh networks changes through a single object.
type MultiWatcher struct {
	*internalWatcher
	internalNetworkWatcher
}
// NewMultiWatcher creates a MultiWatcher seeded with the given mesh config.
// The embedded network watcher starts with its zero value (no networks).
func NewMultiWatcher(config *meshconfig.MeshConfig) *MultiWatcher {
	iw := &internalWatcher{}
	iw.MeshConfig.Store(config)
	return &MultiWatcher{
		internalWatcher: iw,
	}
}
// Compile-time check that internalWatcher implements Watcher.
var _ Watcher = &internalWatcher{}
type internalWatcher struct {
	// mutex guards handlers and the raw YAML fields below; MeshConfig itself is
	// an atomic pointer and may be read without the lock.
	mutex sync.Mutex
	handlers []*WatcherHandlerRegistration
	// Current merged mesh config
	MeshConfig atomic.Pointer[meshconfig.MeshConfig]
	// userMeshConfig is the raw user-override YAML; revMeshConfig is the raw
	// per-revision YAML (revision takes precedence when merging).
	userMeshConfig string
	revMeshConfig string
}
// NewFixedWatcher creates a new Watcher that always returns the given mesh config. It will never
// fire any events, since the config never changes.
func NewFixedWatcher(mesh *meshconfig.MeshConfig) Watcher {
	w := &internalWatcher{}
	w.MeshConfig.Store(mesh)
	return w
}
// NewFileWatcher creates a new Watcher for changes to the given mesh config file. Returns an error
// if the given file does not exist or failed during parsing. When multiWatch is
// set, file changes feed raw YAML into the merge pipeline (HandleMeshConfigData);
// otherwise the parsed config replaces the current one directly.
func NewFileWatcher(fileWatcher filewatcher.FileWatcher, filename string, multiWatch bool) (Watcher, error) {
	meshConfigYaml, err := ReadMeshConfigData(filename)
	if err != nil {
		return nil, err
	}
	meshConfig, err := ApplyMeshConfigDefaults(meshConfigYaml)
	if err != nil {
		return nil, err
	}
	w := &internalWatcher{
		revMeshConfig: meshConfigYaml,
	}
	w.MeshConfig.Store(meshConfig)
	// Watch the config file for changes and reload if it got modified
	addFileWatcher(fileWatcher, filename, func() {
		if multiWatch {
			meshConfig, err := ReadMeshConfigData(filename)
			if err != nil {
				log.Warnf("failed to read mesh configuration, using default: %v", err)
				return
			}
			w.HandleMeshConfigData(meshConfig)
			return
		}
		// Reload the config file.
		// NOTE(review): this branch assigns to the meshConfig/err variables of the
		// enclosing function (`=`, not `:=`), unlike the multiWatch branch above —
		// presumably harmless since the callback runs after construction, but verify.
		meshConfig, err = ReadMeshConfig(filename)
		if err != nil {
			log.Warnf("failed to read mesh configuration, using default: %v", err)
			return
		}
		w.HandleMeshConfig(meshConfig)
	})
	return w, nil
}
// Mesh returns the latest mesh config via an atomic load (no lock required).
func (w *internalWatcher) Mesh() *meshconfig.MeshConfig {
	return w.MeshConfig.Load()
}
// AddMeshHandler registers a callback handler for changes to the mesh config,
// returning a registration token usable with DeleteMeshHandler.
func (w *internalWatcher) AddMeshHandler(h func()) *WatcherHandlerRegistration {
	reg := &WatcherHandlerRegistration{handler: h}
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.handlers = append(w.handlers, reg)
	return reg
}
// DeleteMeshHandler removes a previously registered mesh config handler by
// registration identity; unknown registrations are a no-op.
func (w *internalWatcher) DeleteMeshHandler(registration *WatcherHandlerRegistration) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	if len(w.handlers) == 0 {
		return
	}
	w.handlers = slices.FilterInPlace(w.handlers, func(reg *WatcherHandlerRegistration) bool {
		return reg != registration
	})
}
// HandleMeshConfigData keeps track of the standard (per-revision) mesh config.
// It is merged with the user mesh config, but takes precedence. The merged
// result is applied and handlers notified while holding the lock.
func (w *internalWatcher) HandleMeshConfigData(yaml string) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.revMeshConfig = yaml
	merged := w.merged()
	w.handleMeshConfigInternal(merged)
}
// HandleUserMeshConfig keeps track of user mesh config overrides. These are merged with the standard
// mesh config, which takes precedence. The merged result is applied and
// handlers notified while holding the lock.
func (w *internalWatcher) HandleUserMeshConfig(yaml string) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.userMeshConfig = yaml
	merged := w.merged()
	w.handleMeshConfigInternal(merged)
}
// merged returns the merged user and revision config, starting from the
// defaults. The user config is applied first; the revision config is applied
// on top and therefore takes precedence. Invalid fragments are logged and
// skipped rather than failing the merge.
func (w *internalWatcher) merged() *meshconfig.MeshConfig {
	mc := DefaultMeshConfig()
	if w.userMeshConfig != "" {
		mc1, err := ApplyMeshConfig(w.userMeshConfig, mc)
		if err != nil {
			log.Errorf("user config invalid, ignoring it %v %s", err, w.userMeshConfig)
		} else {
			mc = mc1
			log.Infof("Applied user config: %s", PrettyFormatOfMeshConfig(mc))
		}
	}
	if w.revMeshConfig != "" {
		mc1, err := ApplyMeshConfig(w.revMeshConfig, mc)
		if err != nil {
			// Bug fix: log the revision config that failed to apply, not the
			// (unrelated) user config.
			log.Errorf("revision config invalid, ignoring it %v %s", err, w.revMeshConfig)
		} else {
			mc = mc1
			log.Infof("Applied revision mesh config: %s", PrettyFormatOfMeshConfig(mc))
		}
	}
	return mc
}
// HandleMeshConfig calls all handlers for a given mesh configuration update.
// It acquires w.mutex itself, so callers must NOT already hold it (code that
// already holds the lock uses handleMeshConfigInternal instead); serializing
// through the lock keeps updates from being applied out of order.
func (w *internalWatcher) HandleMeshConfig(meshConfig *meshconfig.MeshConfig) {
	w.mutex.Lock()
	defer w.mutex.Unlock()
	w.handleMeshConfigInternal(meshConfig)
}
// handleMeshConfigInternal behaves the same as HandleMeshConfig but must be called under a lock
func (w *internalWatcher) handleMeshConfigInternal(meshConfig *meshconfig.MeshConfig) {
	var handlers []*WatcherHandlerRegistration

	// Only store and notify when the config actually changed.
	// NOTE(review): current may be nil if nothing was ever stored; the
	// ConfigSources comparison below would then dereference nil — confirm all
	// constructors call Store() before the first update arrives.
	current := w.MeshConfig.Load()
	if !reflect.DeepEqual(meshConfig, current) {
		log.Infof("mesh configuration updated to: %s", PrettyFormatOfMeshConfig(meshConfig))
		if !reflect.DeepEqual(meshConfig.ConfigSources, current.ConfigSources) {
			log.Info("mesh configuration sources have changed")
			// TODO Need to recreate or reload initConfigController()
		}
		w.MeshConfig.Store(meshConfig)
		// Snapshot the handler list while under the lock; invocation happens below.
		handlers = append(handlers, w.handlers...)
	}

	// TODO hack: the first handler added is the ConfigPush, other handlers affect what will be pushed, so reversing iteration
	for i := len(handlers) - 1; i >= 0; i-- {
		handlers[i].handler()
	}
}
// addFileWatcher registers file with the watcher and invokes callback on any
// change event for that file. Events are debounced: the callback runs once,
// 100ms after the first event in a burst, rather than once per raw event.
func addFileWatcher(fileWatcher filewatcher.FileWatcher, file string, callback func()) {
	_ = fileWatcher.Add(file)
	go func() {
		var debounce <-chan time.Time
		for {
			select {
			case <-debounce:
				debounce = nil
				callback()
			case <-fileWatcher.Events(file):
				// Open a debounce window only if one is not already pending.
				if debounce == nil {
					debounce = time.After(100 * time.Millisecond)
				}
			}
		}
	}()
}
// PrettyFormatOfMeshConfig renders the mesh config as indented JSON for logging.
// Marshal errors are deliberately ignored; on failure this returns "".
func PrettyFormatOfMeshConfig(meshConfig *meshconfig.MeshConfig) string {
	meshConfigDump, _ := protomarshal.ToJSONWithIndent(meshConfig, " ")
	return meshConfigDump
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mesh
import (
"errors"
"time"
meshconfig "istio.io/api/mesh/v1alpha1"
)
// TestWatcher is only used for testing; it exposes a blocking Update method
// that allows test environments to trigger meshConfig updates and wait for
// the handlers to fire.
type TestWatcher struct {
	internalWatcher
	doneCh chan struct{} // signaled by a registered mesh handler; used to implement a blocking Update method
}
// NewTestWatcher returns a TestWatcher seeded with the given mesh config.
// A handler is registered that signals doneCh, letting Update block until
// the watcher has fired its handlers.
func NewTestWatcher(meshConfig *meshconfig.MeshConfig) *TestWatcher {
	w := &TestWatcher{}
	w.internalWatcher.MeshConfig.Store(meshConfig)
	w.doneCh = make(chan struct{}, 1)
	w.AddMeshHandler(func() {
		w.doneCh <- struct{}{}
	})
	return w
}
// Update applies meshConfig and blocks until the watcher handlers trigger,
// or returns an error once timeout elapses.
func (t *TestWatcher) Update(meshConfig *meshconfig.MeshConfig, timeout time.Duration) error {
	t.HandleMeshConfig(meshConfig)
	deadline := time.After(timeout)
	select {
	case <-t.doneCh:
		return nil
	case <-deadline:
		return errors.New("timed out waiting for mesh.Watcher handler to trigger")
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
"time"
gogojsonpb "github.com/gogo/protobuf/jsonpb" // nolint: depguard
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
gogoproto "github.com/gogo/protobuf/proto" // nolint: depguard
gogotypes "github.com/gogo/protobuf/types" // nolint: depguard
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/structpb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
kubetypes "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/yaml"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/util/gogoprotomarshal"
"istio.io/istio/pkg/util/protomarshal"
)
// Meta is metadata attached to each configuration unit.
// The revision is optional, and if provided, identifies the
// last update operation on the object.
type Meta struct {
	// GroupVersionKind is a short configuration name that matches the content message type
	// (e.g. "route-rule")
	GroupVersionKind GroupVersionKind `json:"type,omitempty"`

	// UID is the unique identifier of the object; it is mapped onto the
	// Kubernetes object UID by ToObjectMeta.
	UID string `json:"uid,omitempty"`

	// Name is a unique immutable identifier in a namespace
	Name string `json:"name,omitempty"`

	// Namespace defines the space for names (optional for some types),
	// applications may choose to use namespaces for a variety of purposes
	// (security domains, fault domains, organizational domains)
	Namespace string `json:"namespace,omitempty"`

	// Domain defines the suffix of the fully qualified name past the namespace.
	// Domain is not a part of the unique key unlike name and namespace.
	Domain string `json:"domain,omitempty"`

	// Labels is a map of string keys and values that can be used to organize and categorize
	// (scope and select) objects.
	Labels map[string]string `json:"labels,omitempty"`

	// Annotations is an unstructured key value map stored with a resource that may be
	// set by external tools to store and retrieve arbitrary metadata. They are not
	// queryable and should be preserved when modifying objects.
	Annotations map[string]string `json:"annotations,omitempty"`

	// ResourceVersion is an opaque identifier for tracking updates to the config registry.
	// The implementation may use a change index or a commit log for the revision.
	// The config client should not make any assumptions about revisions and rely only on
	// exact equality to implement optimistic concurrency of read-write operations.
	//
	// The lifetime of an object of a particular revision depends on the underlying data store.
	// The data store may compactify old revisions in the interest of storage optimization.
	//
	// An empty revision carries a special meaning that the associated object has
	// not been stored and assigned a revision.
	ResourceVersion string `json:"resourceVersion,omitempty"`

	// CreationTimestamp records the creation time
	CreationTimestamp time.Time `json:"creationTimestamp,omitempty"`

	// OwnerReferences allows specifying in-namespace owning objects.
	OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty"`

	// Generation is a sequence number representing a specific generation of the desired state.
	// Populated by the system. Read-only.
	Generation int64 `json:"generation,omitempty"`
}
// Config is a configuration unit consisting of the type of configuration, the
// key identifier that is unique per type, and the content represented as a
// protobuf message.
type Config struct {
	Meta

	// Spec holds the configuration object. Per the Spec type contract it may
	// be a golang/protobuf message, a gogo/protobuf message, or any
	// JSON-serializable value.
	Spec Spec

	// Status holds long-running status.
	Status Status
}
// LabelsInRevision reports whether an object carrying the given labels belongs
// to the provided control-plane revision. Objects without a revision label are
// global and match every revision; an empty rev matches everything.
func LabelsInRevision(lbls map[string]string, rev string) bool {
	objectRev, hasLabel := lbls[label.IoIstioRev.Name]
	switch {
	case !hasLabel:
		// Global object, always included.
		return true
	case rev == "":
		// No specific revision requested: include everything.
		return true
	default:
		// Include only when revisions match.
		return objectRev == rev
	}
}

// ObjectInRevision reports whether the config object belongs to the given revision.
func ObjectInRevision(o *Config, rev string) bool {
	return LabelsInRevision(o.Labels, rev)
}
// Spec defines the spec for the config. In order to use the helper methods
// below (ToProto, ToJSON, ApplyJSON, DeepCopy, ...), this must be one of:
// * golang/protobuf Message
// * gogo/protobuf Message
// * Able to marshal/unmarshal using json
type Spec any
// ToProto converts a Spec to a protobuf Any. golang protobuf messages are
// marshaled directly; gogo messages are marshaled with gogo and repackaged;
// anything else is round-tripped through JSON into a structpb.Struct.
func ToProto(s Spec) (*anypb.Any, error) {
	// golang protobuf. Use protoreflect.ProtoMessage to distinguish from gogo
	// golang/protobuf 1.4+ will have this interface. Older golang/protobuf are gogo compatible
	// but also not used by Istio at all.
	if pb, ok := s.(protoreflect.ProtoMessage); ok {
		return protoconv.MessageToAnyWithError(pb)
	}

	// gogo protobuf
	if pb, ok := s.(gogoproto.Message); ok {
		gogoany, marshalErr := gogotypes.MarshalAny(pb)
		if marshalErr != nil {
			return nil, marshalErr
		}
		return &anypb.Any{TypeUrl: gogoany.TypeUrl, Value: gogoany.Value}, nil
	}

	// Fallback: JSON round-trip into a generic Struct.
	js, err := json.Marshal(s)
	if err != nil {
		return nil, err
	}
	pbs := &structpb.Struct{}
	if err := protomarshal.Unmarshal(js, pbs); err != nil {
		return nil, err
	}
	return protoconv.MessageToAnyWithError(pbs)
}
// ToMap converts a Spec to a generic map via its JSON form.
func ToMap(s Spec) (map[string]any, error) {
	js, err := ToJSON(s)
	if err != nil {
		return nil, err
	}
	var out map[string]any
	if err := json.Unmarshal(js, &out); err != nil {
		return nil, err
	}
	return out, nil
}

// ToJSON serializes a Spec to compact JSON.
func ToJSON(s Spec) ([]byte, error) {
	return toJSON(s, false)
}

// ToPrettyJSON serializes a Spec to indented JSON.
func ToPrettyJSON(s Spec) ([]byte, error) {
	return toJSON(s, true)
}
// toJSON serializes s to JSON. Proto messages (golang or gogo) use their
// proto-aware JSON marshalers; anything else goes through encoding/json.
// When pretty is true the output is indented.
func toJSON(s Spec, pretty bool) ([]byte, error) {
	indent := ""
	if pretty {
		indent = " "
	}

	// golang protobuf. Use protoreflect.ProtoMessage to distinguish from gogo
	// golang/protobuf 1.4+ will have this interface. Older golang/protobuf are gogo compatible
	// but also not used by Istio at all.
	if _, ok := s.(protoreflect.ProtoMessage); ok {
		if pb, ok := s.(proto.Message); ok {
			b, err := protomarshal.MarshalIndent(pb, indent)
			return b, err
		}
	}

	b := &bytes.Buffer{}
	// gogo protobuf
	if pb, ok := s.(gogoproto.Message); ok {
		err := (&gogojsonpb.Marshaler{Indent: indent}).Marshal(b, pb)
		// NOTE(review): customBytes.CheckLen is fuzzing instrumentation from
		// github.com/AdamKorcz/bugdetectors; it appears to pass b.Bytes()
		// through while recording its length — verify. It should not ship in
		// production code; remove it together with its import.
		return customBytes.CheckLen(b.Bytes(), "/src/istio/pkg/config/model.go:210:10 (May be slightly inaccurate) NEW_LINEb.Bytes()"), err
	}
	if pretty {
		return json.MarshalIndent(s, "", indent)
	}
	return json.Marshal(s)
}
// deepCopier is implemented by types that provide their own deep copy; when
// present it is preferred over the proto/JSON fallbacks in DeepCopy.
type deepCopier interface {
	DeepCopyInterface() any
}
// ApplyYAML applies the given YAML document onto s by converting it to JSON
// and delegating to ApplyJSON.
func ApplyYAML(s Spec, yml string) error {
	jsonBytes, err := yaml.YAMLToJSON([]byte(yml))
	if err != nil {
		return err
	}
	return ApplyJSON(s, string(jsonBytes))
}
// ApplyJSONStrict unmarshals js onto s, rejecting unknown fields. Proto
// messages (golang or gogo) use proto-aware unmarshaling; other types use
// encoding/json with DisallowUnknownFields.
func ApplyJSONStrict(s Spec, js string) error {
	// golang protobuf. Use protoreflect.ProtoMessage to distinguish from gogo
	// golang/protobuf 1.4+ will have this interface. Older golang/protobuf are gogo compatible
	// but also not used by Istio at all.
	if _, ok := s.(protoreflect.ProtoMessage); ok {
		if pb, ok := s.(proto.Message); ok {
			return protomarshal.ApplyJSONStrict(js, pb)
		}
	}

	// gogo protobuf
	if pb, ok := s.(gogoproto.Message); ok {
		return gogoprotomarshal.ApplyJSONStrict(js, pb)
	}

	dec := json.NewDecoder(bytes.NewReader([]byte(js)))
	dec.DisallowUnknownFields()
	return dec.Decode(&s)
}

// ApplyJSON unmarshals js onto s, tolerating unknown fields. Proto messages
// (golang or gogo) use proto-aware unmarshaling; other types use encoding/json.
func ApplyJSON(s Spec, js string) error {
	// golang protobuf. Use protoreflect.ProtoMessage to distinguish from gogo
	// golang/protobuf 1.4+ will have this interface. Older golang/protobuf are gogo compatible
	// but also not used by Istio at all.
	if _, ok := s.(protoreflect.ProtoMessage); ok {
		if pb, ok := s.(proto.Message); ok {
			return protomarshal.ApplyJSON(js, pb)
		}
	}

	// gogo protobuf
	if pb, ok := s.(gogoproto.Message); ok {
		return gogoprotomarshal.ApplyJSON(js, pb)
	}

	return json.Unmarshal([]byte(js), &s)
}
// DeepCopy returns a deep copy of s, choosing the cheapest available
// mechanism: a type-provided DeepCopyInterface, proto.Clone for protobuf
// messages (golang or gogo), or a JSON round-trip via reflection as a last
// resort. Returns nil on nil input, or if the fallback fails to
// marshal/unmarshal.
func DeepCopy(s any) any {
	if s == nil {
		return nil
	}

	// If deep copy is defined, use that
	if dc, ok := s.(deepCopier); ok {
		return dc.DeepCopyInterface()
	}

	// golang protobuf. Use protoreflect.ProtoMessage to distinguish from gogo
	// golang/protobuf 1.4+ will have this interface. Older golang/protobuf are gogo compatible
	// but also not used by Istio at all.
	if _, ok := s.(protoreflect.ProtoMessage); ok {
		if pb, ok := s.(proto.Message); ok {
			return proto.Clone(pb)
		}
	}

	// gogo protobuf
	if pb, ok := s.(gogoproto.Message); ok {
		return gogoproto.Clone(pb)
	}

	// If we don't have a deep copy method, we will have to do some reflection magic. Its not ideal,
	// but all Istio types have an efficient deep copy.
	js, err := json.Marshal(s)
	if err != nil {
		return nil
	}

	// Allocate a fresh value of s's dynamic type and unmarshal into it, then
	// unwrap the pointer created by reflect.New.
	data := reflect.New(reflect.TypeOf(s)).Interface()
	if err := json.Unmarshal(js, data); err != nil {
		return nil
	}
	data = reflect.ValueOf(data).Elem().Interface()
	return data
}
// Status holds arbitrary long-running status for a configuration object.
type Status any
// Key builds the unique identifier for a configuration object from its
// group, version, kind, name, and namespace, as "grp/ver/typ/namespace/name".
func Key(grp, ver, typ, name, namespace string) string {
	return fmt.Sprintf("%s/%s/%s/%s/%s", grp, ver, typ, namespace, name)
}
// Key is the unique identifier for a configuration object, derived from its
// GroupVersionKind plus name and namespace.
func (meta *Meta) Key() string {
	gvk := meta.GroupVersionKind
	return Key(gvk.Group, gvk.Version, gvk.Kind, meta.Name, meta.Namespace)
}
// ToObjectMeta converts this Meta into a Kubernetes ObjectMeta, mapping each
// field onto its Kubernetes counterpart. Domain and GroupVersionKind have no
// ObjectMeta equivalent and are not carried over.
func (meta *Meta) ToObjectMeta() metav1.ObjectMeta {
	return metav1.ObjectMeta{
		Name:              meta.Name,
		Namespace:         meta.Namespace,
		UID:               kubetypes.UID(meta.UID),
		ResourceVersion:   meta.ResourceVersion,
		Generation:        meta.Generation,
		CreationTimestamp: metav1.NewTime(meta.CreationTimestamp),
		Labels:            meta.Labels,
		Annotations:       meta.Annotations,
		OwnerReferences:   meta.OwnerReferences,
	}
}
// DeepCopy returns a deep copy of the Config: the label/annotation maps,
// Spec, and Status are all cloned so mutations on the copy never affect c.
func (c Config) DeepCopy() Config {
	clone := Config{Meta: c.Meta}
	if c.Labels != nil {
		lbls := make(map[string]string, len(c.Labels))
		for k, v := range c.Labels {
			lbls[k] = v
		}
		clone.Labels = lbls
	}
	if c.Annotations != nil {
		anns := make(map[string]string, len(c.Annotations))
		for k, v := range c.Annotations {
			anns[k] = v
		}
		clone.Annotations = anns
	}
	clone.Spec = DeepCopy(c.Spec)
	if c.Status != nil {
		clone.Status = DeepCopy(c.Status)
	}
	return clone
}
// GetName returns the resource name.
func (c Config) GetName() string {
	return c.Name
}

// GetNamespace returns the resource namespace.
func (c Config) GetNamespace() string {
	return c.Namespace
}

// GetCreationTimestamp returns when the resource was created.
func (c Config) GetCreationTimestamp() time.Time {
	return c.CreationTimestamp
}

// NamespacedName returns the namespace/name pair identifying this config.
func (c Config) NamespacedName() kubetypes.NamespacedName {
	return kubetypes.NamespacedName{
		Namespace: c.Namespace,
		Name:      c.Name,
	}
}
// GroupVersionKind must implement fmt.Stringer (rendered as "group/version/kind").
var _ fmt.Stringer = GroupVersionKind{}

// GroupVersionKind identifies a config schema by API group, version, and kind.
type GroupVersionKind struct {
	Group   string `json:"group"`
	Version string `json:"version"`
	Kind    string `json:"kind"`
}
// String renders the GVK as "group/version/kind", defaulting an empty group
// to "core".
func (g GroupVersionKind) String() string {
	return g.CanonicalGroup() + "/" + g.Version + "/" + g.Kind
}

// GroupVersion returns the group/version similar to what would be found in
// the apiVersion field of a Kubernetes resource.
func (g GroupVersionKind) GroupVersion() string {
	if g.Group != "" {
		return g.Group + "/" + g.Version
	}
	return g.Version
}

// Kubernetes returns the same GVK, using the Kubernetes object type.
func (g GroupVersionKind) Kubernetes() schema.GroupVersionKind {
	return schema.GroupVersionKind{Group: g.Group, Version: g.Version, Kind: g.Kind}
}

// CanonicalGroup returns the group with defaulting applied: an empty group is
// treated as "core", following Kubernetes API standards.
func (g GroupVersionKind) CanonicalGroup() string {
	if g.Group == "" {
		return "core"
	}
	return g.Group
}
// PatchFunc provides the cached config as a base for modification. Only the diff between the cfg
// parameter and the returned Config will be applied, using the returned patch type.
type PatchFunc func(cfg Config) (Config, kubetypes.PatchType)

// Namer is implemented by anything addressable by a name and namespace.
type Namer interface {
	GetName() string
	GetNamespace() string
}
// NamespacedName extracts the namespace/name pair from a Kubernetes object.
func NamespacedName(o metav1.Object) kubetypes.NamespacedName {
	return kubetypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocol
import "strings"
// Instance defines network protocols for ports
type Instance string

// String implements fmt.Stringer.
func (i Instance) String() string {
	return string(i)
}

const (
	// GRPC declares that the port carries gRPC traffic.
	GRPC Instance = "GRPC"
	// GRPCWeb declares that the port carries gRPC-Web traffic.
	GRPCWeb Instance = "GRPC-Web"
	// HTTP declares that the port carries HTTP/1.1 traffic.
	// Note that HTTP/1.0 or earlier may not be supported by the proxy.
	HTTP Instance = "HTTP"
	// HTTP_PROXY declares that the port is a generic outbound proxy port.
	// Note that this is currently applicable only for defining sidecar egress listeners.
	// nolint
	HTTP_PROXY Instance = "HTTP_PROXY"
	// HTTP2 declares that the port carries HTTP/2 traffic.
	HTTP2 Instance = "HTTP2"
	// HTTPS declares that the port carries HTTPS traffic.
	HTTPS Instance = "HTTPS"
	// TCP declares the port uses TCP.
	// This is the default protocol for a service port.
	TCP Instance = "TCP"
	// TLS declares that the port carries TLS traffic.
	// TLS traffic is assumed to contain SNI as part of the handshake.
	TLS Instance = "TLS"
	// UDP declares that the port uses UDP.
	// Note that UDP protocol is not currently supported by the proxy.
	UDP Instance = "UDP"
	// Mongo declares that the port carries MongoDB traffic.
	Mongo Instance = "Mongo"
	// Redis declares that the port carries Redis traffic.
	Redis Instance = "Redis"
	// MySQL declares that the port carries MySQL traffic.
	MySQL Instance = "MySQL"
	// HBONE declares that the port carries HBONE traffic.
	// This cannot be declared by Services, but is used for some internal code that uses Protocol
	HBONE Instance = "HBONE"
	// Unsupported - value to signify that the protocol is unsupported.
	Unsupported Instance = "UnsupportedProtocol"
)

// instanceByName maps the lower-cased protocol name to its Instance.
// HBONE is intentionally absent: it cannot be declared on Services.
var instanceByName = map[string]Instance{
	"tcp":        TCP,
	"udp":        UDP,
	"grpc":       GRPC,
	"grpc-web":   GRPCWeb,
	"http":       HTTP,
	"http_proxy": HTTP_PROXY,
	"http2":      HTTP2,
	"https":      HTTPS,
	"tls":        TLS,
	"mongo":      Mongo,
	"redis":      Redis,
	"mysql":      MySQL,
}

// Parse resolves a protocol name, ignoring case. Unknown names yield Unsupported.
func Parse(s string) Instance {
	if p, ok := instanceByName[strings.ToLower(s)]; ok {
		return p
	}
	return Unsupported
}
// IsHTTP2 is true for protocols that use HTTP/2 as transport protocol
func (i Instance) IsHTTP2() bool {
	return i == HTTP2 || i == GRPC || i == GRPCWeb
}

// IsHTTPOrSniffed is true for protocols that use HTTP as transport protocol, or *can* use it if sniffed to be HTTP
func (i Instance) IsHTTPOrSniffed() bool {
	return i.IsHTTP() || i.IsUnsupported()
}

// IsHTTP is true for protocols that use HTTP as transport protocol
func (i Instance) IsHTTP() bool {
	return i == HTTP || i == HTTP2 || i == HTTP_PROXY || i == GRPC || i == GRPCWeb
}

// IsTCP is true for protocols that use TCP as transport protocol
func (i Instance) IsTCP() bool {
	return i == TCP || i == HTTPS || i == TLS || i == Mongo || i == Redis || i == MySQL
}

// IsTLS is true for protocols on top of TLS (e.g. HTTPS)
func (i Instance) IsTLS() bool {
	return i == HTTPS || i == TLS
}

// IsHTTPS is true if protocol is HTTPS
func (i Instance) IsHTTPS() bool {
	return i == HTTPS
}

// IsGRPC is true for GRPC protocols.
func (i Instance) IsGRPC() bool {
	return i == GRPC || i == GRPCWeb
}

// IsUnsupported is true when the protocol could not be recognized.
func (i Instance) IsUnsupported() bool {
	return i == Unsupported
}

// AfterTLSTermination returns the protocol that will be used if TLS is terminated on the current protocol.
func (i Instance) AfterTLSTermination() Instance {
	switch i {
	case HTTPS:
		return HTTP
	case TLS:
		return TCP
	}
	return i
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package resource contains core abstract types for representing configuration resources.
package resource
import (
"istio.io/istio/pkg/config"
)
// Instance is the abstract representation of a versioned config resource in Istio.
type Instance struct {
	Metadata Metadata    // identity, version, labels/annotations of the resource
	Message  config.Spec // the resource body; nil for an empty instance
	Origin   Origin      // provenance of the resource
}

// IsEmpty returns true if the resource Instance.Message is nil.
func (r *Instance) IsEmpty() bool {
	return r.Message == nil
}
// Clone returns a deep-copy of this entry. Warning, this is expensive!
// Note that Origin is intentionally not copied, matching prior behavior.
func (r *Instance) Clone() *Instance {
	out := &Instance{Metadata: r.Metadata.Clone()}
	if r.Message != nil {
		out.Message = config.DeepCopy(r.Message)
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource
import (
"time"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/maps"
)
// Metadata about a resource.
type Metadata struct {
	Schema      resource.Schema   // schema describing the resource type
	FullName    FullName          // namespace/name identity of the resource
	CreateTime  time.Time         // creation timestamp
	Version     Version           // opaque resource version
	Generation  int64             // generation of the desired state
	Labels      map[string]string // resource labels
	Annotations map[string]string // resource annotations
}
// Clone Metadata. Warning, this is expensive! The label and annotation maps
// are copied so the clone is independent of the receiver.
func (m *Metadata) Clone() Metadata {
	out := *m
	out.Labels = maps.Clone(m.Labels)
	out.Annotations = maps.Clone(m.Annotations)
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource
import (
"fmt"
"strings"
)
// Namespace containing the resource.
type Namespace string

// String implements fmt.Stringer.
func (n Namespace) String() string {
	return string(n)
}

// LocalName that uniquely identifies the resource within the Namespace.
type LocalName string

// String implements fmt.Stringer.
func (n LocalName) String() string {
	return string(n)
}

// FullName is a name that uniquely identifies a resource within the mesh.
type FullName struct {
	Namespace Namespace
	Name      LocalName
}
// String renders the name as "namespace/name", or just "name" when the
// namespace is empty.
func (n FullName) String() string {
	if n.Namespace == "" {
		return string(n.Name)
	}
	return string(n.Namespace) + "/" + string(n.Name)
}
// NewShortOrFullName tries to parse the given name to resource.Name. If the name does not include
// namespace information, the defaultNamespace is used.
func NewShortOrFullName(defaultNamespace Namespace, name string) FullName {
	// Split on the first "/" only: "ns/a/b" yields namespace "ns" and name
	// "a/b", identical to the previous SplitN(name, "/", 2) behavior but with
	// the idiomatic split-once helper.
	ns, local, found := strings.Cut(name, "/")
	if !found {
		return FullName{
			Namespace: defaultNamespace,
			Name:      LocalName(name),
		}
	}
	return FullName{
		Namespace: Namespace(ns),
		Name:      LocalName(local),
	}
}
// Validate that the Name is set (the Namespace may legitimately be empty).
func (n FullName) Validate() error {
	if n.Name == "" {
		return fmt.Errorf("invalid name '%s': name must not be empty", n.String())
	}
	return nil
}
// NewFullName creates a new FullName from the given Namespace and Name.
// No validation is performed; use Validate or ParseFullName for that.
func NewFullName(ns Namespace, n LocalName) FullName {
	return FullName{
		Namespace: ns,
		Name:      n,
	}
}
// ParseFullName parses the given name string that was serialized via FullName.String()
func ParseFullName(name string) (FullName, error) {
	return ParseFullNameWithDefaultNamespace("", name)
}

// ParseFullNameWithDefaultNamespace parses the given name string using defaultNamespace if no namespace is found.
func ParseFullNameWithDefaultNamespace(defaultNamespace Namespace, name string) (FullName, error) {
	parsed := NewShortOrFullName(defaultNamespace, name)
	if err := parsed.Validate(); err != nil {
		return FullName{}, fmt.Errorf("failed parsing name '%v': %v", name, err)
	}
	return parsed, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource
import (
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/kube/controllers"
)
// PilotConfigToInstance convert from config.Config, which has no associated proto, to MCP Resource proto.
// NOTE(review): Generation and UID from the source Meta are not carried over
// into Metadata — confirm downstream consumers do not need them.
func PilotConfigToInstance(c *config.Config, schema resource.Schema) *Instance {
	return &Instance{
		Metadata: Metadata{
			Schema:      schema,
			FullName:    FullName{Namespace(c.Namespace), LocalName(c.Name)},
			CreateTime:  c.CreationTimestamp,
			Version:     Version(c.ResourceVersion),
			Labels:      c.Labels,
			Annotations: c.Annotations,
		},
		Message: c.Spec,
	}
}

// ObjectToInstance convert from a controller object to MCP Resource proto.
// Note you need to pass the object and its spec separately; the spec is
// stored verbatim as the Message.
func ObjectToInstance(c controllers.Object, spec config.Spec, schema resource.Schema) *Instance {
	return &Instance{
		Metadata: Metadata{
			Schema:      schema,
			FullName:    FullName{Namespace(c.GetNamespace()), LocalName(c.GetName())},
			CreateTime:  c.GetCreationTimestamp().Time,
			Version:     Version(c.GetResourceVersion()),
			Labels:      c.GetLabels(),
			Annotations: c.GetAnnotations(),
		},
		Message: spec,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collection
import (
"fmt"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/resource"
)
// Schema for a collection.
type Schema interface {
	fmt.Stringer

	// VariableName is a utility method used to help with codegen. It provides the name of a Schema instance variable.
	VariableName() string

	// Resource is the schema for resources contained in this collection.
	Resource() resource.Schema

	// Equal is a helper function for testing equality between Schema instances. This supports comparison
	// with the cmp library.
	Equal(other Schema) bool
}

// Builder is config for the creation of a Schema
type Builder struct {
	// VariableName is the codegen variable name for the schema instance.
	VariableName string
	// Resource is the resource schema; required (Build fails when nil).
	Resource resource.Schema
}
// Build a Schema instance, validating that a resource schema was supplied.
func (b Builder) Build() (Schema, error) {
	if b.Resource == nil {
		return nil, fmt.Errorf("collection %s: resource must be non-nil", b.VariableName)
	}
	built := &schemaImpl{
		variableName: b.VariableName,
		resource:     b.Resource,
	}
	return built, nil
}

// MustBuild calls Build and panics if it fails.
func (b Builder) MustBuild() Schema {
	built, err := b.Build()
	if err != nil {
		panic(fmt.Sprintf("MustBuild: %v", err))
	}
	return built
}
// schemaImpl is the canonical Schema implementation.
type schemaImpl struct {
	resource resource.Schema
	// name is included in String() output. NOTE(review): nothing visible here
	// ever assigns it, so it renders as the zero GroupVersionKind — confirm
	// whether Build should populate it.
	name         config.GroupVersionKind
	variableName string
}

// String interface method implementation.
func (s *schemaImpl) String() string {
	return fmt.Sprintf("[Schema](%s, %q, %s)", s.name, s.resource.ProtoPackage(), s.resource.Proto())
}

// VariableName returns the codegen variable name for this schema.
func (s *schemaImpl) VariableName() string {
	return s.variableName
}

// Resource returns the resource schema for this collection.
func (s *schemaImpl) Resource() resource.Schema {
	return s.resource
}

// Equal compares by variable name and resource schema.
func (s *schemaImpl) Equal(o Schema) bool {
	return s.variableName == o.VariableName() &&
		s.Resource().Equal(o.Resource())
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collection
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/hashicorp/go-multierror"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/sets"
)
// Schemas contains metadata about configuration resources.
type Schemas struct {
	// byCollection indexes schemas by GroupVersionKind for lookup and
	// duplicate detection.
	byCollection map[config.GroupVersionKind]resource.Schema
	// byAddOrder preserves insertion order for deterministic iteration.
	byAddOrder []resource.Schema
}
// SchemasFor is a shortcut for creating Schemas. It uses MustAdd for each element.
func SchemasFor(schemas ...resource.Schema) Schemas {
	builder := NewSchemasBuilder()
	for _, s := range schemas {
		builder.MustAdd(s)
	}
	return builder.Build()
}
// SchemasBuilder is a builder for the schemas type.
type SchemasBuilder struct {
	schemas Schemas
}

// NewSchemasBuilder returns a new instance of SchemasBuilder with an
// initialized (empty) collection index.
func NewSchemasBuilder() *SchemasBuilder {
	s := Schemas{
		byCollection: make(map[config.GroupVersionKind]resource.Schema),
	}
	return &SchemasBuilder{
		schemas: s,
	}
}
// Add a new collection to the schemas. Adding the same GVK twice is an error.
func (b *SchemasBuilder) Add(s resource.Schema) error {
	gvk := s.GroupVersionKind()
	if _, exists := b.schemas.byCollection[gvk]; exists {
		return fmt.Errorf("collection already exists: %v", gvk)
	}
	b.schemas.byCollection[gvk] = s
	b.schemas.byAddOrder = append(b.schemas.byAddOrder, s)
	return nil
}

// MustAdd calls Add and panics if it fails.
func (b *SchemasBuilder) MustAdd(s resource.Schema) *SchemasBuilder {
	if err := b.Add(s); err != nil {
		panic(fmt.Sprintf("SchemasBuilder.MustAdd: %v", err))
	}
	return b
}
// Build a new schemas from this SchemasBuilder.
func (b *SchemasBuilder) Build() Schemas {
	built := b.schemas
	// Reset the builder so later mutations cannot affect the returned Schemas.
	b.schemas = Schemas{}
	return built
}

// ForEach executes the given function on each contained schema, until the function returns true.
func (s Schemas) ForEach(handleSchema func(resource.Schema) (done bool)) {
	for _, sc := range s.byAddOrder {
		if done := handleSchema(sc); done {
			break
		}
	}
}
// Union returns a Schemas containing every schema present in either s or otherSchemas.
func (s Schemas) Union(otherSchemas Schemas) Schemas {
	b := NewSchemasBuilder()
	for _, set := range []Schemas{s, otherSchemas} {
		for _, sc := range set.All() {
			// Add returns an error only for a schema already added; for a
			// union that is harmless, so the error is deliberately ignored.
			_ = b.Add(sc)
		}
	}
	return b.Build()
}
// Intersect returns a Schemas containing only the schemas present in both s and otherSchemas.
func (s Schemas) Intersect(otherSchemas Schemas) Schemas {
	inFirst := sets.String{}
	for _, sc := range s.All() {
		inFirst.Insert(sc.String())
	}
	b := NewSchemasBuilder()
	// Only add schemas that are in both sets.
	for _, sc := range otherSchemas.All() {
		if inFirst.Contains(sc.String()) {
			// Duplicate-add errors are harmless here and deliberately ignored.
			_ = b.Add(sc)
		}
	}
	return b.Build()
}
// FindByGroupVersionKind searches and returns the first schema with the given GVK,
// scanning in add order. The second return value reports whether a match was found.
func (s Schemas) FindByGroupVersionKind(gvk config.GroupVersionKind) (resource.Schema, bool) {
	for _, candidate := range s.byAddOrder {
		if candidate.GroupVersionKind() == gvk {
			return candidate, true
		}
	}
	return nil, false
}
// FindByGroupVersionAliasesKind searches and returns the first schema whose
// version-alias GVKs include the given GVK.
func (s Schemas) FindByGroupVersionAliasesKind(gvk config.GroupVersionKind) (resource.Schema, bool) {
	for _, candidate := range s.byAddOrder {
		for _, alias := range candidate.GroupVersionAliasKinds() {
			if alias == gvk {
				return candidate, true
			}
		}
	}
	return nil, false
}
// FindByGroupVersionResource searches and returns the first schema with the given GVR,
// scanning in add order. The second return value reports whether a match was found.
func (s Schemas) FindByGroupVersionResource(gvr schema.GroupVersionResource) (resource.Schema, bool) {
	for _, candidate := range s.byAddOrder {
		if candidate.GroupVersionResource() == gvr {
			return candidate, true
		}
	}
	return nil, false
}
// All returns all known Schemas, in add order. The returned slice is a copy,
// so callers may mutate it freely.
func (s Schemas) All() []resource.Schema {
	out := slices.Clone(s.byAddOrder)
	return out
}
// GroupVersionKinds returns all known GroupVersionKinds, in add order.
// The result is never nil, matching the previous behavior of starting from an
// empty (non-nil) slice.
func (s Schemas) GroupVersionKinds() []config.GroupVersionKind {
	// Iterate byAddOrder directly (All() clones the slice, which is wasted
	// work here) and pre-size the result to avoid repeated growth.
	res := make([]config.GroupVersionKind, 0, len(s.byAddOrder))
	for _, r := range s.byAddOrder {
		res = append(res, r.GroupVersionKind())
	}
	return res
}
// Add creates a copy of this Schemas with the given schemas added.
// Panics (via MustAdd) if any addition duplicates an existing GVK.
func (s Schemas) Add(toAdd ...resource.Schema) Schemas {
	b := NewSchemasBuilder()
	for _, existing := range s.byAddOrder {
		b.MustAdd(existing)
	}
	for _, extra := range toAdd {
		b.MustAdd(extra)
	}
	return b.Build()
}
// Remove creates a copy of this Schemas with the given schemas removed.
func (s Schemas) Remove(toRemove ...resource.Schema) Schemas {
	b := NewSchemasBuilder()
outer:
	for _, existing := range s.byAddOrder {
		for _, victim := range toRemove {
			if victim.Equal(existing) {
				// Skip schemas slated for removal.
				continue outer
			}
		}
		b.MustAdd(existing)
	}
	return b.Build()
}
// Kinds returns all known resource kinds, deduplicated and sorted.
func (s Schemas) Kinds() []string {
	kindSet := sets.NewWithLength[string](len(s.byAddOrder))
	for _, sc := range s.byAddOrder {
		kindSet.Insert(sc.Kind())
	}
	return slices.Sort(kindSet.UnsortedList())
}
// Validate the schemas. Returns an aggregate error if any schema fails its own
// Validate, or nil when all pass.
func (s Schemas) Validate() error {
	var result error
	for _, c := range s.byAddOrder {
		result = multierror.Append(result, c.Validate()).ErrorOrNil()
	}
	return result
}
// Equal reports whether s and o contain the same schemas in the same add order.
func (s Schemas) Equal(o Schemas) bool {
	equal := cmp.Equal(s.byAddOrder, o.byAddOrder)
	return equal
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collections
import (
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/collection"
)
// Istio is the Pilot schema set extended with the MeshNetworks and MeshConfig schemas.
var Istio = Pilot.Add(MeshNetworks).Add(MeshConfig)
// PilotGatewayAPI returns the Gateway API schema set used by Pilot: the alpha
// set when EnableAlphaGatewayAPI is set, otherwise the stable set.
func PilotGatewayAPI() collection.Schemas {
	if !features.EnableAlphaGatewayAPI {
		return pilotStableGatewayAPI
	}
	return pilotGatewayAPI
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package collections
import (
"errors"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/config/validation"
testconfig "istio.io/istio/pkg/test/config"
)
var (
	// Mock is used purely for testing
	Mock = resource.Builder{
		ClusterScoped: false,
		Kind:          "MockConfig",
		Plural:        "mockconfigs",
		Group:         "test.istio.io",
		Version:       "v1",
		Proto:         "config.MockConfig",
		ProtoPackage:  "istio.io/istio/pkg/test/config",
		// ValidateProto rejects MockConfig specs whose Key field is empty.
		ValidateProto: func(cfg config.Config) (validation.Warning, error) {
			if cfg.Spec.(*testconfig.MockConfig).Key == "" {
				return nil, errors.New("empty key")
			}
			return nil, nil
		},
	}.MustBuild()
	// Mocks is a Schemas containing the Mock Schema.
	Mocks = collection.NewSchemasBuilder().MustAdd(Mock).Build()
)
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package gvk
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvr"
)
// Well-known GroupVersionKinds. Generated code: entries suffixed _v1,
// _v1beta1, or _v1alpha2 are alternate API versions of the unsuffixed entry.
var (
	AuthorizationPolicy            = config.GroupVersionKind{Group: "security.istio.io", Version: "v1beta1", Kind: "AuthorizationPolicy"}
	AuthorizationPolicy_v1         = config.GroupVersionKind{Group: "security.istio.io", Version: "v1", Kind: "AuthorizationPolicy"}
	CertificateSigningRequest      = config.GroupVersionKind{Group: "certificates.k8s.io", Version: "v1", Kind: "CertificateSigningRequest"}
	ConfigMap                      = config.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}
	CustomResourceDefinition       = config.GroupVersionKind{Group: "apiextensions.k8s.io", Version: "v1", Kind: "CustomResourceDefinition"}
	DaemonSet                      = config.GroupVersionKind{Group: "apps", Version: "v1", Kind: "DaemonSet"}
	Deployment                     = config.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
	DestinationRule                = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "DestinationRule"}
	DestinationRule_v1beta1        = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "DestinationRule"}
	EndpointSlice                  = config.GroupVersionKind{Group: "", Version: "v1", Kind: "EndpointSlice"}
	Endpoints                      = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Endpoints"}
	EnvoyFilter                    = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "EnvoyFilter"}
	GRPCRoute                      = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "GRPCRoute"}
	Gateway                        = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Gateway"}
	Gateway_v1beta1                = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "Gateway"}
	GatewayClass                   = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1beta1", Kind: "GatewayClass"}
	GatewayClass_v1alpha2          = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "GatewayClass"}
	GatewayClass_v1                = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1", Kind: "GatewayClass"}
	HTTPRoute                      = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1beta1", Kind: "HTTPRoute"}
	HTTPRoute_v1alpha2             = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "HTTPRoute"}
	HTTPRoute_v1                   = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1", Kind: "HTTPRoute"}
	Ingress                        = config.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "Ingress"}
	IngressClass                   = config.GroupVersionKind{Group: "networking.k8s.io", Version: "v1", Kind: "IngressClass"}
	// KubernetesGateway is the gateway-api Gateway, distinct from the Istio Gateway above.
	KubernetesGateway              = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1beta1", Kind: "Gateway"}
	KubernetesGateway_v1alpha2     = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "Gateway"}
	KubernetesGateway_v1           = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1", Kind: "Gateway"}
	Lease                          = config.GroupVersionKind{Group: "coordination.k8s.io", Version: "v1", Kind: "Lease"}
	MeshConfig                     = config.GroupVersionKind{Group: "", Version: "v1alpha1", Kind: "MeshConfig"}
	MeshNetworks                   = config.GroupVersionKind{Group: "", Version: "v1alpha1", Kind: "MeshNetworks"}
	MutatingWebhookConfiguration   = config.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "MutatingWebhookConfiguration"}
	Namespace                      = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}
	Node                           = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}
	PeerAuthentication             = config.GroupVersionKind{Group: "security.istio.io", Version: "v1beta1", Kind: "PeerAuthentication"}
	Pod                            = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}
	ProxyConfig                    = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "ProxyConfig"}
	ReferenceGrant                 = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1beta1", Kind: "ReferenceGrant"}
	ReferenceGrant_v1alpha2        = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "ReferenceGrant"}
	RequestAuthentication          = config.GroupVersionKind{Group: "security.istio.io", Version: "v1beta1", Kind: "RequestAuthentication"}
	RequestAuthentication_v1       = config.GroupVersionKind{Group: "security.istio.io", Version: "v1", Kind: "RequestAuthentication"}
	Secret                         = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}
	Service                        = config.GroupVersionKind{Group: "", Version: "v1", Kind: "Service"}
	ServiceAccount                 = config.GroupVersionKind{Group: "", Version: "v1", Kind: "ServiceAccount"}
	ServiceEntry                   = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "ServiceEntry"}
	ServiceEntry_v1beta1           = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "ServiceEntry"}
	Sidecar                        = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "Sidecar"}
	Sidecar_v1beta1                = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "Sidecar"}
	StatefulSet                    = config.GroupVersionKind{Group: "apps", Version: "v1", Kind: "StatefulSet"}
	TCPRoute                       = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "TCPRoute"}
	TLSRoute                       = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "TLSRoute"}
	Telemetry                      = config.GroupVersionKind{Group: "telemetry.istio.io", Version: "v1alpha1", Kind: "Telemetry"}
	UDPRoute                       = config.GroupVersionKind{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Kind: "UDPRoute"}
	ValidatingWebhookConfiguration = config.GroupVersionKind{Group: "admissionregistration.k8s.io", Version: "v1", Kind: "ValidatingWebhookConfiguration"}
	VirtualService                 = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "VirtualService"}
	VirtualService_v1beta1         = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "VirtualService"}
	WasmPlugin                     = config.GroupVersionKind{Group: "extensions.istio.io", Version: "v1alpha1", Kind: "WasmPlugin"}
	WorkloadEntry                  = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "WorkloadEntry"}
	WorkloadEntry_v1beta1          = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "WorkloadEntry"}
	WorkloadGroup                  = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1alpha3", Kind: "WorkloadGroup"}
	WorkloadGroup_v1beta1          = config.GroupVersionKind{Group: "networking.istio.io", Version: "v1beta1", Kind: "WorkloadGroup"}
)
// ToGVR converts a GVK to a GVR.
// The second return value is false when the GVK is not one of the generated
// well-known types; callers must check it before using the GVR.
func ToGVR(g config.GroupVersionKind) (schema.GroupVersionResource, bool) {
	switch g {
	case AuthorizationPolicy:
		return gvr.AuthorizationPolicy, true
	case AuthorizationPolicy_v1:
		return gvr.AuthorizationPolicy_v1, true
	case CertificateSigningRequest:
		return gvr.CertificateSigningRequest, true
	case ConfigMap:
		return gvr.ConfigMap, true
	case CustomResourceDefinition:
		return gvr.CustomResourceDefinition, true
	case DaemonSet:
		return gvr.DaemonSet, true
	case Deployment:
		return gvr.Deployment, true
	case DestinationRule:
		return gvr.DestinationRule, true
	case DestinationRule_v1beta1:
		return gvr.DestinationRule_v1beta1, true
	case EndpointSlice:
		return gvr.EndpointSlice, true
	case Endpoints:
		return gvr.Endpoints, true
	case EnvoyFilter:
		return gvr.EnvoyFilter, true
	case GRPCRoute:
		return gvr.GRPCRoute, true
	case Gateway:
		return gvr.Gateway, true
	case Gateway_v1beta1:
		return gvr.Gateway_v1beta1, true
	case GatewayClass:
		return gvr.GatewayClass, true
	case GatewayClass_v1alpha2:
		return gvr.GatewayClass_v1alpha2, true
	case GatewayClass_v1:
		return gvr.GatewayClass_v1, true
	case HTTPRoute:
		return gvr.HTTPRoute, true
	case HTTPRoute_v1alpha2:
		return gvr.HTTPRoute_v1alpha2, true
	case HTTPRoute_v1:
		return gvr.HTTPRoute_v1, true
	case Ingress:
		return gvr.Ingress, true
	case IngressClass:
		return gvr.IngressClass, true
	case KubernetesGateway:
		return gvr.KubernetesGateway, true
	case KubernetesGateway_v1alpha2:
		return gvr.KubernetesGateway_v1alpha2, true
	case KubernetesGateway_v1:
		return gvr.KubernetesGateway_v1, true
	case Lease:
		return gvr.Lease, true
	case MeshConfig:
		return gvr.MeshConfig, true
	case MeshNetworks:
		return gvr.MeshNetworks, true
	case MutatingWebhookConfiguration:
		return gvr.MutatingWebhookConfiguration, true
	case Namespace:
		return gvr.Namespace, true
	case Node:
		return gvr.Node, true
	case PeerAuthentication:
		return gvr.PeerAuthentication, true
	case Pod:
		return gvr.Pod, true
	case ProxyConfig:
		return gvr.ProxyConfig, true
	case ReferenceGrant:
		return gvr.ReferenceGrant, true
	case ReferenceGrant_v1alpha2:
		return gvr.ReferenceGrant_v1alpha2, true
	case RequestAuthentication:
		return gvr.RequestAuthentication, true
	case RequestAuthentication_v1:
		return gvr.RequestAuthentication_v1, true
	case Secret:
		return gvr.Secret, true
	case Service:
		return gvr.Service, true
	case ServiceAccount:
		return gvr.ServiceAccount, true
	case ServiceEntry:
		return gvr.ServiceEntry, true
	case ServiceEntry_v1beta1:
		return gvr.ServiceEntry_v1beta1, true
	case Sidecar:
		return gvr.Sidecar, true
	case Sidecar_v1beta1:
		return gvr.Sidecar_v1beta1, true
	case StatefulSet:
		return gvr.StatefulSet, true
	case TCPRoute:
		return gvr.TCPRoute, true
	case TLSRoute:
		return gvr.TLSRoute, true
	case Telemetry:
		return gvr.Telemetry, true
	case UDPRoute:
		return gvr.UDPRoute, true
	case ValidatingWebhookConfiguration:
		return gvr.ValidatingWebhookConfiguration, true
	case VirtualService:
		return gvr.VirtualService, true
	case VirtualService_v1beta1:
		return gvr.VirtualService_v1beta1, true
	case WasmPlugin:
		return gvr.WasmPlugin, true
	case WorkloadEntry:
		return gvr.WorkloadEntry, true
	case WorkloadEntry_v1beta1:
		return gvr.WorkloadEntry_v1beta1, true
	case WorkloadGroup:
		return gvr.WorkloadGroup, true
	case WorkloadGroup_v1beta1:
		return gvr.WorkloadGroup_v1beta1, true
	}
	// Unknown GVK: return the zero GVR and false.
	return schema.GroupVersionResource{}, false
}
// MustToGVR converts a GVK to a GVR, and panics if it cannot be converted
// Warning: this is only safe for known types; do not call on arbitrary GVKs
func MustToGVR(g config.GroupVersionKind) schema.GroupVersionResource {
	if r, ok := ToGVR(g); ok {
		return r
	}
	panic("unknown kind: " + g.String())
}
// FromGVR converts a GVR to a GVK.
// The second return value is false when the GVR is not one of the generated
// well-known resources. Note: only the primary version of each resource maps
// back; alias-version GVRs (e.g. gvr.HTTPRoute_v1alpha2) are not listed here.
func FromGVR(g schema.GroupVersionResource) (config.GroupVersionKind, bool) {
	switch g {
	case gvr.AuthorizationPolicy:
		return AuthorizationPolicy, true
	case gvr.CertificateSigningRequest:
		return CertificateSigningRequest, true
	case gvr.ConfigMap:
		return ConfigMap, true
	case gvr.CustomResourceDefinition:
		return CustomResourceDefinition, true
	case gvr.DaemonSet:
		return DaemonSet, true
	case gvr.Deployment:
		return Deployment, true
	case gvr.DestinationRule:
		return DestinationRule, true
	case gvr.EndpointSlice:
		return EndpointSlice, true
	case gvr.Endpoints:
		return Endpoints, true
	case gvr.EnvoyFilter:
		return EnvoyFilter, true
	case gvr.GRPCRoute:
		return GRPCRoute, true
	case gvr.Gateway:
		return Gateway, true
	case gvr.GatewayClass:
		return GatewayClass, true
	case gvr.HTTPRoute:
		return HTTPRoute, true
	case gvr.Ingress:
		return Ingress, true
	case gvr.IngressClass:
		return IngressClass, true
	case gvr.KubernetesGateway:
		return KubernetesGateway, true
	case gvr.Lease:
		return Lease, true
	case gvr.MeshConfig:
		return MeshConfig, true
	case gvr.MeshNetworks:
		return MeshNetworks, true
	case gvr.MutatingWebhookConfiguration:
		return MutatingWebhookConfiguration, true
	case gvr.Namespace:
		return Namespace, true
	case gvr.Node:
		return Node, true
	case gvr.PeerAuthentication:
		return PeerAuthentication, true
	case gvr.Pod:
		return Pod, true
	case gvr.ProxyConfig:
		return ProxyConfig, true
	case gvr.ReferenceGrant:
		return ReferenceGrant, true
	case gvr.RequestAuthentication:
		return RequestAuthentication, true
	case gvr.Secret:
		return Secret, true
	case gvr.Service:
		return Service, true
	case gvr.ServiceAccount:
		return ServiceAccount, true
	case gvr.ServiceEntry:
		return ServiceEntry, true
	case gvr.Sidecar:
		return Sidecar, true
	case gvr.StatefulSet:
		return StatefulSet, true
	case gvr.TCPRoute:
		return TCPRoute, true
	case gvr.TLSRoute:
		return TLSRoute, true
	case gvr.Telemetry:
		return Telemetry, true
	case gvr.UDPRoute:
		return UDPRoute, true
	case gvr.ValidatingWebhookConfiguration:
		return ValidatingWebhookConfiguration, true
	case gvr.VirtualService:
		return VirtualService, true
	case gvr.WasmPlugin:
		return WasmPlugin, true
	case gvr.WorkloadEntry:
		return WorkloadEntry, true
	case gvr.WorkloadGroup:
		return WorkloadGroup, true
	}
	// Unknown GVR: return the zero GVK and false.
	return config.GroupVersionKind{}, false
}
// MustFromGVR converts a GVR to a GVK, and panics if it cannot be converted.
// Warning: this is only safe for known types; do not call on arbitrary GVRs.
// (Fix: the doc comment previously named FromGVR, breaking the godoc
// convention that a comment starts with the documented identifier.)
func MustFromGVR(g schema.GroupVersionResource) config.GroupVersionKind {
	r, ok := FromGVR(g)
	if !ok {
		panic("unknown kind: " + g.String())
	}
	return r
}
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package gvr
import "k8s.io/apimachinery/pkg/runtime/schema"
// Well-known GroupVersionResources. Generated code: entries suffixed _v1,
// _v1beta1, or _v1alpha2 are alternate API versions of the unsuffixed entry.
var (
	ServiceExport                  = schema.GroupVersionResource{Group: "multicluster.x-k8s.io", Version: "v1alpha1", Resource: "serviceexports"}
	ServiceImport                  = schema.GroupVersionResource{Group: "multicluster.x-k8s.io", Version: "v1alpha1", Resource: "serviceimports"}
	AuthorizationPolicy            = schema.GroupVersionResource{Group: "security.istio.io", Version: "v1beta1", Resource: "authorizationpolicies"}
	AuthorizationPolicy_v1         = schema.GroupVersionResource{Group: "security.istio.io", Version: "v1", Resource: "authorizationpolicies"}
	CertificateSigningRequest      = schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1", Resource: "certificatesigningrequests"}
	ConfigMap                      = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
	CustomResourceDefinition       = schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
	DaemonSet                      = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
	Deployment                     = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	DestinationRule                = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "destinationrules"}
	DestinationRule_v1beta1        = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "destinationrules"}
	EndpointSlice                  = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpointslices"}
	Endpoints                      = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "endpoints"}
	EnvoyFilter                    = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "envoyfilters"}
	GRPCRoute                      = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "grpcroutes"}
	Gateway                        = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "gateways"}
	Gateway_v1beta1                = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "gateways"}
	GatewayClass                   = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1beta1", Resource: "gatewayclasses"}
	GatewayClass_v1alpha2          = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "gatewayclasses"}
	GatewayClass_v1                = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1", Resource: "gatewayclasses"}
	HTTPRoute                      = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1beta1", Resource: "httproutes"}
	HTTPRoute_v1alpha2             = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "httproutes"}
	HTTPRoute_v1                   = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1", Resource: "httproutes"}
	Ingress                        = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}
	IngressClass                   = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingressclasses"}
	// KubernetesGateway is the gateway-api gateways resource, distinct from the Istio Gateway above.
	KubernetesGateway              = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1beta1", Resource: "gateways"}
	KubernetesGateway_v1alpha2     = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "gateways"}
	KubernetesGateway_v1           = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1", Resource: "gateways"}
	Lease                          = schema.GroupVersionResource{Group: "coordination.k8s.io", Version: "v1", Resource: "leases"}
	MeshConfig                     = schema.GroupVersionResource{Group: "", Version: "v1alpha1", Resource: "meshconfigs"}
	MeshNetworks                   = schema.GroupVersionResource{Group: "", Version: "v1alpha1", Resource: "meshnetworks"}
	MutatingWebhookConfiguration   = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "mutatingwebhookconfigurations"}
	Namespace                      = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
	Node                           = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}
	PeerAuthentication             = schema.GroupVersionResource{Group: "security.istio.io", Version: "v1beta1", Resource: "peerauthentications"}
	Pod                            = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
	ProxyConfig                    = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "proxyconfigs"}
	ReferenceGrant                 = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1beta1", Resource: "referencegrants"}
	ReferenceGrant_v1alpha2        = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "referencegrants"}
	RequestAuthentication          = schema.GroupVersionResource{Group: "security.istio.io", Version: "v1beta1", Resource: "requestauthentications"}
	RequestAuthentication_v1       = schema.GroupVersionResource{Group: "security.istio.io", Version: "v1", Resource: "requestauthentications"}
	Secret                         = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}
	Service                        = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
	ServiceAccount                 = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}
	ServiceEntry                   = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "serviceentries"}
	ServiceEntry_v1beta1           = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "serviceentries"}
	Sidecar                        = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "sidecars"}
	Sidecar_v1beta1                = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "sidecars"}
	StatefulSet                    = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}
	TCPRoute                       = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "tcproutes"}
	TLSRoute                       = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "tlsroutes"}
	Telemetry                      = schema.GroupVersionResource{Group: "telemetry.istio.io", Version: "v1alpha1", Resource: "telemetries"}
	UDPRoute                       = schema.GroupVersionResource{Group: "gateway.networking.k8s.io", Version: "v1alpha2", Resource: "udproutes"}
	ValidatingWebhookConfiguration = schema.GroupVersionResource{Group: "admissionregistration.k8s.io", Version: "v1", Resource: "validatingwebhookconfigurations"}
	VirtualService                 = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "virtualservices"}
	VirtualService_v1beta1         = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "virtualservices"}
	WasmPlugin                     = schema.GroupVersionResource{Group: "extensions.istio.io", Version: "v1alpha1", Resource: "wasmplugins"}
	WorkloadEntry                  = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "workloadentries"}
	WorkloadEntry_v1beta1          = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "workloadentries"}
	WorkloadGroup                  = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1alpha3", Resource: "workloadgroups"}
	WorkloadGroup_v1beta1          = schema.GroupVersionResource{Group: "networking.istio.io", Version: "v1beta1", Resource: "workloadgroups"}
)
// IsClusterScoped reports whether the given well-known GVR names a
// cluster-scoped (rather than namespaced) resource. Unknown GVRs return false.
// NOTE(review): MeshConfig and MeshNetworks have no case here and fall through
// to the final `return false` — presumably intentional for these synthetic
// resources, but confirm against the generator.
func IsClusterScoped(g schema.GroupVersionResource) bool {
	switch g {
	case ServiceExport:
		return false
	case ServiceImport:
		return false
	case AuthorizationPolicy:
		return false
	case AuthorizationPolicy_v1:
		return false
	case CertificateSigningRequest:
		return true
	case ConfigMap:
		return false
	case CustomResourceDefinition:
		return true
	case DaemonSet:
		return false
	case Deployment:
		return false
	case DestinationRule:
		return false
	case DestinationRule_v1beta1:
		return false
	case EndpointSlice:
		return false
	case Endpoints:
		return false
	case EnvoyFilter:
		return false
	case GRPCRoute:
		return false
	case Gateway:
		return false
	case Gateway_v1beta1:
		return false
	case GatewayClass:
		return true
	case GatewayClass_v1alpha2:
		return true
	case GatewayClass_v1:
		return true
	case HTTPRoute:
		return false
	case HTTPRoute_v1alpha2:
		return false
	case HTTPRoute_v1:
		return false
	case Ingress:
		return false
	case IngressClass:
		return true
	case KubernetesGateway:
		return false
	case KubernetesGateway_v1alpha2:
		return false
	case KubernetesGateway_v1:
		return false
	case Lease:
		return false
	case MutatingWebhookConfiguration:
		return true
	case Namespace:
		return true
	case Node:
		return true
	case PeerAuthentication:
		return false
	case Pod:
		return false
	case ProxyConfig:
		return false
	case ReferenceGrant:
		return false
	case ReferenceGrant_v1alpha2:
		return false
	case RequestAuthentication:
		return false
	case RequestAuthentication_v1:
		return false
	case Secret:
		return false
	case Service:
		return false
	case ServiceAccount:
		return false
	case ServiceEntry:
		return false
	case ServiceEntry_v1beta1:
		return false
	case Sidecar:
		return false
	case Sidecar_v1beta1:
		return false
	case StatefulSet:
		return false
	case TCPRoute:
		return false
	case TLSRoute:
		return false
	case Telemetry:
		return false
	case UDPRoute:
		return false
	case ValidatingWebhookConfiguration:
		return true
	case VirtualService:
		return false
	case VirtualService_v1beta1:
		return false
	case WasmPlugin:
		return false
	case WorkloadEntry:
		return false
	case WorkloadEntry_v1beta1:
		return false
	case WorkloadGroup:
		return false
	case WorkloadGroup_v1beta1:
		return false
	}
	// shouldn't happen
	return false
}
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package kind
import (
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
)
// Kind enumerators for all well-known resource kinds. Generated code; the
// numeric values are iota-assigned starting from Address and are therefore
// tied to declaration order — do not persist them.
const (
	Address Kind = iota
	AuthorizationPolicy
	CertificateSigningRequest
	ConfigMap
	CustomResourceDefinition
	DaemonSet
	Deployment
	DestinationRule
	EndpointSlice
	Endpoints
	EnvoyFilter
	GRPCRoute
	Gateway
	GatewayClass
	HTTPRoute
	Ingress
	IngressClass
	KubernetesGateway
	Lease
	MeshConfig
	MeshNetworks
	MutatingWebhookConfiguration
	Namespace
	Node
	PeerAuthentication
	Pod
	ProxyConfig
	ReferenceGrant
	RequestAuthentication
	Secret
	Service
	ServiceAccount
	ServiceEntry
	Sidecar
	StatefulSet
	TCPRoute
	TLSRoute
	Telemetry
	UDPRoute
	ValidatingWebhookConfiguration
	VirtualService
	WasmPlugin
	WorkloadEntry
	WorkloadGroup
)
// String returns the human-readable name of the kind, or "Unknown" for values
// outside the enum. Note that KubernetesGateway also renders as "Gateway",
// identical to the Istio Gateway kind.
func (k Kind) String() string {
	switch k {
	case Address:
		return "Address"
	case AuthorizationPolicy:
		return "AuthorizationPolicy"
	case CertificateSigningRequest:
		return "CertificateSigningRequest"
	case ConfigMap:
		return "ConfigMap"
	case CustomResourceDefinition:
		return "CustomResourceDefinition"
	case DaemonSet:
		return "DaemonSet"
	case Deployment:
		return "Deployment"
	case DestinationRule:
		return "DestinationRule"
	case EndpointSlice:
		return "EndpointSlice"
	case Endpoints:
		return "Endpoints"
	case EnvoyFilter:
		return "EnvoyFilter"
	case GRPCRoute:
		return "GRPCRoute"
	case Gateway:
		return "Gateway"
	case GatewayClass:
		return "GatewayClass"
	case HTTPRoute:
		return "HTTPRoute"
	case Ingress:
		return "Ingress"
	case IngressClass:
		return "IngressClass"
	case KubernetesGateway:
		return "Gateway"
	case Lease:
		return "Lease"
	case MeshConfig:
		return "MeshConfig"
	case MeshNetworks:
		return "MeshNetworks"
	case MutatingWebhookConfiguration:
		return "MutatingWebhookConfiguration"
	case Namespace:
		return "Namespace"
	case Node:
		return "Node"
	case PeerAuthentication:
		return "PeerAuthentication"
	case Pod:
		return "Pod"
	case ProxyConfig:
		return "ProxyConfig"
	case ReferenceGrant:
		return "ReferenceGrant"
	case RequestAuthentication:
		return "RequestAuthentication"
	case Secret:
		return "Secret"
	case Service:
		return "Service"
	case ServiceAccount:
		return "ServiceAccount"
	case ServiceEntry:
		return "ServiceEntry"
	case Sidecar:
		return "Sidecar"
	case StatefulSet:
		return "StatefulSet"
	case TCPRoute:
		return "TCPRoute"
	case TLSRoute:
		return "TLSRoute"
	case Telemetry:
		return "Telemetry"
	case UDPRoute:
		return "UDPRoute"
	case ValidatingWebhookConfiguration:
		return "ValidatingWebhookConfiguration"
	case VirtualService:
		return "VirtualService"
	case WasmPlugin:
		return "WasmPlugin"
	case WorkloadEntry:
		return "WorkloadEntry"
	case WorkloadGroup:
		return "WorkloadGroup"
	default:
		return "Unknown"
	}
}
// MustFromGVK converts a well-known GVK to its Kind enumerator, panicking for
// any GVK not listed below. Only the primary GVK of each kind is handled;
// alias versions (e.g. gvk.AuthorizationPolicy_v1) will panic.
func MustFromGVK(g config.GroupVersionKind) Kind {
	switch g {
	case gvk.AuthorizationPolicy:
		return AuthorizationPolicy
	case gvk.CertificateSigningRequest:
		return CertificateSigningRequest
	case gvk.ConfigMap:
		return ConfigMap
	case gvk.CustomResourceDefinition:
		return CustomResourceDefinition
	case gvk.DaemonSet:
		return DaemonSet
	case gvk.Deployment:
		return Deployment
	case gvk.DestinationRule:
		return DestinationRule
	case gvk.EndpointSlice:
		return EndpointSlice
	case gvk.Endpoints:
		return Endpoints
	case gvk.EnvoyFilter:
		return EnvoyFilter
	case gvk.GRPCRoute:
		return GRPCRoute
	case gvk.Gateway:
		return Gateway
	case gvk.GatewayClass:
		return GatewayClass
	case gvk.HTTPRoute:
		return HTTPRoute
	case gvk.Ingress:
		return Ingress
	case gvk.IngressClass:
		return IngressClass
	case gvk.KubernetesGateway:
		return KubernetesGateway
	case gvk.Lease:
		return Lease
	case gvk.MeshConfig:
		return MeshConfig
	case gvk.MeshNetworks:
		return MeshNetworks
	case gvk.MutatingWebhookConfiguration:
		return MutatingWebhookConfiguration
	case gvk.Namespace:
		return Namespace
	case gvk.Node:
		return Node
	case gvk.PeerAuthentication:
		return PeerAuthentication
	case gvk.Pod:
		return Pod
	case gvk.ProxyConfig:
		return ProxyConfig
	case gvk.ReferenceGrant:
		return ReferenceGrant
	case gvk.RequestAuthentication:
		return RequestAuthentication
	case gvk.Secret:
		return Secret
	case gvk.Service:
		return Service
	case gvk.ServiceAccount:
		return ServiceAccount
	case gvk.ServiceEntry:
		return ServiceEntry
	case gvk.Sidecar:
		return Sidecar
	case gvk.StatefulSet:
		return StatefulSet
	case gvk.TCPRoute:
		return TCPRoute
	case gvk.TLSRoute:
		return TLSRoute
	case gvk.Telemetry:
		return Telemetry
	case gvk.UDPRoute:
		return UDPRoute
	case gvk.ValidatingWebhookConfiguration:
		return ValidatingWebhookConfiguration
	case gvk.VirtualService:
		return VirtualService
	case gvk.WasmPlugin:
		return WasmPlugin
	case gvk.WorkloadEntry:
		return WorkloadEntry
	case gvk.WorkloadGroup:
		return WorkloadGroup
	}
	panic("unknown kind: " + g.String())
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubeclient
import (
"context"
kubeext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/metadata"
"k8s.io/client-go/tools/cache"
gatewayapiclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned"
istioclient "istio.io/client-go/pkg/clientset/versioned"
"istio.io/istio/pilot/pkg/util/informermetric"
"istio.io/istio/pkg/config/schema/kubetypes"
"istio.io/istio/pkg/kube/informerfactory"
ktypes "istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/log"
)
// ClientGetter provides access to the full set of underlying Kubernetes
// clients (typed, dynamic, metadata, Istio, gateway-api) plus the shared
// informer factory that the informer constructors in this package rely on.
type ClientGetter interface {
	// Ext returns the API extensions client.
	Ext() kubeext.Interface
	// Kube returns the core kube client.
	Kube() kubernetes.Interface
	// Dynamic returns the dynamic (unstructured) client.
	Dynamic() dynamic.Interface
	// Metadata returns the metadata-only kube client.
	Metadata() metadata.Interface
	// Istio returns the Istio kube client.
	Istio() istioclient.Interface
	// GatewayAPI returns the gateway-api kube client.
	GatewayAPI() gatewayapiclient.Interface
	// Informers returns an informer factory, used to share informers per resource.
	Informers() informerfactory.InformerFactory
}
// GetInformerFiltered returns a StartableInformer for the type T, applying the
// provided options (namespace, selectors, informer type, transform, etc.).
// The GroupVersionResource is derived from T.
func GetInformerFiltered[T runtime.Object](c ClientGetter, opts ktypes.InformerOptions) informerfactory.StartableInformer {
	resource := kubetypes.GetGVR[T]()
	return GetInformerFilteredFromGVR(c, opts, resource)
}
// GetInformerFilteredFromGVR returns a StartableInformer for the given
// resource, dispatching on the requested informer flavor: dynamic
// (unstructured objects), metadata-only, or the default typed informer.
func GetInformerFilteredFromGVR(c ClientGetter, opts ktypes.InformerOptions, g schema.GroupVersionResource) informerfactory.StartableInformer {
	if opts.InformerType == ktypes.DynamicInformer {
		return getInformerFilteredDynamic(c, opts, g)
	}
	if opts.InformerType == ktypes.MetadataInformer {
		return getInformerFilteredMetadata(c, opts, g)
	}
	return getInformerFiltered(c, opts, g)
}
// getInformerFilteredDynamic builds (or fetches from the factory cache) a
// shared informer backed by the dynamic client, producing
// *unstructured.Unstructured objects. Field/label selectors from opts are
// applied to both list and watch calls.
func getInformerFilteredDynamic(c ClientGetter, opts ktypes.InformerOptions, g schema.GroupVersionResource) informerfactory.StartableInformer {
	return c.Informers().InformerFor(g, opts, func() cache.SharedIndexInformer {
		lw := &cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.FieldSelector = opts.FieldSelector
				options.LabelSelector = opts.LabelSelector
				return c.Dynamic().Resource(g).Namespace(opts.Namespace).List(context.Background(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.FieldSelector = opts.FieldSelector
				options.LabelSelector = opts.LabelSelector
				return c.Dynamic().Resource(g).Namespace(opts.Namespace).Watch(context.Background(), options)
			},
		}
		informer := cache.NewSharedIndexInformerWithOptions(
			lw,
			&unstructured.Unstructured{},
			cache.SharedIndexInformerOptions{
				ResyncPeriod:      0,
				Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
				ObjectDescription: g.String(),
			},
		)
		setupInformer(opts, informer)
		return informer
	})
}
// getInformerFilteredMetadata builds (or fetches from the factory cache) a
// shared informer backed by the metadata-only client, producing
// *metav1.PartialObjectMetadata objects. Field/label selectors from opts are
// applied to both list and watch calls.
func getInformerFilteredMetadata(c ClientGetter, opts ktypes.InformerOptions, g schema.GroupVersionResource) informerfactory.StartableInformer {
	return c.Informers().InformerFor(g, opts, func() cache.SharedIndexInformer {
		lw := &cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.FieldSelector = opts.FieldSelector
				options.LabelSelector = opts.LabelSelector
				return c.Metadata().Resource(g).Namespace(opts.Namespace).List(context.Background(), options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.FieldSelector = opts.FieldSelector
				options.LabelSelector = opts.LabelSelector
				return c.Metadata().Resource(g).Namespace(opts.Namespace).Watch(context.Background(), options)
			},
		}
		informer := cache.NewSharedIndexInformerWithOptions(
			lw,
			&metav1.PartialObjectMetadata{},
			cache.SharedIndexInformerOptions{
				ResyncPeriod:      0,
				Indexers:          cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
				ObjectDescription: g.String(),
			},
		)
		setupInformer(opts, informer)
		return informer
	})
}
// setupInformer applies the shared post-construction configuration to a new
// informer: an object transform (defaulting to stripUnusedFields when none is
// supplied) and a per-cluster watch error handler.
func setupInformer(opts ktypes.InformerOptions, inf cache.SharedIndexInformer) {
	// It is important to set this in the newFunc rather than after InformerFor to avoid
	// https://github.com/kubernetes/kubernetes/issues/117869
	transform := opts.ObjectTransform
	if transform == nil {
		transform = stripUnusedFields
	}
	_ = inf.SetTransform(transform)
	if err := inf.SetWatchErrorHandler(informermetric.ErrorHandlerForCluster(opts.Cluster)); err != nil {
		log.Debugf("failed to set watch handler, informer may already be started: %v", err)
	}
}
// stripUnusedFields is the transform function for shared informers;
// it removes unused fields from objects before they are stored in the cache to save memory.
func stripUnusedFields(obj any) (any, error) {
	accessor, ok := obj.(metav1.ObjectMetaAccessor)
	if !ok {
		// Not a metadata-bearing object; shouldn't happen. Pass through untouched.
		return obj, nil
	}
	// ManagedFields is large and we never use it
	accessor.GetObjectMeta().SetManagedFields(nil)
	return obj, nil
}
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package kubeclient
import (
"context"
"fmt"
k8sioapiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
k8sioapiappsv1 "k8s.io/api/apps/v1"
k8sioapicertificatesv1 "k8s.io/api/certificates/v1"
k8sioapicoordinationv1 "k8s.io/api/coordination/v1"
k8sioapicorev1 "k8s.io/api/core/v1"
k8sioapidiscoveryv1 "k8s.io/api/discovery/v1"
k8sioapinetworkingv1 "k8s.io/api/networking/v1"
k8sioapiextensionsapiserverpkgapisapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/tools/cache"
sigsk8siogatewayapiapisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
sigsk8siogatewayapiapisv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
apiistioioapiextensionsv1alpha1 "istio.io/client-go/pkg/apis/extensions/v1alpha1"
apiistioioapinetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
apiistioioapinetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
apiistioioapisecurityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"
apiistioioapitelemetryv1alpha1 "istio.io/client-go/pkg/apis/telemetry/v1alpha1"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube/informerfactory"
ktypes "istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/ptr"
)
// GetWriteClient returns a typed write client for T, scoped to the given
// namespace for namespaced resources; cluster-scoped resources (e.g. Node,
// Namespace, CustomResourceDefinition, webhook configurations) ignore the
// namespace argument. Panics for types not registered here.
// This file is code generated (see header) — do not hand-edit the cases.
func GetWriteClient[T runtime.Object](c ClientGetter, namespace string) ktypes.WriteAPI[T] {
	switch any(ptr.Empty[T]()).(type) {
	case *apiistioioapisecurityv1beta1.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicertificatesv1.CertificateSigningRequest:
		return c.Kube().CertificatesV1().CertificateSigningRequests().(ktypes.WriteAPI[T])
	case *k8sioapicorev1.ConfigMap:
		return c.Kube().CoreV1().ConfigMaps(namespace).(ktypes.WriteAPI[T])
	case *k8sioapiextensionsapiserverpkgapisapiextensionsv1.CustomResourceDefinition:
		return c.Ext().ApiextensionsV1().CustomResourceDefinitions().(ktypes.WriteAPI[T])
	case *k8sioapiappsv1.DaemonSet:
		return c.Kube().AppsV1().DaemonSets(namespace).(ktypes.WriteAPI[T])
	case *k8sioapiappsv1.Deployment:
		return c.Kube().AppsV1().Deployments(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(namespace).(ktypes.WriteAPI[T])
	case *k8sioapidiscoveryv1.EndpointSlice:
		return c.Kube().DiscoveryV1().EndpointSlices(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Endpoints:
		return c.Kube().CoreV1().Endpoints(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1alpha2.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1beta1.GatewayClass:
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1beta1.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(namespace).(ktypes.WriteAPI[T])
	case *k8sioapinetworkingv1.Ingress:
		return c.Kube().NetworkingV1().Ingresses(namespace).(ktypes.WriteAPI[T])
	case *k8sioapinetworkingv1.IngressClass:
		return c.Kube().NetworkingV1().IngressClasses().(ktypes.WriteAPI[T])
	// Note: the gateway-api Gateway (vs. Istio's Gateway above) maps to the
	// "KubernetesGateway" kind elsewhere in the schema.
	case *sigsk8siogatewayapiapisv1beta1.Gateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicoordinationv1.Lease:
		return c.Kube().CoordinationV1().Leases(namespace).(ktypes.WriteAPI[T])
	case *k8sioapiadmissionregistrationv1.MutatingWebhookConfiguration:
		return c.Kube().AdmissionregistrationV1().MutatingWebhookConfigurations().(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Namespace:
		return c.Kube().CoreV1().Namespaces().(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Node:
		return c.Kube().CoreV1().Nodes().(ktypes.WriteAPI[T])
	case *apiistioioapisecurityv1beta1.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Pod:
		return c.Kube().CoreV1().Pods(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1beta1.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1beta1.ReferenceGrant:
		return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapisecurityv1beta1.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Secret:
		return c.Kube().CoreV1().Secrets(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicorev1.Service:
		return c.Kube().CoreV1().Services(namespace).(ktypes.WriteAPI[T])
	case *k8sioapicorev1.ServiceAccount:
		return c.Kube().CoreV1().ServiceAccounts(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(namespace).(ktypes.WriteAPI[T])
	case *k8sioapiappsv1.StatefulSet:
		return c.Kube().AppsV1().StatefulSets(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1alpha2.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1alpha2.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapitelemetryv1alpha1.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(namespace).(ktypes.WriteAPI[T])
	case *sigsk8siogatewayapiapisv1alpha2.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(namespace).(ktypes.WriteAPI[T])
	case *k8sioapiadmissionregistrationv1.ValidatingWebhookConfiguration:
		return c.Kube().AdmissionregistrationV1().ValidatingWebhookConfigurations().(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapiextensionsv1alpha1.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(namespace).(ktypes.WriteAPI[T])
	case *apiistioioapinetworkingv1alpha3.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(namespace).(ktypes.WriteAPI[T])
	default:
		// Unknown type is a programmer error (unregistered resource), so panic.
		panic(fmt.Sprintf("Unknown type %T", ptr.Empty[T]()))
	}
}
// GetClient returns a combined read/write client for T (with TL as the
// corresponding list type), scoped to the given namespace for namespaced
// resources; cluster-scoped resources ignore the namespace argument.
// Panics for types not registered here.
// This file is code generated (see header) — do not hand-edit the cases;
// keep the case list in sync with GetWriteClient above.
func GetClient[T, TL runtime.Object](c ClientGetter, namespace string) ktypes.ReadWriteAPI[T, TL] {
	switch any(ptr.Empty[T]()).(type) {
	case *apiistioioapisecurityv1beta1.AuthorizationPolicy:
		return c.Istio().SecurityV1beta1().AuthorizationPolicies(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicertificatesv1.CertificateSigningRequest:
		return c.Kube().CertificatesV1().CertificateSigningRequests().(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.ConfigMap:
		return c.Kube().CoreV1().ConfigMaps(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiextensionsapiserverpkgapisapiextensionsv1.CustomResourceDefinition:
		return c.Ext().ApiextensionsV1().CustomResourceDefinitions().(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiappsv1.DaemonSet:
		return c.Kube().AppsV1().DaemonSets(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiappsv1.Deployment:
		return c.Kube().AppsV1().Deployments(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.DestinationRule:
		return c.Istio().NetworkingV1alpha3().DestinationRules(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapidiscoveryv1.EndpointSlice:
		return c.Kube().DiscoveryV1().EndpointSlices(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Endpoints:
		return c.Kube().CoreV1().Endpoints(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.EnvoyFilter:
		return c.Istio().NetworkingV1alpha3().EnvoyFilters(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1alpha2.GRPCRoute:
		return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.Gateway:
		return c.Istio().NetworkingV1alpha3().Gateways(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1beta1.GatewayClass:
		return c.GatewayAPI().GatewayV1beta1().GatewayClasses().(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1beta1.HTTPRoute:
		return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapinetworkingv1.Ingress:
		return c.Kube().NetworkingV1().Ingresses(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapinetworkingv1.IngressClass:
		return c.Kube().NetworkingV1().IngressClasses().(ktypes.ReadWriteAPI[T, TL])
	// Note: the gateway-api Gateway (vs. Istio's Gateway above) maps to the
	// "KubernetesGateway" kind elsewhere in the schema.
	case *sigsk8siogatewayapiapisv1beta1.Gateway:
		return c.GatewayAPI().GatewayV1beta1().Gateways(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicoordinationv1.Lease:
		return c.Kube().CoordinationV1().Leases(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiadmissionregistrationv1.MutatingWebhookConfiguration:
		return c.Kube().AdmissionregistrationV1().MutatingWebhookConfigurations().(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Namespace:
		return c.Kube().CoreV1().Namespaces().(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Node:
		return c.Kube().CoreV1().Nodes().(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapisecurityv1beta1.PeerAuthentication:
		return c.Istio().SecurityV1beta1().PeerAuthentications(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Pod:
		return c.Kube().CoreV1().Pods(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1beta1.ProxyConfig:
		return c.Istio().NetworkingV1beta1().ProxyConfigs(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1beta1.ReferenceGrant:
		return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapisecurityv1beta1.RequestAuthentication:
		return c.Istio().SecurityV1beta1().RequestAuthentications(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Secret:
		return c.Kube().CoreV1().Secrets(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.Service:
		return c.Kube().CoreV1().Services(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapicorev1.ServiceAccount:
		return c.Kube().CoreV1().ServiceAccounts(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.ServiceEntry:
		return c.Istio().NetworkingV1alpha3().ServiceEntries(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.Sidecar:
		return c.Istio().NetworkingV1alpha3().Sidecars(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiappsv1.StatefulSet:
		return c.Kube().AppsV1().StatefulSets(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1alpha2.TCPRoute:
		return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1alpha2.TLSRoute:
		return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapitelemetryv1alpha1.Telemetry:
		return c.Istio().TelemetryV1alpha1().Telemetries(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *sigsk8siogatewayapiapisv1alpha2.UDPRoute:
		return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *k8sioapiadmissionregistrationv1.ValidatingWebhookConfiguration:
		return c.Kube().AdmissionregistrationV1().ValidatingWebhookConfigurations().(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.VirtualService:
		return c.Istio().NetworkingV1alpha3().VirtualServices(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapiextensionsv1alpha1.WasmPlugin:
		return c.Istio().ExtensionsV1alpha1().WasmPlugins(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.WorkloadEntry:
		return c.Istio().NetworkingV1alpha3().WorkloadEntries(namespace).(ktypes.ReadWriteAPI[T, TL])
	case *apiistioioapinetworkingv1alpha3.WorkloadGroup:
		return c.Istio().NetworkingV1alpha3().WorkloadGroups(namespace).(ktypes.ReadWriteAPI[T, TL])
	default:
		// Unknown type is a programmer error (unregistered resource), so panic.
		panic(fmt.Sprintf("Unknown type %T", ptr.Empty[T]()))
	}
}
// gvrToObject returns a fresh, empty typed object (e.g. &corev1.Pod{}) for
// the given GroupVersionResource, used as the example object when building
// typed informers. Panics for resources not registered here.
// This file is code generated (see header) — do not hand-edit the cases.
func gvrToObject(g schema.GroupVersionResource) runtime.Object {
	switch g {
	case gvr.AuthorizationPolicy:
		return &apiistioioapisecurityv1beta1.AuthorizationPolicy{}
	case gvr.CertificateSigningRequest:
		return &k8sioapicertificatesv1.CertificateSigningRequest{}
	case gvr.ConfigMap:
		return &k8sioapicorev1.ConfigMap{}
	case gvr.CustomResourceDefinition:
		return &k8sioapiextensionsapiserverpkgapisapiextensionsv1.CustomResourceDefinition{}
	case gvr.DaemonSet:
		return &k8sioapiappsv1.DaemonSet{}
	case gvr.Deployment:
		return &k8sioapiappsv1.Deployment{}
	case gvr.DestinationRule:
		return &apiistioioapinetworkingv1alpha3.DestinationRule{}
	case gvr.EndpointSlice:
		return &k8sioapidiscoveryv1.EndpointSlice{}
	case gvr.Endpoints:
		return &k8sioapicorev1.Endpoints{}
	case gvr.EnvoyFilter:
		return &apiistioioapinetworkingv1alpha3.EnvoyFilter{}
	case gvr.GRPCRoute:
		return &sigsk8siogatewayapiapisv1alpha2.GRPCRoute{}
	case gvr.Gateway:
		return &apiistioioapinetworkingv1alpha3.Gateway{}
	case gvr.GatewayClass:
		return &sigsk8siogatewayapiapisv1beta1.GatewayClass{}
	case gvr.HTTPRoute:
		return &sigsk8siogatewayapiapisv1beta1.HTTPRoute{}
	case gvr.Ingress:
		return &k8sioapinetworkingv1.Ingress{}
	case gvr.IngressClass:
		return &k8sioapinetworkingv1.IngressClass{}
	// KubernetesGateway is the gateway-api Gateway, distinct from Istio's
	// Gateway handled above.
	case gvr.KubernetesGateway:
		return &sigsk8siogatewayapiapisv1beta1.Gateway{}
	case gvr.Lease:
		return &k8sioapicoordinationv1.Lease{}
	case gvr.MutatingWebhookConfiguration:
		return &k8sioapiadmissionregistrationv1.MutatingWebhookConfiguration{}
	case gvr.Namespace:
		return &k8sioapicorev1.Namespace{}
	case gvr.Node:
		return &k8sioapicorev1.Node{}
	case gvr.PeerAuthentication:
		return &apiistioioapisecurityv1beta1.PeerAuthentication{}
	case gvr.Pod:
		return &k8sioapicorev1.Pod{}
	case gvr.ProxyConfig:
		return &apiistioioapinetworkingv1beta1.ProxyConfig{}
	case gvr.ReferenceGrant:
		return &sigsk8siogatewayapiapisv1beta1.ReferenceGrant{}
	case gvr.RequestAuthentication:
		return &apiistioioapisecurityv1beta1.RequestAuthentication{}
	case gvr.Secret:
		return &k8sioapicorev1.Secret{}
	case gvr.Service:
		return &k8sioapicorev1.Service{}
	case gvr.ServiceAccount:
		return &k8sioapicorev1.ServiceAccount{}
	case gvr.ServiceEntry:
		return &apiistioioapinetworkingv1alpha3.ServiceEntry{}
	case gvr.Sidecar:
		return &apiistioioapinetworkingv1alpha3.Sidecar{}
	case gvr.StatefulSet:
		return &k8sioapiappsv1.StatefulSet{}
	case gvr.TCPRoute:
		return &sigsk8siogatewayapiapisv1alpha2.TCPRoute{}
	case gvr.TLSRoute:
		return &sigsk8siogatewayapiapisv1alpha2.TLSRoute{}
	case gvr.Telemetry:
		return &apiistioioapitelemetryv1alpha1.Telemetry{}
	case gvr.UDPRoute:
		return &sigsk8siogatewayapiapisv1alpha2.UDPRoute{}
	case gvr.ValidatingWebhookConfiguration:
		return &k8sioapiadmissionregistrationv1.ValidatingWebhookConfiguration{}
	case gvr.VirtualService:
		return &apiistioioapinetworkingv1alpha3.VirtualService{}
	case gvr.WasmPlugin:
		return &apiistioioapiextensionsv1alpha1.WasmPlugin{}
	case gvr.WorkloadEntry:
		return &apiistioioapinetworkingv1alpha3.WorkloadEntry{}
	case gvr.WorkloadGroup:
		return &apiistioioapinetworkingv1alpha3.WorkloadGroup{}
	default:
		// Unknown resource is a programmer error (unregistered GVR), so panic.
		panic(fmt.Sprintf("Unknown type %v", g))
	}
}
func getInformerFiltered(c ClientGetter, opts ktypes.InformerOptions, g schema.GroupVersionResource) informerfactory.StartableInformer {
var l func(options metav1.ListOptions) (runtime.Object, error)
var w func(options metav1.ListOptions) (watch.Interface, error)
switch g {
case gvr.AuthorizationPolicy:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().SecurityV1beta1().AuthorizationPolicies(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().SecurityV1beta1().AuthorizationPolicies(opts.Namespace).Watch(context.Background(), options)
}
case gvr.CertificateSigningRequest:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CertificatesV1().CertificateSigningRequests().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CertificatesV1().CertificateSigningRequests().Watch(context.Background(), options)
}
case gvr.ConfigMap:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().ConfigMaps(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().ConfigMaps(opts.Namespace).Watch(context.Background(), options)
}
case gvr.CustomResourceDefinition:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Ext().ApiextensionsV1().CustomResourceDefinitions().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Ext().ApiextensionsV1().CustomResourceDefinitions().Watch(context.Background(), options)
}
case gvr.DaemonSet:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().AppsV1().DaemonSets(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().AppsV1().DaemonSets(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Deployment:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().AppsV1().Deployments(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().AppsV1().Deployments(opts.Namespace).Watch(context.Background(), options)
}
case gvr.DestinationRule:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().DestinationRules(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().DestinationRules(opts.Namespace).Watch(context.Background(), options)
}
case gvr.EndpointSlice:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().DiscoveryV1().EndpointSlices(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().DiscoveryV1().EndpointSlices(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Endpoints:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Endpoints(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Endpoints(opts.Namespace).Watch(context.Background(), options)
}
case gvr.EnvoyFilter:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().EnvoyFilters(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().EnvoyFilters(opts.Namespace).Watch(context.Background(), options)
}
case gvr.GRPCRoute:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1alpha2().GRPCRoutes(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Gateway:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().Gateways(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().Gateways(opts.Namespace).Watch(context.Background(), options)
}
case gvr.GatewayClass:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1beta1().GatewayClasses().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1beta1().GatewayClasses().Watch(context.Background(), options)
}
case gvr.HTTPRoute:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1beta1().HTTPRoutes(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Ingress:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().NetworkingV1().Ingresses(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().NetworkingV1().Ingresses(opts.Namespace).Watch(context.Background(), options)
}
case gvr.IngressClass:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().NetworkingV1().IngressClasses().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().NetworkingV1().IngressClasses().Watch(context.Background(), options)
}
case gvr.KubernetesGateway:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1beta1().Gateways(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1beta1().Gateways(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Lease:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoordinationV1().Leases(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoordinationV1().Leases(opts.Namespace).Watch(context.Background(), options)
}
case gvr.MutatingWebhookConfiguration:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().AdmissionregistrationV1().MutatingWebhookConfigurations().Watch(context.Background(), options)
}
case gvr.Namespace:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Namespaces().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Namespaces().Watch(context.Background(), options)
}
case gvr.Node:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Nodes().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Nodes().Watch(context.Background(), options)
}
case gvr.PeerAuthentication:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().SecurityV1beta1().PeerAuthentications(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().SecurityV1beta1().PeerAuthentications(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Pod:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Pods(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Pods(opts.Namespace).Watch(context.Background(), options)
}
case gvr.ProxyConfig:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1beta1().ProxyConfigs(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1beta1().ProxyConfigs(opts.Namespace).Watch(context.Background(), options)
}
case gvr.ReferenceGrant:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1beta1().ReferenceGrants(opts.Namespace).Watch(context.Background(), options)
}
case gvr.RequestAuthentication:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().SecurityV1beta1().RequestAuthentications(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().SecurityV1beta1().RequestAuthentications(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Secret:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Secrets(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Secrets(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Service:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().Services(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().Services(opts.Namespace).Watch(context.Background(), options)
}
case gvr.ServiceAccount:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().CoreV1().ServiceAccounts(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().CoreV1().ServiceAccounts(opts.Namespace).Watch(context.Background(), options)
}
case gvr.ServiceEntry:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().ServiceEntries(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().ServiceEntries(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Sidecar:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().Sidecars(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().Sidecars(opts.Namespace).Watch(context.Background(), options)
}
case gvr.StatefulSet:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().AppsV1().StatefulSets(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().AppsV1().StatefulSets(opts.Namespace).Watch(context.Background(), options)
}
case gvr.TCPRoute:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1alpha2().TCPRoutes(opts.Namespace).Watch(context.Background(), options)
}
case gvr.TLSRoute:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1alpha2().TLSRoutes(opts.Namespace).Watch(context.Background(), options)
}
case gvr.Telemetry:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().TelemetryV1alpha1().Telemetries(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().TelemetryV1alpha1().Telemetries(opts.Namespace).Watch(context.Background(), options)
}
case gvr.UDPRoute:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.GatewayAPI().GatewayV1alpha2().UDPRoutes(opts.Namespace).Watch(context.Background(), options)
}
case gvr.ValidatingWebhookConfiguration:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Kube().AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Kube().AdmissionregistrationV1().ValidatingWebhookConfigurations().Watch(context.Background(), options)
}
case gvr.VirtualService:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().VirtualServices(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().VirtualServices(opts.Namespace).Watch(context.Background(), options)
}
case gvr.WasmPlugin:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().ExtensionsV1alpha1().WasmPlugins(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().ExtensionsV1alpha1().WasmPlugins(opts.Namespace).Watch(context.Background(), options)
}
case gvr.WorkloadEntry:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().WorkloadEntries(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().WorkloadEntries(opts.Namespace).Watch(context.Background(), options)
}
case gvr.WorkloadGroup:
l = func(options metav1.ListOptions) (runtime.Object, error) {
return c.Istio().NetworkingV1alpha3().WorkloadGroups(opts.Namespace).List(context.Background(), options)
}
w = func(options metav1.ListOptions) (watch.Interface, error) {
return c.Istio().NetworkingV1alpha3().WorkloadGroups(opts.Namespace).Watch(context.Background(), options)
}
default:
panic(fmt.Sprintf("Unknown type %v", g))
}
return c.Informers().InformerFor(g, opts, func() cache.SharedIndexInformer {
inf := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = opts.FieldSelector
options.LabelSelector = opts.LabelSelector
return l(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = opts.FieldSelector
options.LabelSelector = opts.LabelSelector
return w(options)
},
},
gvrToObject(g),
0,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
setupInformer(opts, inf)
return inf
})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubetypes
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/ptr"
)
// GetGVR returns the GroupVersionResource for the type parameter T.
// It panics when the GVK of T has no registered GVR mapping.
func GetGVR[T runtime.Object]() schema.GroupVersionResource {
	k := GetGVK[T]()
	if r, ok := gvk.ToGVR(k); ok {
		return r
	}
	panic(fmt.Sprintf("unknown GVR for GVK %v", k))
}
// GetGVK returns the canonical Istio GroupVersionKind for the type parameter T,
// derived from the zero value of T via the generated getGvk type switch.
func GetGVK[T runtime.Object]() config.GroupVersionKind {
	return getGvk(ptr.Empty[T]())
}
// GvkFromObject returns the canonical Istio GroupVersionKind for a live object.
// It panics (via getGvk) when the object's concrete type is not recognized.
func GvkFromObject(obj runtime.Object) config.GroupVersionKind {
	return getGvk(obj)
}
// Code generated by pkg/config/schema/codegen/tools/collections.main.go. DO NOT EDIT.
package kubetypes
import (
"fmt"
k8sioapiadmissionregistrationv1 "k8s.io/api/admissionregistration/v1"
k8sioapiappsv1 "k8s.io/api/apps/v1"
k8sioapicertificatesv1 "k8s.io/api/certificates/v1"
k8sioapicoordinationv1 "k8s.io/api/coordination/v1"
k8sioapicorev1 "k8s.io/api/core/v1"
k8sioapidiscoveryv1 "k8s.io/api/discovery/v1"
k8sioapinetworkingv1 "k8s.io/api/networking/v1"
k8sioapiextensionsapiserverpkgapisapiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
sigsk8siogatewayapiapisv1alpha2 "sigs.k8s.io/gateway-api/apis/v1alpha2"
sigsk8siogatewayapiapisv1beta1 "sigs.k8s.io/gateway-api/apis/v1beta1"
istioioapiextensionsv1alpha1 "istio.io/api/extensions/v1alpha1"
istioioapimeshv1alpha1 "istio.io/api/mesh/v1alpha1"
istioioapinetworkingv1alpha3 "istio.io/api/networking/v1alpha3"
istioioapinetworkingv1beta1 "istio.io/api/networking/v1beta1"
istioioapisecurityv1beta1 "istio.io/api/security/v1beta1"
istioioapitelemetryv1alpha1 "istio.io/api/telemetry/v1alpha1"
apiistioioapiextensionsv1alpha1 "istio.io/client-go/pkg/apis/extensions/v1alpha1"
apiistioioapinetworkingv1alpha3 "istio.io/client-go/pkg/apis/networking/v1alpha3"
apiistioioapinetworkingv1beta1 "istio.io/client-go/pkg/apis/networking/v1beta1"
apiistioioapisecurityv1beta1 "istio.io/client-go/pkg/apis/security/v1beta1"
apiistioioapitelemetryv1alpha1 "istio.io/client-go/pkg/apis/telemetry/v1alpha1"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
)
// getGvk maps a concrete configuration object pointer to its canonical Istio
// GroupVersionKind. Both the proto (istio.io/api) and client-go
// (istio.io/client-go) representations of an Istio type map to the same GVK.
// It panics on an unrecognized type; this file is generated, so a missing case
// means the generator must be re-run, not that this switch should be hand-edited.
func getGvk(obj any) config.GroupVersionKind {
	switch obj.(type) {
	case *istioioapisecurityv1beta1.AuthorizationPolicy:
		return gvk.AuthorizationPolicy
	case *apiistioioapisecurityv1beta1.AuthorizationPolicy:
		return gvk.AuthorizationPolicy
	case *k8sioapicertificatesv1.CertificateSigningRequest:
		return gvk.CertificateSigningRequest
	case *k8sioapicorev1.ConfigMap:
		return gvk.ConfigMap
	case *k8sioapiextensionsapiserverpkgapisapiextensionsv1.CustomResourceDefinition:
		return gvk.CustomResourceDefinition
	case *k8sioapiappsv1.DaemonSet:
		return gvk.DaemonSet
	case *k8sioapiappsv1.Deployment:
		return gvk.Deployment
	case *istioioapinetworkingv1alpha3.DestinationRule:
		return gvk.DestinationRule
	case *apiistioioapinetworkingv1alpha3.DestinationRule:
		return gvk.DestinationRule
	case *k8sioapidiscoveryv1.EndpointSlice:
		return gvk.EndpointSlice
	case *k8sioapicorev1.Endpoints:
		return gvk.Endpoints
	case *istioioapinetworkingv1alpha3.EnvoyFilter:
		return gvk.EnvoyFilter
	case *apiistioioapinetworkingv1alpha3.EnvoyFilter:
		return gvk.EnvoyFilter
	case *sigsk8siogatewayapiapisv1alpha2.GRPCRoute:
		return gvk.GRPCRoute
	case *istioioapinetworkingv1alpha3.Gateway:
		return gvk.Gateway
	case *apiistioioapinetworkingv1alpha3.Gateway:
		return gvk.Gateway
	case *sigsk8siogatewayapiapisv1beta1.GatewayClass:
		return gvk.GatewayClass
	case *sigsk8siogatewayapiapisv1beta1.HTTPRoute:
		return gvk.HTTPRoute
	case *k8sioapinetworkingv1.Ingress:
		return gvk.Ingress
	case *k8sioapinetworkingv1.IngressClass:
		return gvk.IngressClass
	// Note: the gateway-api Gateway maps to KubernetesGateway, distinct from
	// the Istio networking Gateway above.
	case *sigsk8siogatewayapiapisv1beta1.Gateway:
		return gvk.KubernetesGateway
	case *k8sioapicoordinationv1.Lease:
		return gvk.Lease
	case *istioioapimeshv1alpha1.MeshConfig:
		return gvk.MeshConfig
	case *istioioapimeshv1alpha1.MeshNetworks:
		return gvk.MeshNetworks
	case *k8sioapiadmissionregistrationv1.MutatingWebhookConfiguration:
		return gvk.MutatingWebhookConfiguration
	case *k8sioapicorev1.Namespace:
		return gvk.Namespace
	case *k8sioapicorev1.Node:
		return gvk.Node
	case *istioioapisecurityv1beta1.PeerAuthentication:
		return gvk.PeerAuthentication
	case *apiistioioapisecurityv1beta1.PeerAuthentication:
		return gvk.PeerAuthentication
	case *k8sioapicorev1.Pod:
		return gvk.Pod
	case *istioioapinetworkingv1beta1.ProxyConfig:
		return gvk.ProxyConfig
	case *apiistioioapinetworkingv1beta1.ProxyConfig:
		return gvk.ProxyConfig
	case *sigsk8siogatewayapiapisv1beta1.ReferenceGrant:
		return gvk.ReferenceGrant
	case *istioioapisecurityv1beta1.RequestAuthentication:
		return gvk.RequestAuthentication
	case *apiistioioapisecurityv1beta1.RequestAuthentication:
		return gvk.RequestAuthentication
	case *k8sioapicorev1.Secret:
		return gvk.Secret
	case *k8sioapicorev1.Service:
		return gvk.Service
	case *k8sioapicorev1.ServiceAccount:
		return gvk.ServiceAccount
	case *istioioapinetworkingv1alpha3.ServiceEntry:
		return gvk.ServiceEntry
	case *apiistioioapinetworkingv1alpha3.ServiceEntry:
		return gvk.ServiceEntry
	case *istioioapinetworkingv1alpha3.Sidecar:
		return gvk.Sidecar
	case *apiistioioapinetworkingv1alpha3.Sidecar:
		return gvk.Sidecar
	case *k8sioapiappsv1.StatefulSet:
		return gvk.StatefulSet
	case *sigsk8siogatewayapiapisv1alpha2.TCPRoute:
		return gvk.TCPRoute
	case *sigsk8siogatewayapiapisv1alpha2.TLSRoute:
		return gvk.TLSRoute
	case *istioioapitelemetryv1alpha1.Telemetry:
		return gvk.Telemetry
	case *apiistioioapitelemetryv1alpha1.Telemetry:
		return gvk.Telemetry
	case *sigsk8siogatewayapiapisv1alpha2.UDPRoute:
		return gvk.UDPRoute
	case *k8sioapiadmissionregistrationv1.ValidatingWebhookConfiguration:
		return gvk.ValidatingWebhookConfiguration
	case *istioioapinetworkingv1alpha3.VirtualService:
		return gvk.VirtualService
	case *apiistioioapinetworkingv1alpha3.VirtualService:
		return gvk.VirtualService
	case *istioioapiextensionsv1alpha1.WasmPlugin:
		return gvk.WasmPlugin
	case *apiistioioapiextensionsv1alpha1.WasmPlugin:
		return gvk.WasmPlugin
	case *istioioapinetworkingv1alpha3.WorkloadEntry:
		return gvk.WorkloadEntry
	case *apiistioioapinetworkingv1alpha3.WorkloadEntry:
		return gvk.WorkloadEntry
	case *istioioapinetworkingv1alpha3.WorkloadGroup:
		return gvk.WorkloadGroup
	case *apiistioioapinetworkingv1alpha3.WorkloadGroup:
		return gvk.WorkloadGroup
	default:
		panic(fmt.Sprintf("Unknown type %T", obj))
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package resource
import (
"errors"
"fmt"
"reflect"
"github.com/hashicorp/go-multierror"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/validation"
)
// Schema for a resource.
type Schema interface {
	fmt.Stringer
	// GroupVersionKind of the resource. This is the only way to uniquely identify a resource.
	GroupVersionKind() config.GroupVersionKind
	// GroupVersionResource of the resource.
	GroupVersionResource() schema.GroupVersionResource
	// IsClusterScoped reports whether this resource is scoped to the whole
	// cluster rather than to a particular namespace.
	IsClusterScoped() bool
	// IsBuiltin indicates that this resource is builtin (not a CRD)
	IsBuiltin() bool
	// Identifier returns a unique identifier for the resource
	Identifier() string
	// Kind for this resource.
	Kind() string
	// Plural returns the plural form of the Kind.
	Plural() string
	// Group for this resource.
	Group() string
	// Version of this resource.
	Version() string
	// GroupVersionAliasKinds is the GVK of this resource,
	// but the version is from its version aliases to perform version conversion.
	GroupVersionAliasKinds() []config.GroupVersionKind
	// APIVersion is a utility that returns a k8s API version string of the form "Group/Version".
	APIVersion() string
	// Proto returns the protocol buffer type name for this resource.
	Proto() string
	// ProtoPackage returns the golang package for the protobuf resource.
	ProtoPackage() string
	// NewInstance returns a new instance of the protocol buffer message for this resource.
	NewInstance() (config.Spec, error)
	// Status returns the associated status of the schema
	Status() (config.Status, error)
	// StatusKind returns the Kind of the status field. If unset, the field does not support status.
	StatusKind() string
	// StatusPackage returns the golang package of the status type, if any.
	StatusPackage() string
	// MustNewInstance calls NewInstance and panics if an error occurs.
	MustNewInstance() config.Spec
	// Validate this schema.
	Validate() error
	// ValidateConfig validates that the given config message is of the correct type for this schema
	// and that the contents are valid.
	ValidateConfig(cfg config.Config) (validation.Warning, error)
	// Equal is a helper function for testing equality between Schema instances. This supports comparison
	// with the cmp library.
	Equal(other Schema) bool
}
// Builder for a Schema.
type Builder struct {
	// ClusterScoped is true for resource in cluster-level.
	ClusterScoped bool
	// Synthetic is true for resource that do not actually exist in a cluster
	Synthetic bool
	// Builtin is true for resources that are builtin (not CRD)
	Builtin bool
	// Identifier is the unique identifier for the resource
	Identifier string
	// Kind is the config proto type.
	Kind string
	// Plural is the type in plural.
	Plural string
	// Group is the config proto group.
	Group string
	// Version is the config proto version.
	Version string
	// VersionAliases is the config proto version aliases.
	VersionAliases []string
	// Proto refers to the protobuf message type name corresponding to the type
	Proto string
	// StatusProto refers to the protobuf message type name of the associated
	// status, if any (mirrors Proto for the status object).
	StatusProto string
	// ReflectType is the type of the go struct
	ReflectType reflect.Type
	// StatusType is the type of the associated status.
	StatusType reflect.Type
	// ProtoPackage refers to the name of golang package for the protobuf message.
	ProtoPackage string
	// StatusPackage refers to the name of the golang status package.
	StatusPackage string
	// ValidateProto performs validation on protobuf messages based on this schema.
	ValidateProto validation.ValidateFunc
}
// Build constructs the Schema and validates it, returning an error when the
// schema is malformed.
func (b Builder) Build() (Schema, error) {
	built := b.BuildNoValidate()
	if err := built.Validate(); err != nil {
		return nil, err
	}
	return built, nil
}
// MustBuild calls Build and panics if it fails.
func (b Builder) MustBuild() Schema {
	built, err := b.Build()
	if err == nil {
		return built
	}
	panic(fmt.Sprintf("MustBuild: %v", err))
}
// BuildNoValidate builds the Schema without checking the fields.
func (b Builder) BuildNoValidate() Schema {
	// Fall back to validation.EmptyValidate when no validator is supplied, so
	// ValidateConfig never invokes a nil function.
	if b.ValidateProto == nil {
		b.ValidateProto = validation.EmptyValidate
	}
	return &schemaImpl{
		clusterScoped: b.ClusterScoped,
		synthetic:     b.Synthetic,
		builtin:       b.Builtin,
		gvk: config.GroupVersionKind{
			Group:   b.Group,
			Version: b.Version,
			Kind:    b.Kind,
		},
		plural: b.Plural,
		// Precompute the "Group/Version" form once.
		apiVersion:     b.Group + "/" + b.Version,
		versionAliases: b.VersionAliases,
		proto:          b.Proto,
		goPackage:      b.ProtoPackage,
		identifier:     b.Identifier,
		reflectType:    b.ReflectType,
		validateConfig: b.ValidateProto,
		statusType:     b.StatusType,
		statusPackage:  b.StatusPackage,
	}
}
// schemaImpl is the Schema implementation produced by Builder; fields mirror
// the Builder fields of the same (or closely related) names.
type schemaImpl struct {
	clusterScoped bool // true for cluster-level (non-namespaced) resources
	builtin       bool // true for builtin (non-CRD) resources
	gvk           config.GroupVersionKind
	versionAliases []string // alternate versions used by GroupVersionAliasKinds
	plural         string
	apiVersion     string // precomputed "Group/Version"
	proto          string // protobuf message type name
	goPackage      string // golang package of the protobuf message
	validateConfig validation.ValidateFunc
	reflectType    reflect.Type // Go struct type; may be nil when proto lookup is used
	statusType     reflect.Type // status Go type; nil when status is unsupported
	statusPackage  string
	identifier     string
	synthetic      bool // true for resources that do not actually exist in a cluster
}
// GroupVersionKind returns the canonical GVK of this schema.
func (s *schemaImpl) GroupVersionKind() config.GroupVersionKind {
	return s.gvk
}

// GroupVersionResource returns the GVR, using the plural form as the resource name.
func (s *schemaImpl) GroupVersionResource() schema.GroupVersionResource {
	return schema.GroupVersionResource{
		Group:    s.Group(),
		Version:  s.Version(),
		Resource: s.Plural(),
	}
}

// IsClusterScoped reports whether the resource is cluster-level (not namespaced).
func (s *schemaImpl) IsClusterScoped() bool {
	return s.clusterScoped
}

// IsBuiltin reports whether the resource is builtin (not a CRD).
func (s *schemaImpl) IsBuiltin() bool {
	return s.builtin
}

// Identifier returns the unique identifier for the resource.
func (s *schemaImpl) Identifier() string {
	return s.identifier
}

// Kind returns the kind portion of the GVK.
func (s *schemaImpl) Kind() string {
	return s.gvk.Kind
}

// Plural returns the plural form of the Kind.
func (s *schemaImpl) Plural() string {
	return s.plural
}

// Group returns the group portion of the GVK.
func (s *schemaImpl) Group() string {
	return s.gvk.Group
}

// Version returns the version portion of the GVK.
func (s *schemaImpl) Version() string {
	return s.gvk.Version
}
// GroupVersionAliasKinds returns a GVK for every version alias of this
// resource, followed by the canonical GVK itself.
func (s *schemaImpl) GroupVersionAliasKinds() []config.GroupVersionKind {
	// Preallocate room for the aliases plus the canonical GVK so the final
	// append never reallocates (the original made a full-length slice and then
	// appended, guaranteeing one extra allocation and copy).
	gvks := make([]config.GroupVersionKind, 0, len(s.versionAliases)+1)
	for _, alias := range s.versionAliases {
		aliased := s.gvk
		aliased.Version = alias
		gvks = append(gvks, aliased)
	}
	return append(gvks, s.GroupVersionKind())
}
// APIVersion returns the "Group/Version" string precomputed at build time.
func (s *schemaImpl) APIVersion() string {
	return s.apiVersion
}

// Proto returns the protobuf message type name for this resource.
func (s *schemaImpl) Proto() string {
	return s.proto
}

// ProtoPackage returns the golang package of the protobuf resource.
func (s *schemaImpl) ProtoPackage() string {
	return s.goPackage
}

// StatusPackage returns the golang package of the status type, if any.
func (s *schemaImpl) StatusPackage() string {
	return s.statusPackage
}
// Validate checks that this schema is well-formed: Kind and plural must be
// valid DNS-1123 labels, and the resource must be instantiable through either
// a Go reflect type or a registered proto message. All failures are collected
// into a single multierror rather than stopping at the first.
func (s *schemaImpl) Validate() (err error) {
	if !labels.IsDNS1123Label(s.Kind()) {
		err = multierror.Append(err, fmt.Errorf("invalid kind: %s", s.Kind()))
	}
	if !labels.IsDNS1123Label(s.plural) {
		err = multierror.Append(err, fmt.Errorf("invalid plural for kind %s: %s", s.Kind(), s.plural))
	}
	if s.reflectType == nil && getProtoMessageType(s.proto) == nil {
		err = multierror.Append(err, fmt.Errorf("proto message or reflect type not found: %v", s.proto))
	}
	return
}
// String implements fmt.Stringer, producing a short debug representation.
func (s *schemaImpl) String() string {
	return fmt.Sprintf("[Schema](%s, %q, %s)", s.Kind(), s.goPackage, s.proto)
}
// NewInstance returns a freshly allocated, empty message for this resource.
// When no Go reflect type is registered, the instance is created from the
// registered protobuf message type instead. An error is returned when neither
// source can produce a config.Spec.
func (s *schemaImpl) NewInstance() (config.Spec, error) {
	goType := s.reflectType
	var created any
	if goType != nil {
		created = reflect.New(goType).Interface()
	} else {
		// No Go type registered; fall back to the protobuf registry.
		mt, err := protoMessageType(protoreflect.FullName(s.proto))
		if mt == nil || err != nil {
			return nil, errors.New("failed to find reflect type")
		}
		created = mt.New().Interface()
	}
	spec, ok := created.(config.Spec)
	if ok {
		return spec, nil
	}
	return nil, fmt.Errorf(
		"newInstance: message is not an instance of config.Spec. kind:%s, type:%v, value:%v",
		s.Kind(), goType, created)
}
// Status returns a freshly allocated status object for this resource, or an
// error when the schema has no status type or the type is not a config.Status.
func (s *schemaImpl) Status() (config.Status, error) {
	if s.statusType == nil {
		return nil, errors.New("unknown status type")
	}
	created := reflect.New(s.statusType).Interface()
	status, ok := created.(config.Status)
	if !ok {
		return nil, fmt.Errorf("status: statusType not an instance of config.Status. type: %v, value: %v", s.statusType, created)
	}
	return status, nil
}
// StatusKind returns the Go type name of the status type, or "" when the
// resource does not support status.
func (s *schemaImpl) StatusKind() string {
	if s.statusType == nil {
		return ""
	}
	return s.statusType.Name()
}
// MustNewInstance calls NewInstance and panics if an error occurs.
func (s *schemaImpl) MustNewInstance() config.Spec {
	spec, err := s.NewInstance()
	if err == nil {
		return spec
	}
	panic(err)
}
// ValidateConfig runs the validation function configured at build time
// against cfg (defaults to validation.EmptyValidate when none was set).
func (s *schemaImpl) ValidateConfig(cfg config.Config) (validation.Warning, error) {
	return s.validateConfig(cfg)
}
// Equal is a helper function for testing equality between Schema instances.
// This supports comparison with the cmp library. Only the fields listed below
// participate; version aliases, status, and validators are not compared.
func (s *schemaImpl) Equal(o Schema) bool {
	return s.IsClusterScoped() == o.IsClusterScoped() &&
		s.Kind() == o.Kind() &&
		s.Plural() == o.Plural() &&
		s.Group() == o.Group() &&
		s.Version() == o.Version() &&
		s.Proto() == o.Proto() &&
		s.ProtoPackage() == o.ProtoPackage()
}
// FromKubernetesGVK converts a Kubernetes GVK to an Istio GVK.
// It is a field-for-field copy; no validation is performed.
func FromKubernetesGVK(in *schema.GroupVersionKind) config.GroupVersionKind {
	return config.GroupVersionKind{
		Group:   in.Group,
		Version: in.Version,
		Kind:    in.Kind,
	}
}
// getProtoMessageType returns the Go type of the proto message with the given
// full name, or nil when the message is not registered.
func getProtoMessageType(protoMessageName string) reflect.Type {
	mt, err := protoMessageType(protoreflect.FullName(protoMessageName))
	if mt == nil || err != nil {
		return nil
	}
	return reflect.TypeOf(mt.Zero().Interface())
}
// protoMessageType looks up a protobuf message type by full name in the global
// registry. It is declared as a variable, which allows it to be substituted.
var protoMessageType = protoregistry.GlobalTypes.FindMessageByName
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import (
"fmt"
"net/netip"
"net/url"
"strconv"
"strings"
"unicode"
"github.com/hashicorp/go-multierror"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// JwksInfo provides values resulting from parsing a jwks URI.
type JwksInfo struct {
	Hostname host.Name // hostname portion of the URI
	Scheme   string    // URI scheme: "http" or "https"
	Port     int       // explicit port from the URI, or 80/443 by scheme
	UseSSL   bool      // true when the scheme is "https"
}
// Attribute names understood by ValidateAttribute. The trailing comments
// describe the expected value format for each attribute.
const (
	attrRequestHeader    = "request.headers"        // header name is surrounded by brackets, e.g. "request.headers[User-Agent]".
	attrSrcIP            = "source.ip"              // supports both single ip and cidr, e.g. "10.1.2.3" or "10.1.0.0/16".
	attrRemoteIP         = "remote.ip"              // original client ip determined from x-forwarded-for or proxy protocol.
	attrSrcNamespace     = "source.namespace"       // e.g. "default".
	attrSrcPrincipal     = "source.principal"       // source identity, e,g, "cluster.local/ns/default/sa/productpage".
	attrRequestPrincipal = "request.auth.principal" // authenticated principal of the request.
	attrRequestAudiences = "request.auth.audiences" // intended audience(s) for this authentication information.
	attrRequestPresenter = "request.auth.presenter" // authorized presenter of the credential.
	attrRequestClaims    = "request.auth.claims"    // claim name is surrounded by brackets, e.g. "request.auth.claims[iss]".
	attrDestIP           = "destination.ip"         // supports both single ip and cidr, e.g. "10.1.2.3" or "10.1.0.0/16".
	attrDestPort         = "destination.port"       // must be in the range [0, 65535].
	attrDestLabel        = "destination.labels"     // label name is surrounded by brackets, e.g. "destination.labels[version]".
	attrDestName         = "destination.name"       // short service name, e.g. "productpage".
	attrDestNamespace    = "destination.namespace"  // e.g. "default".
	attrDestUser         = "destination.user"       // service account, e.g. "bookinfo-productpage".
	attrConnSNI          = "connection.sni"         // server name indication, e.g. "www.example.com".
	attrExperimental     = "experimental.envoy.filters."
)
// ParseJwksURI parses the input URI and returns the corresponding hostname,
// port, scheme, and whether SSL is used. The URI must use the "http" or
// "https" scheme. The port is taken from the URI when present (e.g. ":80"),
// otherwise it defaults based on the scheme: 80 for http, 443 for https.
func ParseJwksURI(jwksURI string) (JwksInfo, error) {
	parsed, err := url.Parse(jwksURI)
	if err != nil {
		return JwksInfo{}, err
	}
	var info JwksInfo
	switch parsed.Scheme {
	case "http":
		info = JwksInfo{UseSSL: false, Port: 80}
	case "https":
		info = JwksInfo{UseSSL: true, Port: 443}
	default:
		return JwksInfo{}, fmt.Errorf("URI scheme %q is not supported", parsed.Scheme)
	}
	// An explicit port in the URI overrides the scheme default.
	if p := parsed.Port(); p != "" {
		port, convErr := strconv.Atoi(p)
		if convErr != nil {
			return JwksInfo{}, convErr
		}
		info.Port = port
	}
	info.Hostname = host.Name(parsed.Hostname())
	info.Scheme = parsed.Scheme
	return info, nil
}
// CheckEmptyValues returns an error if any entry in values is the empty
// string; key is only used to build the error message.
func CheckEmptyValues(key string, values []string) error {
	for _, v := range values {
		if len(v) == 0 {
			return fmt.Errorf("empty value not allowed, found in %s", key)
		}
	}
	return nil
}
// ValidateAttribute checks that key names a supported attribute and that
// values are valid for that attribute. Values must always be non-empty;
// attribute-specific checks (IP, port, map-key format) apply on top of that.
// Cases with empty bodies are accepted attributes that need no validation
// beyond the empty-value check. Deprecated or replaced attributes return a
// descriptive error.
func ValidateAttribute(key string, values []string) error {
	if err := CheckEmptyValues(key, values); err != nil {
		return err
	}
	switch {
	case hasPrefix(key, attrRequestHeader):
		return validateMapKey(key)
	case isEqual(key, attrSrcIP):
		return ValidateIPs(values)
	case isEqual(key, attrRemoteIP):
		return ValidateIPs(values)
	case isEqual(key, attrSrcNamespace):
	case isEqual(key, attrSrcPrincipal):
	case isEqual(key, attrRequestPrincipal):
	case isEqual(key, attrRequestAudiences):
	case isEqual(key, attrRequestPresenter):
	case hasPrefix(key, attrRequestClaims):
		return validateMapKey(key)
	case isEqual(key, attrDestIP):
		return ValidateIPs(values)
	case isEqual(key, attrDestPort):
		return ValidatePorts(values)
	case isEqual(key, attrConnSNI):
	case hasPrefix(key, attrExperimental):
		return validateMapKey(key)
	case isEqual(key, attrDestNamespace):
		return fmt.Errorf("attribute %s is replaced by the metadata.namespace", key)
	case hasPrefix(key, attrDestLabel):
		return fmt.Errorf("attribute %s is replaced by the workload selector", key)
	case isEqual(key, attrDestName, attrDestUser):
		return fmt.Errorf("deprecated attribute %s: only supported in v1alpha1", key)
	default:
		return fmt.Errorf("unknown attribute: %s", key)
	}
	return nil
}
// isEqual reports whether key matches any of the candidate values.
func isEqual(key string, values ...string) bool {
	for _, candidate := range values {
		if candidate == key {
			return true
		}
	}
	return false
}
// hasPrefix is a thin wrapper over strings.HasPrefix for readability at the
// attribute-matching call sites.
func hasPrefix(key string, prefix string) bool {
	return strings.HasPrefix(key, prefix)
}
// ValidateIPs checks that each entry is either a valid single IP address or,
// when it contains "/", a valid CIDR range. All failures are accumulated into
// a single multierror.
func ValidateIPs(ips []string) error {
	var errs *multierror.Error
	for _, entry := range ips {
		if strings.Contains(entry, "/") {
			if _, err := netip.ParsePrefix(entry); err != nil {
				errs = multierror.Append(errs, fmt.Errorf("bad CIDR range (%s): %v", entry, err))
			}
			continue
		}
		if _, err := netip.ParseAddr(entry); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("bad IP address (%s)", entry))
		}
	}
	return errs.ErrorOrNil()
}
// ValidatePorts checks that every entry is a decimal port number in
// [0, 65535], accumulating all failures into a single multierror.
func ValidatePorts(ports []string) error {
	var errs *multierror.Error
	for _, raw := range ports {
		val, err := strconv.ParseUint(raw, 10, 32)
		if err != nil || val > 65535 {
			errs = multierror.Append(errs, fmt.Errorf("bad port (%s): %v", raw, err))
		}
	}
	return errs.ErrorOrNil()
}
// validateMapKey checks that key has the form "a[b]": a non-empty prefix,
// an opening bracket, a non-empty inner key, and a closing "]" at the end.
func validateMapKey(key string) error {
	open := strings.Index(key, "[")
	if !strings.HasSuffix(key, "]") || open <= 0 || open >= len(key)-2 {
		return fmt.Errorf("bad key (%s): should have format a[b]", key)
	}
	return nil
}
// ValidCipherSuites contains a list of all ciphers supported in Gateway.server.tls.cipherSuites
// Extracted from: `bssl ciphers -openssl-name ALL | rg -v PSK`
// Membership is consulted by IsValidCipherSuite.
var ValidCipherSuites = sets.New(
	"ECDHE-ECDSA-AES128-GCM-SHA256",
	"ECDHE-RSA-AES128-GCM-SHA256",
	"ECDHE-ECDSA-AES256-GCM-SHA384",
	"ECDHE-RSA-AES256-GCM-SHA384",
	"ECDHE-ECDSA-CHACHA20-POLY1305",
	"ECDHE-RSA-CHACHA20-POLY1305",
	"ECDHE-ECDSA-AES128-SHA",
	"ECDHE-RSA-AES128-SHA",
	"ECDHE-ECDSA-AES256-SHA",
	"ECDHE-RSA-AES256-SHA",
	"AES128-GCM-SHA256",
	"AES256-GCM-SHA384",
	"AES128-SHA",
	"AES256-SHA",
	"DES-CBC3-SHA",
)

// ValidECDHCurves contains a list of all ecdh curves supported in MeshConfig.TlsDefaults.ecdhCurves
// Source:
// https://github.com/google/boringssl/blob/3743aafdacff2f7b083615a043a37101f740fa53/ssl/ssl_key_share.cc#L302-L309
// Membership is consulted by IsValidECDHCurve.
var ValidECDHCurves = sets.New(
	"P-224",
	"P-256",
	"P-521",
	"P-384",
	"X25519",
	"CECPQ2",
)
// IsValidCipherSuite reports whether cs is acceptable as a cipher suite name.
// The empty string and "ALL" are always accepted, as is any string whose first
// character is neither a letter nor a digit (to allow expression operators
// such as - and + without parsing the full expression); everything else must
// be in ValidCipherSuites.
func IsValidCipherSuite(cs string) bool {
	switch cs {
	case "", "ALL":
		return true
	}
	first := rune(cs[0])
	if !unicode.IsNumber(first) && !unicode.IsLetter(first) {
		// Not all of these are correct, but this is needed to support advanced
		// cases like - and + operators without needing to parse the full expression.
		return true
	}
	return ValidCipherSuites.Contains(cs)
}
// IsValidECDHCurve reports whether cs names a supported ECDH curve.
// The empty string is always accepted.
func IsValidECDHCurve(cs string) bool {
	if cs == "" {
		return true
	}
	return ValidECDHCurves.Contains(cs)
}
// FilterCipherSuites filters out invalid cipher suites which would lead Envoy
// to NACKing, and drops duplicates. Order of the surviving entries is
// preserved; skipped entries are logged at debug level.
func FilterCipherSuites(suites []string) []string {
	if len(suites) == 0 {
		return nil
	}
	seen := sets.New[string]()
	filtered := make([]string, 0, len(suites))
	for _, suite := range suites {
		switch {
		case !IsValidCipherSuite(suite):
			if log.DebugEnabled() {
				log.Debugf("ignoring unsupported cipherSuite: %q", suite)
			}
		case seen.InsertContains(suite):
			if log.DebugEnabled() {
				log.Debugf("ignoring duplicated cipherSuite: %q", suite)
			}
		default:
			filtered = append(filtered, suite)
		}
	}
	return filtered
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"fmt"
"net/url"
"strconv"
"strings"
envoytypev3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
"github.com/hashicorp/go-multierror"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/util/sets"
)
func validateExtensionProviderService(service string) error {
if service == "" {
return fmt.Errorf("service must not be empty")
}
parts := strings.Split(service, "/")
if len(parts) == 1 {
if err := ValidateFQDN(service); err != nil {
if err2 := ValidateIPAddress(service); err2 != nil {
return fmt.Errorf("invalid service fmt %s: %s", service, err2)
}
}
} else {
if err := validateNamespaceSlashWildcardHostname(service, false, false); err != nil {
return err
}
}
return nil
}
// validateExtensionProviderEnvoyExtAuthzStatusOnError checks that status, when
// non-empty, is a numeric HTTP status code known to Envoy's StatusCode enum.
func validateExtensionProviderEnvoyExtAuthzStatusOnError(status string) error {
	if len(status) == 0 {
		return nil
	}
	parsed, err := strconv.ParseInt(status, 10, 32)
	if err != nil {
		return fmt.Errorf("invalid statusOnError value %s: %v", status, err)
	}
	if _, ok := envoytypev3.StatusCode_name[int32(parsed)]; ok {
		return nil
	}
	return fmt.Errorf("unsupported statusOnError value %s, supported values: %v", status, envoytypev3.StatusCode_name)
}
// ValidateExtensionProviderEnvoyExtAuthzHTTP validates an ext_authz HTTP
// provider: port and service must be valid, statusOnError (if set) must be a
// known status code, and pathPrefix (if set) must parse as a URL and begin
// with "/". All problems are accumulated and returned together.
func ValidateExtensionProviderEnvoyExtAuthzHTTP(config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationHttpProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil EnvoyExternalAuthorizationHttpProvider")
	}
	if err := ValidatePort(int(config.Port)); err != nil {
		errs = appendErrors(errs, err)
	}
	if err := validateExtensionProviderService(config.Service); err != nil {
		errs = appendErrors(errs, err)
	}
	if err := validateExtensionProviderEnvoyExtAuthzStatusOnError(config.StatusOnError); err != nil {
		errs = appendErrors(errs, err)
	}
	if config.PathPrefix != "" {
		if _, err := url.Parse(config.PathPrefix); err != nil {
			errs = appendErrors(errs, fmt.Errorf("invalid pathPrefix %s: %v", config.PathPrefix, err))
		}
		if !strings.HasPrefix(config.PathPrefix, "/") {
			errs = appendErrors(errs, fmt.Errorf("pathPrefix should begin with `/` but found %q", config.PathPrefix))
		}
	}
	return
}
// ValidateExtensionProviderEnvoyExtAuthzGRPC validates an ext_authz gRPC
// provider: port and service must be valid and statusOnError (if set) must be
// a known status code. All problems are accumulated and returned together.
func ValidateExtensionProviderEnvoyExtAuthzGRPC(config *meshconfig.MeshConfig_ExtensionProvider_EnvoyExternalAuthorizationGrpcProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil EnvoyExternalAuthorizationGrpcProvider")
	}
	if err := ValidatePort(int(config.Port)); err != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", err))
	}
	if err := validateExtensionProviderService(config.Service); err != nil {
		errs = appendErrors(errs, err)
	}
	if err := validateExtensionProviderEnvoyExtAuthzStatusOnError(config.StatusOnError); err != nil {
		errs = appendErrors(errs, err)
	}
	return
}
// validateExtensionProviderTracingZipkin validates a Zipkin tracing provider:
// the service reference and port must both be valid; problems are accumulated.
func validateExtensionProviderTracingZipkin(config *meshconfig.MeshConfig_ExtensionProvider_ZipkinTracingProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil TracingZipkinProvider")
	}
	if err := validateExtensionProviderService(config.Service); err != nil {
		errs = appendErrors(errs, err)
	}
	if err := ValidatePort(int(config.Port)); err != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", err))
	}
	return
}
// validateExtensionProviderTracingLightStep validates a LightStep tracing
// provider: service reference, port range, and the mandatory access token.
func validateExtensionProviderTracingLightStep(config *meshconfig.MeshConfig_ExtensionProvider_LightstepTracingProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil TracingLightStepProvider")
	}
	if svcErr := validateExtensionProviderService(config.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	if portErr := ValidatePort(int(config.Port)); portErr != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", portErr))
	}
	if len(config.AccessToken) == 0 {
		errs = appendErrors(errs, fmt.Errorf("access token is required"))
	}
	return errs
}
// validateExtensionProviderTracingDatadog validates a Datadog tracing
// provider: the service reference must be well formed and the port in range.
func validateExtensionProviderTracingDatadog(config *meshconfig.MeshConfig_ExtensionProvider_DatadogTracingProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil TracingDatadogProvider")
	}
	if svcErr := validateExtensionProviderService(config.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	if portErr := ValidatePort(int(config.Port)); portErr != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", portErr))
	}
	return errs
}
// validateExtensionProviderTracingOpenCensusAgent validates an OpenCensus
// agent tracing provider: service reference and port range.
func validateExtensionProviderTracingOpenCensusAgent(config *meshconfig.MeshConfig_ExtensionProvider_OpenCensusAgentTracingProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil OpenCensusAgent")
	}
	if svcErr := validateExtensionProviderService(config.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	if portErr := ValidatePort(int(config.Port)); portErr != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", portErr))
	}
	return errs
}
// validateExtensionProviderTracingSkyWalking validates a SkyWalking tracing
// provider: service reference and port range.
func validateExtensionProviderTracingSkyWalking(config *meshconfig.MeshConfig_ExtensionProvider_SkyWalkingTracingProvider) (errs error) {
	if config == nil {
		return fmt.Errorf("nil TracingSkyWalkingProvider")
	}
	if svcErr := validateExtensionProviderService(config.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	if portErr := ValidatePort(int(config.Port)); portErr != nil {
		errs = appendErrors(errs, fmt.Errorf("invalid service port: %v", portErr))
	}
	return errs
}
// validateExtensionProviderMetricsPrometheus accepts any Prometheus metrics
// provider configuration; the type has no fields that require validation here.
func validateExtensionProviderMetricsPrometheus(_ *meshconfig.MeshConfig_ExtensionProvider_PrometheusMetricsProvider) error {
	return nil
}
// validateExtensionProviderStackdriver accepts any Stackdriver provider
// configuration; the type has no fields that require validation here.
func validateExtensionProviderStackdriver(_ *meshconfig.MeshConfig_ExtensionProvider_StackdriverProvider) error {
	return nil
}
// validateExtensionProviderEnvoyFileAccessLog accepts any Envoy file access
// log provider configuration; the type has no fields that require validation here.
func validateExtensionProviderEnvoyFileAccessLog(_ *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLogProvider) error {
	return nil
}
// ValidateExtensionProviderEnvoyOtelAls validates an Envoy OpenTelemetry
// access-log service provider: port range and service reference.
func ValidateExtensionProviderEnvoyOtelAls(provider *meshconfig.MeshConfig_ExtensionProvider_EnvoyOpenTelemetryLogProvider) (errs error) {
	if provider == nil {
		return fmt.Errorf("nil EnvoyOpenTelemetryLogProvider")
	}
	if portErr := ValidatePort(int(provider.Port)); portErr != nil {
		errs = appendErrors(errs, portErr)
	}
	if svcErr := validateExtensionProviderService(provider.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	return errs
}
// ValidateExtensionProviderTracingOpentelemetry validates an OpenTelemetry
// tracing provider: port range and service reference.
func ValidateExtensionProviderTracingOpentelemetry(provider *meshconfig.MeshConfig_ExtensionProvider_OpenTelemetryTracingProvider) (errs error) {
	if provider == nil {
		return fmt.Errorf("nil OpenTelemetryTracingProvider")
	}
	if portErr := ValidatePort(int(provider.Port)); portErr != nil {
		errs = appendErrors(errs, portErr)
	}
	if svcErr := validateExtensionProviderService(provider.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	return errs
}
// ValidateExtensionProviderEnvoyHTTPAls validates an Envoy gRPC HTTP
// access-log service provider: port range and service reference.
func ValidateExtensionProviderEnvoyHTTPAls(provider *meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpGrpcV3LogProvider) (errs error) {
	if provider == nil {
		return fmt.Errorf("nil EnvoyHttpGrpcV3LogProvider")
	}
	if portErr := ValidatePort(int(provider.Port)); portErr != nil {
		errs = appendErrors(errs, portErr)
	}
	if svcErr := validateExtensionProviderService(provider.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	return errs
}
// ValidateExtensionProviderEnvoyTCPAls validates an Envoy gRPC TCP
// access-log service provider: port range and service reference.
func ValidateExtensionProviderEnvoyTCPAls(provider *meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpGrpcV3LogProvider) (errs error) {
	if provider == nil {
		return fmt.Errorf("nil EnvoyTcpGrpcV3LogProvider")
	}
	if portErr := ValidatePort(int(provider.Port)); portErr != nil {
		errs = appendErrors(errs, portErr)
	}
	if svcErr := validateExtensionProviderService(provider.Service); svcErr != nil {
		errs = appendErrors(errs, svcErr)
	}
	return errs
}
// validateExtensionProvider validates the MeshConfig.ExtensionProviders list:
// provider names must be unique and non-empty, and each provider's
// type-specific configuration must pass its dedicated validator. Errors from
// each provider are prefixed with that provider's name.
func validateExtensionProvider(config *meshconfig.MeshConfig) (errs error) {
	definedProviders := sets.String{}
	for _, c := range config.ExtensionProviders {
		var currentErrs error
		// Provider name must be unique and not empty.
		if c.Name == "" {
			currentErrs = appendErrors(currentErrs, fmt.Errorf("empty extension provider name"))
		} else {
			if definedProviders.Contains(c.Name) {
				currentErrs = appendErrors(currentErrs, fmt.Errorf("duplicate extension provider name %s", c.Name))
			}
			definedProviders.Insert(c.Name)
		}
		// Dispatch on the concrete oneof provider type; each case delegates to
		// the matching type-specific validation function.
		switch provider := c.Provider.(type) {
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzHttp:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderEnvoyExtAuthzHTTP(provider.EnvoyExtAuthzHttp))
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyExtAuthzGrpc:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderEnvoyExtAuthzGRPC(provider.EnvoyExtAuthzGrpc))
		case *meshconfig.MeshConfig_ExtensionProvider_Zipkin:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderTracingZipkin(provider.Zipkin))
		//nolint: staticcheck // Lightstep deprecated
		case *meshconfig.MeshConfig_ExtensionProvider_Lightstep:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderTracingLightStep(provider.Lightstep))
		case *meshconfig.MeshConfig_ExtensionProvider_Datadog:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderTracingDatadog(provider.Datadog))
		//nolint: staticcheck
		case *meshconfig.MeshConfig_ExtensionProvider_Opencensus:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderTracingOpenCensusAgent(provider.Opencensus))
		case *meshconfig.MeshConfig_ExtensionProvider_Skywalking:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderTracingSkyWalking(provider.Skywalking))
		case *meshconfig.MeshConfig_ExtensionProvider_Prometheus:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderMetricsPrometheus(provider.Prometheus))
		case *meshconfig.MeshConfig_ExtensionProvider_Stackdriver:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderStackdriver(provider.Stackdriver))
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyFileAccessLog:
			currentErrs = appendErrors(currentErrs, validateExtensionProviderEnvoyFileAccessLog(provider.EnvoyFileAccessLog))
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyOtelAls:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderEnvoyOtelAls(provider.EnvoyOtelAls))
		case *meshconfig.MeshConfig_ExtensionProvider_Opentelemetry:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderTracingOpentelemetry(provider.Opentelemetry))
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyHttpAls:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderEnvoyHTTPAls(provider.EnvoyHttpAls))
		case *meshconfig.MeshConfig_ExtensionProvider_EnvoyTcpAls:
			currentErrs = appendErrors(currentErrs, ValidateExtensionProviderEnvoyTCPAls(provider.EnvoyTcpAls))
		// TODO: add exhaustiveness test
		default:
			currentErrs = appendErrors(currentErrs, fmt.Errorf("unsupported provider: %v of type %T", provider, provider))
		}
		// Prefix all errors from this provider with its name for readability.
		currentErrs = multierror.Prefix(currentErrs, fmt.Sprintf("invalid extension provider %s:", c.Name))
		errs = appendErrors(errs, currentErrs)
	}
	return
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"errors"
"fmt"
)
// ParserState enumerates the states of the %VAR%/%VAR([...])% header-value
// parser below.
type ParserState int32

const (
	LiteralParserState                   ParserState = iota // processing literal data
	VariableNameParserState                                 // consuming a %VAR% name
	ExpectArrayParserState                                  // expect starting [ in %VAR([...])%
	ExpectStringParserState                                 // expect starting " in array of strings
	StringParserState                                       // consuming an array element string
	ExpectArrayDelimiterOrEndParserState                    // expect array delimiter (,) or end of array (])
	ExpectArgsEndParserState                                // expect closing ) in %VAR(...)%
	ExpectVariableEndParserState                            // expect closing % in %VAR(...)%
)

// validateHeaderValue is a golang port of Envoy's header value parser:
// https://github.com/envoyproxy/envoy/blob/master/source/common/router/header_parser.cc#L73
// It checks that %VAR% and %VAR([...])% substitution expressions in a header
// value are syntactically well formed; literal text and the escaped form "%%"
// are always accepted. Only ASCII space counts as whitespace, mirroring Envoy.
func validateHeaderValue(headerValue string) error {
	if headerValue == "" {
		return nil
	}
	var (
		pos   = 0
		state = LiteralParserState
	)
	for pos < len(headerValue) {
		ch := headerValue[pos]
		hasNextCh := (pos + 1) < len(headerValue)
		switch state {
		case LiteralParserState:
			// Searching for start of %VARIABLE% expression.
			if ch != '%' {
				break
			}
			if !hasNextCh {
				return errors.New("invalid header configuration. Un-escaped %")
			}
			if headerValue[pos+1] == '%' {
				// Escaped %, skip next character.
				pos++
				break
			}
			// Un-escaped %: start of variable name.
			state = VariableNameParserState
		case VariableNameParserState:
			// Consume "VAR" from "%VAR%" or "%VAR(...)%".
			if ch == '%' {
				state = LiteralParserState
				break
			}
			if ch == '(' {
				// Variable with arguments, search for start of arg array.
				state = ExpectArrayParserState
			}
		case ExpectArrayParserState:
			// Skip over whitespace searching for the start of JSON array args.
			if ch == '[' {
				// Search for first argument string.
				state = ExpectStringParserState
			} else if ch != ' ' {
				// Not an array: consume it as a raw string argument.
				state = StringParserState
			}
		case ExpectArrayDelimiterOrEndParserState:
			// Skip over whitespace searching for a comma or close bracket.
			if ch == ',' {
				state = ExpectStringParserState
			} else if ch == ']' {
				state = ExpectArgsEndParserState
			} else if ch != ' ' {
				return errors.New("invalid header configuration. Expecting ',', ']', or whitespace")
			}
		case ExpectStringParserState:
			// Skip over whitespace looking for the starting quote of a JSON string.
			if ch == '"' {
				state = StringParserState
			} else if ch != ' ' {
				return errors.New("invalid header configuration. Expecting '\"'")
			}
		case StringParserState:
			// Consume a JSON string (ignoring backslash-escaped chars).
			if ch == '\\' {
				if !hasNextCh {
					return errors.New("invalid header configuration. Un-terminated backslash in JSON string")
				}
				// Skip escaped char.
				pos++
			} else if ch == ')' {
				state = ExpectVariableEndParserState
			} else if ch == '"' {
				state = ExpectArrayDelimiterOrEndParserState
			}
		case ExpectArgsEndParserState:
			// Search for the closing paren of a %VAR(...)% expression.
			if ch == ')' {
				state = ExpectVariableEndParserState
			} else if ch != ' ' {
				// Report the offending character. The previous message carried
				// Envoy's raw C++ "{}" fmt placeholders, which were never filled.
				return fmt.Errorf("invalid header configuration. Expecting ')' or whitespace, but found '%c'", ch)
			}
		case ExpectVariableEndParserState:
			// Search for closing % of a %VAR(...)% expression.
			if ch == '%' {
				state = LiteralParserState
				break
			}
			if ch != ' ' {
				return fmt.Errorf("invalid header configuration. Expecting '%%' or whitespace after ')', but found '%c'", ch)
			}
		}
		pos++
	}
	if state != LiteralParserState {
		// Parsing terminated mid-variable.
		return fmt.Errorf("invalid header configuration. Un-terminated variable expression '%s'", headerValue)
	}
	return nil
}
// isSpace reports whether chr is the ASCII space character — the only
// whitespace this package's header parsing skips between tokens.
func isSpace(chr byte) bool {
	switch chr {
	case ' ':
		return true
	default:
		return false
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
"net/netip"
"net/url"
"path"
"regexp"
"strconv"
"strings"
"time"
udpaa "github.com/cncf/xds/go/udpa/annotations"
"github.com/hashicorp/go-multierror"
"github.com/lestrrat-go/jwx/jwk"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/durationpb"
"istio.io/api/annotation"
extensions "istio.io/api/extensions/v1alpha1"
meshconfig "istio.io/api/mesh/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
networkingv1beta1 "istio.io/api/networking/v1beta1"
security_beta "istio.io/api/security/v1beta1"
telemetry "istio.io/api/telemetry/v1alpha1"
type_beta "istio.io/api/type/v1beta1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/serviceregistry/util/label"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/gateway"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/config/security"
"istio.io/istio/pkg/config/visibility"
"istio.io/istio/pkg/config/xds"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/kube/apimirror"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/grpc"
netutil "istio.io/istio/pkg/util/net"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/pkg/wellknown"
)
// Constants for duration fields
const (
	// nolint: revive
	connectTimeoutMax = time.Second * 30
	// nolint: revive
	connectTimeoutMin = time.Millisecond
	drainTimeMax      = time.Hour
	// UnixAddressPrefix is the prefix used to indicate an address is for a Unix Domain socket. It is used in
	// ServiceEntry.Endpoint.Address message.
	UnixAddressPrefix = "unix://"
	// Prefixes distinguishing exact from prefix string-match values.
	matchExact  = "exact:"
	matchPrefix = "prefix:"
)

const (
	// Indexes into the region/zone/subZone components of a locality label.
	regionIndex int = iota
	zoneIndex
	subZoneIndex
	// Byte-size units.
	kb = 1024
	mb = 1024 * kb
)
var (
	// envoy supported retry on header values
	supportedRetryOnPolicies = map[string]bool{
		// 'x-envoy-retry-on' supported policies:
		// https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter.html#x-envoy-retry-on
		"5xx":                    true,
		"gateway-error":          true,
		"reset":                  true,
		"connect-failure":        true,
		"retriable-4xx":          true,
		"refused-stream":         true,
		"retriable-status-codes": true,
		"retriable-headers":      true,
		"envoy-ratelimited":      true,
		// 'x-envoy-retry-grpc-on' supported policies:
		// https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-grpc-on
		"cancelled":          true,
		"deadline-exceeded":  true,
		"internal":           true,
		"resource-exhausted": true,
		"unavailable":        true,
	}

	// golang supported methods: https://golang.org/src/net/http/method.go
	supportedMethods = map[string]bool{
		http.MethodGet:     true,
		http.MethodHead:    true,
		http.MethodPost:    true,
		http.MethodPut:     true,
		http.MethodPatch:   true,
		http.MethodDelete:  true,
		http.MethodConnect: true,
		http.MethodOptions: true,
		http.MethodTrace:   true,
	}

	// scope is the logging scope used by validation debug output.
	scope = log.RegisterScope("validation", "CRD validation debugging")

	// EmptyValidate is a Validate that does nothing and returns no error.
	EmptyValidate = registerValidateFunc("EmptyValidate",
		func(config.Config) (Warning, error) {
			return nil, nil
		})

	// validateFuncs is the registry of named validation functions, populated
	// by registerValidateFunc and queried by GetValidateFunc/IsValidateFunc.
	validateFuncs = make(map[string]ValidateFunc)
)
// Warning is a non-fatal validation finding: it is surfaced to the user but
// does not by itself reject the configuration.
type Warning error

// Validation holds errors and warnings. They can be joined with additional errors by called appendValidation
type Validation struct {
	// Err holds hard validation failures; non-nil means the config is invalid.
	Err error
	// Warning holds soft findings that accompany an otherwise valid config.
	Warning Warning
}

// AnalysisAwareError carries a structured validation error: Type names the
// error class, Msg is the rendered message, and Parameters holds the values
// the message was built from.
type AnalysisAwareError struct {
	Type       string
	Msg        string
	Parameters []any
}

// OverlappingMatchValidationForHTTPRoute holds necessary information from virtualservice
// to do such overlapping match validation
type OverlappingMatchValidationForHTTPRoute struct {
	// RouteStr and MatchStr identify the route and match clause being checked.
	RouteStr string
	MatchStr string
	// Prefix and the Match* fields capture the individual match criteria
	// compared against other routes for overlap.
	Prefix           string
	MatchPort        uint32
	MatchMethod      string
	MatchAuthority   string
	MatchHeaders     map[string]string
	MatchQueryParams map[string]string
	MatchNonHeaders  map[string]string
}

// Compile-time assertion that Validation satisfies the error interface.
var _ error = Validation{}
// WrapError turns a plain error into a Validation carrying it as a hard error.
func WrapError(e error) Validation {
	v := Validation{}
	v.Err = e
	return v
}
// WrapWarning turns a plain error into a Validation carrying it as a warning.
func WrapWarning(e error) Validation {
	v := Validation{}
	v.Warning = e
	return v
}
// Warningf formats according to a format specifier and returns the string as a
// value that satisfies error. Like Errorf, but for warnings.
func Warningf(format string, a ...any) Validation {
	err := fmt.Errorf(format, a...)
	return WrapWarning(err)
}
// Unwrap splits a Validation into its warning and error halves, in that order.
func (v Validation) Unwrap() (Warning, error) {
	return v.Warning, v.Err
}
// Error implements the error interface; warnings do not contribute, so a
// Validation with only a warning renders as the empty string.
func (v Validation) Error() string {
	if v.Err != nil {
		return v.Err.Error()
	}
	return ""
}
// ValidateFunc defines a validation func for an API proto. It returns any
// non-fatal warnings alongside a fatal error, if one occurred.
type ValidateFunc func(config config.Config) (Warning, error)
// IsValidateFunc indicates whether there is a validation function registered
// under the given name.
func IsValidateFunc(name string) bool {
	f := GetValidateFunc(name)
	return f != nil
}
// GetValidateFunc returns the validation function registered under the given
// name, or nil if none exists.
func GetValidateFunc(name string) ValidateFunc {
	f, ok := validateFuncs[name]
	if !ok {
		return nil
	}
	return f
}
// registerValidateFunc records f in the validation registry under name,
// wrapping it with the shared object-metadata checks first. The wrapped
// function is both stored and returned.
func registerValidateFunc(name string, f ValidateFunc) ValidateFunc {
	wrapped := validateMetadata(f)
	validateFuncs[name] = wrapped
	return wrapped
}
// validateMetadata wraps a resource validation function with metadata checks
// that apply to every config type: the dry-run annotation (AuthorizationPolicy
// only) and the always-reject annotation. Metadata failures short-circuit
// before the wrapped validator runs.
func validateMetadata(f ValidateFunc) ValidateFunc {
	return func(config config.Config) (Warning, error) {
		// Check the annotation "istio.io/dry-run".
		_, isAuthz := config.Spec.(*security_beta.AuthorizationPolicy)
		// Only the AuthorizationPolicy supports the annotation "istio.io/dry-run".
		if err := checkDryRunAnnotation(config, isAuthz); err != nil {
			return nil, err
		}
		// Resources carrying the AlwaysReject annotation are refused outright.
		if _, f := config.Annotations[constants.AlwaysReject]; f {
			return nil, fmt.Errorf("%q annotation found, rejecting", constants.AlwaysReject)
		}
		return f(config)
	}
}
// checkDryRunAnnotation validates the "istio.io/dry-run" annotation on cfg.
// allowed indicates whether this resource type supports the annotation at all
// (only AuthorizationPolicy does). When present, the annotation value must be
// a parseable boolean, and the policy action must be ALLOW or DENY.
func checkDryRunAnnotation(cfg config.Config, allowed bool) error {
	if val, found := cfg.Annotations[annotation.IoIstioDryRun.Name]; found {
		if !allowed {
			return fmt.Errorf("%s/%s has unsupported annotation %s, please remove the annotation", cfg.Namespace, cfg.Name, annotation.IoIstioDryRun.Name)
		}
		if spec, ok := cfg.Spec.(*security_beta.AuthorizationPolicy); ok {
			switch spec.Action {
			case security_beta.AuthorizationPolicy_ALLOW, security_beta.AuthorizationPolicy_DENY:
				// The annotation value must be a valid boolean string.
				if _, err := strconv.ParseBool(val); err != nil {
					return fmt.Errorf("%s/%s has annotation %s with invalid value (%s): %v", cfg.Namespace, cfg.Name, annotation.IoIstioDryRun.Name, val, err)
				}
			default:
				// Dry-run is not defined for other actions (e.g. AUDIT/CUSTOM).
				return fmt.Errorf("the annotation %s currently only supports action ALLOW/DENY, found action %v in %s/%s",
					annotation.IoIstioDryRun.Name, spec.Action, cfg.Namespace, cfg.Name)
			}
		}
	}
	return nil
}
// ValidatePort checks that the network port is in the valid range 1..65535.
func ValidatePort(port int) error {
	if port < 1 || port > 65535 {
		return fmt.Errorf("port number %d must be in the range 1..65535", port)
	}
	return nil
}
// ValidateFQDN checks a fully-qualified domain name: overall length and
// emptiness first, then per-label DNS-1123 conformance.
func ValidateFQDN(fqdn string) error {
	err := checkDNS1123Preconditions(fqdn)
	if err == nil {
		err = validateDNS1123Labels(fqdn)
	}
	return err
}
// ValidateWildcardDomain checks that a domain is a valid FQDN, but also allows
// a wildcard in its first (left-most) label only.
func ValidateWildcardDomain(domain string) error {
	if err := checkDNS1123Preconditions(domain); err != nil {
		return err
	}
	// Split off the first label; only it may be a wildcard.
	first, rest, hasRest := strings.Cut(domain, ".")
	if !labels.IsWildcardDNS1123Label(first) {
		return fmt.Errorf("domain name %q invalid (label %q invalid)", domain, first)
	}
	if hasRest {
		return validateDNS1123Labels(rest)
	}
	return nil
}
// checkDNS1123Preconditions encapsulates DNS 1123 checks common to both
// wildcarded hosts and FQDNs: the name must be non-empty and at most 255
// characters long.
func checkDNS1123Preconditions(name string) error {
	switch {
	case len(name) == 0:
		return fmt.Errorf("empty domain name not allowed")
	case len(name) > 255:
		return fmt.Errorf("domain name %q too long (max 255)", name)
	default:
		return nil
	}
}
// validateDNS1123Labels checks every dot-separated label of domain against the
// DNS-1123 label rules. The top-level domain may not be all-numeric, and a
// trailing dot (empty final label) is permitted, e.g. "istio.io.".
func validateDNS1123Labels(domain string) error {
	parts := strings.Split(domain, ".")
	tld := parts[len(parts)-1]
	// Reject an all-numeric top-level domain.
	if _, convErr := strconv.Atoi(tld); convErr == nil {
		return fmt.Errorf("domain name %q invalid (top level domain %q cannot be all-numeric)", domain, tld)
	}
	last := len(parts) - 1
	for i, label := range parts {
		if label == "" && i == last {
			// Unambiguous names like `istio.io.` end with an empty label.
			return nil
		}
		if !labels.IsDNS1123Label(label) {
			return fmt.Errorf("domain name %q invalid (label %q invalid)", domain, label)
		}
	}
	return nil
}
// ValidateTrustDomain validates the trust domain format: non-empty, with every
// dot-separated label DNS-1123 conformant. A trailing dot (empty final label)
// is permitted, e.g. "istio.io.".
func ValidateTrustDomain(domain string) error {
	if domain == "" {
		return fmt.Errorf("empty domain name not allowed")
	}
	parts := strings.Split(domain, ".")
	last := len(parts) - 1
	for i, label := range parts {
		if i == last && label == "" {
			return nil
		}
		if !labels.IsDNS1123Label(label) {
			return fmt.Errorf("trust domain name %q invalid", domain)
		}
	}
	return nil
}
// ValidateHTTPHeaderName validates a header name; only emptiness is rejected.
func ValidateHTTPHeaderName(name string) error {
	if len(name) == 0 {
		return fmt.Errorf("header name cannot be empty")
	}
	return nil
}
// ValidateHTTPHeaderWithAuthorityOperationName validates a header name when
// used to add/set in a request. ":"-prefixed internal headers are rejected,
// with the exception of the authority header, which is validated later.
func ValidateHTTPHeaderWithAuthorityOperationName(name string) error {
	if name == "" {
		return fmt.Errorf("header name cannot be empty")
	}
	if isInternalHeader(name) {
		if !isAuthorityHeader(name) {
			return fmt.Errorf(`invalid header %q: header cannot have ":" prefix`, name)
		}
	}
	return nil
}
// ValidateHTTPHeaderOperationName validates a header name when used to remove
// from a request or modify a response: it must be non-empty, not the Host
// header, and not a ":"-prefixed internal header.
func ValidateHTTPHeaderOperationName(name string) error {
	switch {
	case name == "":
		return fmt.Errorf("header name cannot be empty")
	case strings.EqualFold(name, "host"):
		return fmt.Errorf(`invalid header %q: cannot set Host header`, name)
	case isInternalHeader(name):
		return fmt.Errorf(`invalid header %q: header cannot have ":" prefix`, name)
	}
	return nil
}
// Copy from https://github.com/bufbuild/protoc-gen-validate/blob/a65858624dd654f2fb306d6af60f737132986f44/module/checker.go#L18
// Matches strings free of control characters (C0 controls except TAB, plus DEL).
var httpHeaderValueRegexp = regexp.MustCompile("^[^\u0000-\u0008\u000A-\u001F\u007F]*$")
// ValidateHTTPHeaderValue validates a header value for Envoy.
// Valid: "foo", "%HOSTNAME%", "100%%", "prefix %HOSTNAME% suffix"
// Invalid: "abc%123", "%START_TIME%%"
// Only the %...% syntax is checked — the variable names inside are not matched
// against Envoy's recognized set.
// See: https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers.html#custom-request-response-headers
// TODO: find a better way to validate fields supported in custom header, e.g %ENVIRONMENT(X):Z%
func ValidateHTTPHeaderValue(value string) error {
	if !httpHeaderValueRegexp.MatchString(value) {
		return fmt.Errorf("header value configuration %s is invalid", value)
	}
	err := validateHeaderValue(value)
	if err == nil {
		return nil
	}
	return fmt.Errorf("header value configuration: %w", err)
}
// validateWeight checks that a route weight is non-negative.
func validateWeight(weight int32) error {
	if weight >= 0 {
		return nil
	}
	return fmt.Errorf("weight %d < 0", weight)
}
// ValidatePercent checks that an integer percentage is in the range 0..100.
func ValidatePercent(val int32) error {
	if 0 <= val && val <= 100 {
		return nil
	}
	return fmt.Errorf("percentage %v is not in range 0..100", val)
}
// validatePercentage checks a fractional percentage: nil is accepted, and a
// value must be exactly 0.0 or lie within [0.0001, 100.0].
func validatePercentage(percentage *networking.Percent) error {
	if percentage == nil {
		return nil
	}
	v := percentage.Value
	if v < 0.0 || v > 100.0 || (v > 0.0 && v < 0.0001) {
		return fmt.Errorf("percentage %v is neither 0.0, nor in range [0.0001, 100.0]", v)
	}
	return nil
}
// ValidateIPSubnet checks that a string is in "CIDR notation" (a.b.c.d/xx,
// 2001:1::1/64) or "Dot-decimal notation" (a plain IP address).
func ValidateIPSubnet(subnet string) error {
	if strings.Count(subnet, "/") != 1 {
		// No single CIDR separator: treat as a plain IP address.
		return ValidateIPAddress(subnet)
	}
	if _, err := netip.ParsePrefix(subnet); err != nil {
		return fmt.Errorf("%v is not a valid CIDR block", subnet)
	}
	return nil
}
// ValidateIPAddress validates that a string is a parseable IPv4 or IPv6 address.
func ValidateIPAddress(addr string) error {
	_, err := netip.ParseAddr(addr)
	if err != nil {
		return fmt.Errorf("%v is not a valid IP", addr)
	}
	return nil
}
// ValidateUnixAddress validates that the string is a valid unix domain socket
// path: either an abstract socket name starting with "@", or an absolute file
// path that does not end in "/".
// Note: we use path, not path/filepath, so validation does not depend on the
// OS Pilot runs on — Unix-style forward slashes are always assumed.
func ValidateUnixAddress(addr string) error {
	switch {
	case len(addr) == 0:
		return errors.New("unix address must not be empty")
	case strings.HasPrefix(addr, "@"):
		// Abstract domain sockets are named, not path-based.
		return nil
	case !path.IsAbs(addr), strings.HasSuffix(addr, "/"):
		return fmt.Errorf("%s is not an absolute path to a file", addr)
	}
	return nil
}
// ValidateGateway checks gateway specifications: the resource name must be a
// DNS label, each server must validate, port names must be unique, and
// tls.httpsRedirect may only appear on HTTP servers.
var ValidateGateway = registerValidateFunc("ValidateGateway",
	func(cfg config.Config) (Warning, error) {
		name := cfg.Name
		// Check if this was converted from a k8s gateway-api resource
		gatewaySemantics := cfg.Annotations[constants.InternalGatewaySemantics] == constants.GatewaySemanticsGateway
		v := Validation{}
		// Gateway name must conform to the DNS label format (no dots)
		if !labels.IsDNS1123Label(name) {
			v = appendValidation(v, fmt.Errorf("invalid gateway name: %q", name))
		}
		value, ok := cfg.Spec.(*networking.Gateway)
		if !ok {
			v = appendValidation(v, fmt.Errorf("cannot cast to gateway: %#v", cfg.Spec))
			return v.Unwrap()
		}
		if len(value.Servers) == 0 {
			v = appendValidation(v, fmt.Errorf("gateway must have at least one server"))
		} else {
			for _, server := range value.Servers {
				v = appendValidation(v, validateServer(server, gatewaySemantics))
			}
		}
		// Ensure unique port names
		portNames := make(map[string]bool)
		for _, s := range value.Servers {
			if s == nil {
				v = appendValidation(v, fmt.Errorf("server may not be nil"))
				continue
			}
			if s.Port != nil {
				if portNames[s.Port.Name] {
					v = appendValidation(v, fmt.Errorf("port names in servers must be unique: duplicate name %s", s.Port.Name))
				}
				portNames[s.Port.Name] = true
				// httpsRedirect only makes sense on HTTP servers; warn (not
				// error) for backwards compatibility.
				if !protocol.Parse(s.Port.Protocol).IsHTTP() && s.GetTls().GetHttpsRedirect() {
					v = appendValidation(v, WrapWarning(fmt.Errorf("tls.httpsRedirect should only be used with http servers")))
				}
			}
		}
		return v.Unwrap()
	})
// validateServer validates a single Gateway server block: hosts, port, bind
// address, and the interaction between the port protocol and TLS settings.
// gatewaySemantics is true when the config was converted from a k8s
// gateway-api resource; it is forwarded to hostname validation.
func validateServer(server *networking.Server, gatewaySemantics bool) (v Validation) {
	if server == nil {
		return WrapError(fmt.Errorf("cannot have nil server"))
	}
	if len(server.Hosts) == 0 {
		v = appendValidation(v, fmt.Errorf("server config must contain at least one host"))
	} else {
		for _, hostname := range server.Hosts {
			v = appendValidation(v, validateNamespaceSlashWildcardHostname(hostname, true, gatewaySemantics))
		}
	}
	portErr := validateServerPort(server.Port, server.Bind)
	v = appendValidation(v, portErr)
	v = appendValidation(v, validateServerBind(server.Port, server.Bind))
	v = appendValidation(v, validateTLSOptions(server.Tls))
	// If port is HTTPS or TLS, make sure that server has TLS options.
	// These cross-checks only run when the port itself validated cleanly.
	if _, err := portErr.Unwrap(); err == nil {
		p := protocol.Parse(server.Port.Protocol)
		if p.IsTLS() && server.Tls == nil {
			v = appendValidation(v, fmt.Errorf("server must have TLS settings for HTTPS/TLS protocols"))
		} else if !p.IsTLS() && server.Tls != nil {
			// only tls redirect is allowed if this is a HTTP server
			if p.IsHTTP() {
				if !gateway.IsPassThroughServer(server) ||
					server.Tls.CaCertificates != "" || server.Tls.PrivateKey != "" || server.Tls.ServerCertificate != "" {
					v = appendValidation(v, fmt.Errorf("server cannot have TLS settings for plain text HTTP ports"))
				}
			} else {
				v = appendValidation(v, fmt.Errorf("server cannot have TLS settings for non HTTPS/TLS ports"))
			}
		}
	}
	return v
}
// validateServerPort validates a Gateway server port: the protocol must be
// recognized, the number must be a valid port (unless binding to a unix
// domain socket with number 0), targetPort must not be set, and the name is
// required.
func validateServerPort(port *networking.Port, bind string) (errs Validation) {
	if port == nil {
		return appendValidation(errs, fmt.Errorf("port is required"))
	}
	if protocol.Parse(port.Protocol) == protocol.Unsupported {
		errs = appendValidation(errs, fmt.Errorf("invalid protocol %q, supported protocols are HTTP, HTTP2, GRPC, GRPC-WEB, MONGO, REDIS, MYSQL, TCP", port.Protocol))
	}
	// A unix-socket bind with port number 0 skips the range check; everything
	// else must carry a valid port number.
	if port.Number > 0 || !strings.HasPrefix(bind, UnixAddressPrefix) {
		errs = appendValidation(errs, ValidatePort(int(port.Number)))
	}
	// nolint: staticcheck
	if port.TargetPort > 0 {
		errs = appendValidation(errs, fmt.Errorf("targetPort has no impact on Gateways"))
	}
	if port.Name == "" {
		errs = appendValidation(errs, fmt.Errorf("port name must be set: %v", port))
	}
	return
}
// validateServerBind validates a Gateway server bind address: either a unix
// domain socket path prefixed with "unix://" (in which case the port number
// must be 0), a plain IP address, or empty.
func validateServerBind(port *networking.Port, bind string) (errs error) {
	if len(bind) == 0 {
		// An unset bind is valid.
		return nil
	}
	if strings.HasPrefix(bind, UnixAddressPrefix) {
		errs = appendErrors(errs, ValidateUnixAddress(strings.TrimPrefix(bind, UnixAddressPrefix)))
		if port != nil && port.Number != 0 {
			errs = appendErrors(errs, fmt.Errorf("port number must be 0 for unix domain socket: %v", port))
		}
		return errs
	}
	return appendErrors(errs, ValidateIPAddress(bind))
}
// validateTLSOptions validates the TLS settings of a Gateway server. Checks
// depend on the TLS mode: ISTIO_MUTUAL forbids explicit certificates,
// PASSTHROUGH modes warn when certificates are set, and SIMPLE/MUTUAL modes
// require certificate material unless credentialName is used. Cipher-suite
// problems are reported as warnings, not errors.
func validateTLSOptions(tls *networking.ServerTLSSettings) (v Validation) {
	if tls == nil {
		// no tls config at all is valid
		return
	}
	// Old TLS versions dropped their compatible ciphers from the defaults, so
	// they only work if the user supplies ciphers explicitly.
	if tls.MinProtocolVersion == networking.ServerTLSSettings_TLSV1_0 || tls.MinProtocolVersion == networking.ServerTLSSettings_TLSV1_1 {
		if len(tls.CipherSuites) == 0 {
			v = appendWarningf(v, "TLS version below TLSV1_2 require setting compatible ciphers as by default they no longer include compatible ciphers.")
		}
	}
	// Collect invalid and duplicate cipher suites; both are warned about and
	// otherwise ignored.
	invalidCiphers := sets.New[string]()
	validCiphers := sets.New[string]()
	duplicateCiphers := sets.New[string]()
	for _, cs := range tls.CipherSuites {
		if !security.IsValidCipherSuite(cs) {
			invalidCiphers.Insert(cs)
		} else if validCiphers.InsertContains(cs) {
			duplicateCiphers.Insert(cs)
		}
	}
	if len(invalidCiphers) > 0 {
		v = appendWarningf(v, "ignoring invalid cipher suites: %v", sets.SortedList(invalidCiphers))
	}
	if len(duplicateCiphers) > 0 {
		v = appendWarningf(v, "ignoring duplicate cipher suites: %v", sets.SortedList(duplicateCiphers))
	}
	if tls.Mode == networking.ServerTLSSettings_ISTIO_MUTUAL {
		// ISTIO_MUTUAL TLS mode uses either SDS or default certificate mount paths
		// therefore, we should fail validation if other TLS fields are set
		if tls.ServerCertificate != "" {
			v = appendValidation(v, fmt.Errorf("ISTIO_MUTUAL TLS cannot have associated server certificate"))
		}
		if tls.PrivateKey != "" {
			v = appendValidation(v, fmt.Errorf("ISTIO_MUTUAL TLS cannot have associated private key"))
		}
		if tls.CaCertificates != "" {
			v = appendValidation(v, fmt.Errorf("ISTIO_MUTUAL TLS cannot have associated CA bundle"))
		}
		if tls.CredentialName != "" {
			v = appendValidation(v, fmt.Errorf("ISTIO_MUTUAL TLS cannot have associated credentialName"))
		}
		return
	}
	if tls.Mode == networking.ServerTLSSettings_PASSTHROUGH || tls.Mode == networking.ServerTLSSettings_AUTO_PASSTHROUGH {
		if tls.ServerCertificate != "" || tls.PrivateKey != "" || tls.CaCertificates != "" || tls.CredentialName != "" {
			// Warn for backwards compatibility
			v = appendWarningf(v, "%v mode does not use certificates, they will be ignored", tls.Mode)
		}
	}
	if (tls.Mode == networking.ServerTLSSettings_SIMPLE || tls.Mode == networking.ServerTLSSettings_MUTUAL ||
		tls.Mode == networking.ServerTLSSettings_OPTIONAL_MUTUAL) && tls.CredentialName != "" {
		// If tls mode is SIMPLE or MUTUAL/OPTIONL_MUTUAL, and CredentialName is specified, credentials are fetched
		// remotely. ServerCertificate and CaCertificates fields are not required.
		return
	}
	// Without credentialName, the certificate material must be supplied inline.
	if tls.Mode == networking.ServerTLSSettings_SIMPLE {
		if tls.ServerCertificate == "" {
			v = appendValidation(v, fmt.Errorf("SIMPLE TLS requires a server certificate"))
		}
		if tls.PrivateKey == "" {
			v = appendValidation(v, fmt.Errorf("SIMPLE TLS requires a private key"))
		}
	} else if tls.Mode == networking.ServerTLSSettings_MUTUAL || tls.Mode == networking.ServerTLSSettings_OPTIONAL_MUTUAL {
		if tls.ServerCertificate == "" {
			v = appendValidation(v, fmt.Errorf("MUTUAL TLS requires a server certificate"))
		}
		if tls.PrivateKey == "" {
			v = appendValidation(v, fmt.Errorf("MUTUAL TLS requires a private key"))
		}
		if tls.CaCertificates == "" {
			v = appendValidation(v, fmt.Errorf("MUTUAL TLS requires a client CA bundle"))
		}
	}
	return
}
// ValidateDestinationRule checks proxy policies
var ValidateDestinationRule = registerValidateFunc("ValidateDestinationRule",
	func(cfg config.Config) (Warning, error) {
		rule, ok := cfg.Spec.(*networking.DestinationRule)
		if !ok {
			return nil, fmt.Errorf("cannot cast to destination rule")
		}
		validation := Validation{}
		// Host must be a (possibly wildcarded) domain, and the top-level traffic
		// policy must itself be valid.
		validation = appendValidation(validation, ValidateWildcardDomain(rule.Host))
		validation = appendValidation(validation, validateTrafficPolicy(rule.TrafficPolicy))
		// Each subset is validated independently; a nil subset is reported but does
		// not stop validation of the remaining subsets.
		for _, subset := range rule.Subsets {
			if subset == nil {
				validation = appendValidation(validation, errors.New("subset may not be null"))
				continue
			}
			validation = appendValidation(validation, validateSubset(subset))
		}
		hasSelector := rule.GetWorkloadSelector() != nil
		validation = appendValidation(validation, validateExportTo(cfg.Namespace, rule.ExportTo, false, hasSelector))
		validation = appendValidation(validation, validateWorkloadSelector(rule.GetWorkloadSelector()))
		return validation.Unwrap()
	})
// validateExportTo validates an exportTo list for a resource living in the given
// namespace. isServiceEntry enables the ServiceEntry-only rule that allows `~`
// (visibility.None); isDestinationRuleWithSelector enables the extra restriction
// that a workload-selector DestinationRule may only export to its own namespace.
// All problems found are accumulated into the returned error.
func validateExportTo(namespace string, exportTo []string, isServiceEntry bool, isDestinationRuleWithSelector bool) (errs error) {
	if len(exportTo) > 0 {
		// Make sure there are no duplicates
		exportToSet := sets.New[string]()
		for _, e := range exportTo {
			key := e
			if visibility.Instance(e) == visibility.Private {
				// substitute this with the current namespace so that we
				// can check for duplicates like ., namespace
				key = namespace
			}
			if exportToSet.Contains(key) {
				// key != e can only happen when e is "." and the literal namespace
				// name was also listed (or vice versa) — report that pairing explicitly.
				if key != e {
					errs = appendErrors(errs, fmt.Errorf("duplicate entries in exportTo: . and current namespace %s", namespace))
				} else {
					errs = appendErrors(errs, fmt.Errorf("duplicate entries in exportTo for entry %s", e))
				}
			} else {
				// if this is a serviceEntry, allow ~ in exportTo as it can be used to create
				// a service that is not even visible within the local namespace to anyone other
				// than the proxies of that service.
				if isServiceEntry && visibility.Instance(e) == visibility.None {
					exportToSet.Insert(key)
				} else {
					// Invalid visibility values are reported but not inserted, so later
					// duplicate checks only consider entries that were themselves valid.
					if err := visibility.Instance(key).Validate(); err != nil {
						errs = appendErrors(errs, err)
					} else {
						exportToSet.Insert(key)
					}
				}
			}
		}
		// Make sure workloadSelector based destination rule does not use exportTo other than current namespace
		if isDestinationRuleWithSelector && !exportToSet.IsEmpty() {
			if exportToSet.Contains(namespace) {
				if exportToSet.Len() > 1 {
					errs = appendErrors(errs, fmt.Errorf("destination rule with workload selector cannot have multiple entries in exportTo"))
				}
			} else {
				errs = appendErrors(errs, fmt.Errorf("destination rule with workload selector cannot have exportTo beyond current namespace"))
			}
		}
		// Make sure we have only one of . or *
		if exportToSet.Contains(string(visibility.Public)) {
			// make sure that there are no other entries in the exportTo
			// i.e. no point in saying ns1,ns2,*. Might as well say *
			if len(exportTo) > 1 {
				errs = appendErrors(errs, fmt.Errorf("cannot have both public (*) and non-public exportTo values for a resource"))
			}
		}
		// if this is a service entry, then we need to disallow * and ~ together. Or ~ and other namespaces
		if exportToSet.Contains(string(visibility.None)) {
			if len(exportTo) > 1 {
				errs = appendErrors(errs, fmt.Errorf("cannot export service entry to no one (~) and someone"))
			}
		}
	}
	return
}
// validateAlphaWorkloadSelector validates the alpha (label-map) WorkloadSelector.
// Empty keys and wildcard characters in keys or values are errors; a selector
// with no labels at all yields a warning (it matches everything), not an error.
func validateAlphaWorkloadSelector(selector *networking.WorkloadSelector) (Warning, error) {
	if selector == nil {
		return nil, nil
	}
	var errs error
	for key, value := range selector.Labels {
		pair := fmt.Sprintf("%s=%s", key, value)
		if key == "" {
			errs = appendErrors(errs,
				fmt.Errorf("empty key is not supported in selector: %q", pair))
		}
		if strings.Contains(key, "*") || strings.Contains(value, "*") {
			errs = appendErrors(errs,
				fmt.Errorf("wildcard is not supported in selector: %q", pair))
		}
	}
	var warning Warning
	if len(selector.Labels) == 0 {
		warning = fmt.Errorf("workload selector specified without labels") // nolint: stylecheck
	}
	return warning, errs
}
// ValidateEnvoyFilter checks envoy filter config supplied by user
var ValidateEnvoyFilter = registerValidateFunc("ValidateEnvoyFilter",
	func(cfg config.Config) (Warning, error) {
		errs := Validation{}
		rule, ok := cfg.Spec.(*networking.EnvoyFilter)
		if !ok {
			return nil, fmt.Errorf("cannot cast to Envoy filter")
		}
		warning, err := validateAlphaWorkloadSelector(rule.WorkloadSelector)
		if err != nil {
			return nil, err
		}
		// If workloadSelector is defined and labels are not set, it is most likely
		// an user error. Marking it as a warning to keep it backwards compatible.
		if warning != nil {
			errs = appendValidation(errs, WrapWarning(fmt.Errorf("Envoy filter: %s, will be applied to all services in namespace", warning))) // nolint: stylecheck
		}
		// Each config patch is validated independently; `continue` abandons further
		// checks on a patch once a structural problem is found.
		for _, cp := range rule.ConfigPatches {
			if cp == nil {
				errs = appendValidation(errs, fmt.Errorf("Envoy filter: null config patch")) // nolint: stylecheck
				continue
			}
			if cp.ApplyTo == networking.EnvoyFilter_INVALID {
				errs = appendValidation(errs, fmt.Errorf("Envoy filter: missing applyTo")) // nolint: stylecheck
				continue
			}
			if cp.Patch == nil {
				errs = appendValidation(errs, fmt.Errorf("Envoy filter: missing patch")) // nolint: stylecheck
				continue
			}
			if cp.Patch.Operation == networking.EnvoyFilter_Patch_INVALID {
				errs = appendValidation(errs, fmt.Errorf("Envoy filter: missing patch operation")) // nolint: stylecheck
				continue
			}
			// REMOVE is the only operation that makes sense without a value payload.
			if cp.Patch.Operation != networking.EnvoyFilter_Patch_REMOVE && cp.Patch.Value == nil {
				errs = appendValidation(errs, fmt.Errorf("Envoy filter: missing patch value for non-remove operation")) // nolint: stylecheck
				continue
			}
			// ensure that the supplied regex for proxy version compiles
			if cp.Match != nil && cp.Match.Proxy != nil && cp.Match.Proxy.ProxyVersion != "" {
				if _, err := regexp.Compile(cp.Match.Proxy.ProxyVersion); err != nil {
					errs = appendValidation(errs, fmt.Errorf("Envoy filter: invalid regex for proxy version, [%v]", err)) // nolint: stylecheck
					continue
				}
			}
			// ensure that applyTo, match and patch all line up
			switch cp.ApplyTo {
			case networking.EnvoyFilter_LISTENER,
				networking.EnvoyFilter_FILTER_CHAIN,
				networking.EnvoyFilter_NETWORK_FILTER,
				networking.EnvoyFilter_HTTP_FILTER:
				// Listener-class targets require a listener match (if any match is given).
				if cp.Match != nil && cp.Match.ObjectTypes != nil {
					if cp.Match.GetListener() == nil {
						errs = appendValidation(errs, fmt.Errorf("Envoy filter: applyTo for listener class objects cannot have non listener match")) // nolint: stylecheck
						continue
					}
					listenerMatch := cp.Match.GetListener()
					if listenerMatch.FilterChain != nil {
						if listenerMatch.FilterChain.Filter != nil {
							if cp.ApplyTo == networking.EnvoyFilter_LISTENER || cp.ApplyTo == networking.EnvoyFilter_FILTER_CHAIN {
								// This would be an error but is a warning for backwards compatibility
								errs = appendValidation(errs, WrapWarning(
									fmt.Errorf("Envoy filter: filter match has no effect when used with %v", cp.ApplyTo))) // nolint: stylecheck
							}
							// filter names are required if network filter matches are being made
							if listenerMatch.FilterChain.Filter.Name == "" {
								errs = appendValidation(errs, fmt.Errorf("Envoy filter: filter match has no name to match on")) // nolint: stylecheck
								continue
							} else if listenerMatch.FilterChain.Filter.SubFilter != nil {
								// sub filter match is supported only for applyTo HTTP_FILTER
								if cp.ApplyTo != networking.EnvoyFilter_HTTP_FILTER {
									errs = appendValidation(errs, fmt.Errorf("Envoy filter: subfilter match can be used with applyTo HTTP_FILTER only")) // nolint: stylecheck
									continue
								}
								// sub filter match requires the network filter to match to envoy http connection manager
								if listenerMatch.FilterChain.Filter.Name != wellknown.HTTPConnectionManager &&
									listenerMatch.FilterChain.Filter.Name != "envoy.http_connection_manager" {
									errs = appendValidation(errs, fmt.Errorf("Envoy filter: subfilter match requires filter match with %s", // nolint: stylecheck
										wellknown.HTTPConnectionManager))
									continue
								}
								if listenerMatch.FilterChain.Filter.SubFilter.Name == "" {
									errs = appendValidation(errs, fmt.Errorf("Envoy filter: subfilter match has no name to match on")) // nolint: stylecheck
									continue
								}
							}
							// Warn (via validateListenerMatchName) when deprecated filter names are used.
							errs = appendValidation(errs, validateListenerMatchName(listenerMatch.FilterChain.Filter.GetName()))
							errs = appendValidation(errs, validateListenerMatchName(listenerMatch.FilterChain.Filter.GetSubFilter().GetName()))
						}
					}
				}
			case networking.EnvoyFilter_ROUTE_CONFIGURATION, networking.EnvoyFilter_VIRTUAL_HOST, networking.EnvoyFilter_HTTP_ROUTE:
				// Route-class targets require a route configuration match (if any match is given).
				if cp.Match != nil && cp.Match.ObjectTypes != nil {
					if cp.Match.GetRouteConfiguration() == nil {
						errs = appendValidation(errs,
							fmt.Errorf("Envoy filter: applyTo for http route class objects cannot have non route configuration match")) // nolint: stylecheck
					}
				}
			case networking.EnvoyFilter_CLUSTER:
				// Cluster targets require a cluster match (if any match is given).
				if cp.Match != nil && cp.Match.ObjectTypes != nil {
					if cp.Match.GetCluster() == nil {
						errs = appendValidation(errs, fmt.Errorf("Envoy filter: applyTo for cluster class objects cannot have non cluster match")) // nolint: stylecheck
					}
				}
			}
			// ensure that the struct is valid
			if _, err := xds.BuildXDSObjectFromStruct(cp.ApplyTo, cp.Patch.Value, false); err != nil {
				if strings.Contains(err.Error(), "could not resolve Any message type") {
					if strings.Contains(err.Error(), ".v2.") {
						err = fmt.Errorf("referenced type unknown (hint: try using the v3 XDS API): %v", err)
					} else {
						err = fmt.Errorf("referenced type unknown: %v", err)
					}
				}
				errs = appendValidation(errs, err)
			} else {
				// Run with strict validation, and emit warnings. This helps capture cases like unknown fields
				// We do not want to reject in case the proto is valid but our libraries are outdated
				obj, err := xds.BuildXDSObjectFromStruct(cp.ApplyTo, cp.Patch.Value, true)
				if err != nil {
					errs = appendValidation(errs, WrapWarning(err))
				}
				// Append any deprecation notices
				if obj != nil {
					// Note: since we no longer import v2 protos, v2 references will fail during BuildXDSObjectFromStruct.
					errs = appendValidation(errs, validateDeprecatedFilterTypes(obj))
					errs = appendValidation(errs, validateMissingTypedConfigFilterTypes(obj))
				}
			}
		}
		return errs.Unwrap()
	})
// validateListenerMatchName emits a warning when a deprecated Envoy filter name
// is used in a listener match; the warning names the current replacement.
func validateListenerMatchName(name string) error {
	replacement, deprecated := xds.ReverseDeprecatedFilterNames[name]
	if !deprecated {
		return nil
	}
	return WrapWarning(fmt.Errorf("using deprecated filter name %q; use %q instead", name, replacement))
}
// recurseDeprecatedTypes walks the populated fields of a proto message and collects
// the type URLs of any google.protobuf.Any payloads whose declaring proto file is
// annotated with the udpa FROZEN package-version status (i.e. deprecated xDS APIs).
// It returns the collected type URLs and the first error hit during traversal.
func recurseDeprecatedTypes(message protoreflect.Message) ([]string, error) {
	var topError error
	var deprecatedTypes []string
	if message == nil {
		return nil, nil
	}
	// Range visits only set fields; returning false from the callback stops the walk,
	// which is how traversal errors are propagated out via topError.
	message.Range(func(descriptor protoreflect.FieldDescriptor, value protoreflect.Value) bool {
		m, isMessage := value.Interface().(protoreflect.Message)
		if isMessage {
			anyMessage, isAny := m.Interface().(*anypb.Any)
			if isAny {
				// Resolve the Any's concrete message type so we can inspect the
				// file-level options of the file that declares it.
				mt, err := protoregistry.GlobalTypes.FindMessageByURL(anyMessage.TypeUrl)
				if err != nil {
					topError = err
					return false
				}
				var fileOpts proto.Message = mt.Descriptor().ParentFile().Options().(*descriptorpb.FileOptions)
				if proto.HasExtension(fileOpts, udpaa.E_FileStatus) {
					ext := proto.GetExtension(fileOpts, udpaa.E_FileStatus)
					udpaext, ok := ext.(*udpaa.StatusAnnotation)
					if !ok {
						topError = fmt.Errorf("extension was of wrong type: %T", ext)
						return false
					}
					// FROZEN marks an API version that is no longer developed (e.g. xDS v2).
					if udpaext.PackageVersionStatus == udpaa.PackageVersionStatus_FROZEN {
						deprecatedTypes = append(deprecatedTypes, anyMessage.TypeUrl)
					}
				}
			}
			// Recurse into the nested message to visit its children as well.
			newTypes, err := recurseDeprecatedTypes(m)
			if err != nil {
				topError = err
				return false
			}
			deprecatedTypes = append(deprecatedTypes, newTypes...)
		}
		return true
	})
	return deprecatedTypes, topError
}
// recurseMissingTypedConfig checks that configured filters do not rely on `name` and elide `typed_config`.
// This is temporarily enabled in Envoy by the envoy.reloadable_features.no_extension_lookup_by_name flag, but in the future will be removed.
// It returns the `name` values of any (sub)messages that declare a typedConfig
// field but have neither typedConfig nor configDiscovery set.
func recurseMissingTypedConfig(message protoreflect.Message) []string {
	var deprecatedTypes []string
	if message == nil {
		return nil
	}
	// First, iterate over the fields to find the 'name' field to help with reporting errors.
	var name string
	for i := 0; i < message.Type().Descriptor().Fields().Len(); i++ {
		field := message.Type().Descriptor().Fields().Get(i)
		if field.JSONName() == "name" {
			name = fmt.Sprintf("%v", message.Get(field).Interface())
		}
	}
	hasTypedConfig := false
	requiresTypedConfig := false
	// Now go through fields again
	for i := 0; i < message.Type().Descriptor().Fields().Len(); i++ {
		field := message.Type().Descriptor().Fields().Get(i)
		set := message.Has(field)
		// If it has a typedConfig field, it must be set.
		// Note: requiresTypedConfig is driven by the field's *presence in the schema*,
		// not by whether it is set on this particular message.
		requiresTypedConfig = requiresTypedConfig || field.JSONName() == "typedConfig"
		// Note: it is possible there is some API that has typedConfig but has a non-deprecated alternative,
		// but I couldn't find any. Worst case, this is a warning, not an error, so a false positive is not so bad.
		// The one exception is configDiscovery (used for ECDS)
		if field.JSONName() == "typedConfig" && set {
			hasTypedConfig = true
		}
		if field.JSONName() == "configDiscovery" && set {
			hasTypedConfig = true
		}
		if set {
			// If the field was set and is a message, recurse into it to check children
			m, isMessage := message.Get(field).Interface().(protoreflect.Message)
			if isMessage {
				deprecatedTypes = append(deprecatedTypes, recurseMissingTypedConfig(m)...)
			}
		}
	}
	if requiresTypedConfig && !hasTypedConfig {
		deprecatedTypes = append(deprecatedTypes, name)
	}
	return deprecatedTypes
}
// validateDeprecatedFilterTypes scans obj for Any payloads referencing frozen
// (deprecated) xDS type URLs and reports them as a single warning.
func validateDeprecatedFilterTypes(obj proto.Message) error {
	deprecated, err := recurseDeprecatedTypes(obj.ProtoReflect())
	switch {
	case err != nil:
		return fmt.Errorf("failed to find deprecated types: %v", err)
	case len(deprecated) > 0:
		return WrapWarning(fmt.Errorf("using deprecated type_url(s); %v", strings.Join(deprecated, ", ")))
	default:
		return nil
	}
}
// validateMissingTypedConfigFilterTypes warns when obj configures filters by
// `name` alone without the required `typed_config` (or `config_discovery`).
func validateMissingTypedConfigFilterTypes(obj proto.Message) error {
	if missing := recurseMissingTypedConfig(obj.ProtoReflect()); len(missing) > 0 {
		return WrapWarning(fmt.Errorf("using deprecated types by name without typed_config; %v", strings.Join(missing, ", ")))
	}
	return nil
}
// validates that hostname in ns/<hostname> is a valid hostname according to
// API specs.
// isGateway relaxes the rules: gateways may use a bare IP address as the host,
// and a gateway host that fails domain validation is only an error when it is
// not an IP address either.
func validateSidecarOrGatewayHostnamePart(hostname string, isGateway bool) (errs error) {
	// short name hosts are not allowed
	if hostname != "*" && !strings.Contains(hostname, ".") {
		errs = appendErrors(errs, fmt.Errorf("short names (non FQDN) are not allowed"))
	}
	if err := ValidateWildcardDomain(hostname); err != nil {
		// Fix: previously a sidecar host that was neither a valid wildcard domain
		// nor an IP address appended the same error twice (once unconditionally for
		// sidecars, once via the IP fallback check). Append it exactly once.
		if !isGateway {
			errs = appendErrors(errs, err)
		} else if !netutil.IsValidIPAddress(hostname) {
			// Gateway allows IP as the host string, as well
			errs = appendErrors(errs, err)
		}
	}
	// partial wildcard is not allowed
	// More details please refer to:
	// Gateway: https://istio.io/latest/docs/reference/config/networking/gateway/
	// SideCar: https://istio.io/latest/docs/reference/config/networking/sidecar/#IstioEgressListener
	errs = appendErrors(errs, validatePartialWildCard(hostname))
	return
}
// validateNamespaceSlashWildcardHostname validates a host of the form
// namespace/dnsName. Sidecars require the slash form and allow the namespace
// tokens *, . and ~; gateways also accept a bare hostname (legacy form) and
// only accept ~ when the config was converted from the Gateway API
// (gatewaySemantics).
func validateNamespaceSlashWildcardHostname(hostname string, isGateway bool, gatewaySemantics bool) (errs error) {
	ns, dnsName, hasSlash := strings.Cut(hostname, "/")
	if !hasSlash {
		if isGateway {
			// Old style host in the gateway
			return validateSidecarOrGatewayHostnamePart(hostname, true)
		}
		return appendErrors(errs, fmt.Errorf("host must be of form namespace/dnsName"))
	}
	if ns == "" || dnsName == "" {
		errs = appendErrors(errs, fmt.Errorf("config namespace and dnsName in host entry cannot be empty"))
	}
	if !isGateway {
		// namespace can be * or . or ~ or a valid DNS label in sidecars
		switch ns {
		case "*", ".", "~":
		default:
			if !labels.IsDNS1123Label(ns) {
				errs = appendErrors(errs, fmt.Errorf("invalid namespace value %q in sidecar", ns))
			}
		}
	} else {
		// namespace can be * or . or a valid DNS label in gateways
		// namespace can be ~ in gateways converted from Gateway API when no routes match
		allowedToken := ns == "*" || ns == "." || (ns == "~" && gatewaySemantics)
		if !allowedToken && !labels.IsDNS1123Label(ns) {
			errs = appendErrors(errs, fmt.Errorf("invalid namespace value %q in gateway", ns))
		}
	}
	errs = appendErrors(errs, validateSidecarOrGatewayHostnamePart(dnsName, isGateway))
	return
}
// ValidateSidecar checks sidecar config supplied by user
var ValidateSidecar = registerValidateFunc("ValidateSidecar",
	func(cfg config.Config) (Warning, error) {
		errs := Validation{}
		rule, ok := cfg.Spec.(*networking.Sidecar)
		if !ok {
			return nil, fmt.Errorf("cannot cast to Sidecar")
		}
		warning, err := validateAlphaWorkloadSelector(rule.WorkloadSelector)
		if err != nil {
			return nil, err
		}
		// If workloadSelector is defined and labels are not set, it is most likely
		// an user error. Marking it as a warning to keep it backwards compatible.
		if warning != nil {
			errs = appendValidation(errs, WrapWarning(fmt.Errorf("sidecar: %s, will be applied to all services in namespace",
				warning))) // nolint: stylecheck
		}
		// A Sidecar with no ingress, no egress, and no policies configures nothing.
		if len(rule.Egress) == 0 && len(rule.Ingress) == 0 && rule.OutboundTrafficPolicy == nil && rule.InboundConnectionPool == nil {
			return nil, fmt.Errorf("sidecar: empty configuration provided")
		}
		// Track ingress port numbers to reject duplicates across listeners.
		portMap := sets.Set[uint32]{}
		for _, i := range rule.Ingress {
			if i == nil {
				errs = appendValidation(errs, fmt.Errorf("sidecar: ingress may not be null"))
				continue
			}
			if i.Port == nil {
				errs = appendValidation(errs, fmt.Errorf("sidecar: port is required for ingress listeners"))
				continue
			}
			// nolint: staticcheck
			if i.Port.TargetPort > 0 {
				errs = appendValidation(errs, fmt.Errorf("targetPort has no impact on Sidecars"))
			}
			bind := i.GetBind()
			errs = appendValidation(errs, validateSidecarIngressPortAndBind(i.Port, bind))
			if portMap.Contains(i.Port.Number) {
				errs = appendValidation(errs, fmt.Errorf("sidecar: ports on IP bound listeners must be unique"))
			}
			portMap.Insert(i.Port.Number)
			// defaultEndpoint, when set, is either a unix socket or host:port.
			if len(i.DefaultEndpoint) != 0 {
				if strings.HasPrefix(i.DefaultEndpoint, UnixAddressPrefix) {
					errs = appendValidation(errs, ValidateUnixAddress(strings.TrimPrefix(i.DefaultEndpoint, UnixAddressPrefix)))
				} else {
					// format should be 127.0.0.1:port, [::1]:port or :port
					sHost, sPort, sErr := net.SplitHostPort(i.DefaultEndpoint)
					if sErr != nil {
						errs = appendValidation(errs, sErr)
					}
					// Only loopback/wildcard hosts are accepted for the default endpoint.
					if sHost != "" && sHost != "127.0.0.1" && sHost != "0.0.0.0" && sHost != "::1" && sHost != "::" {
						errMsg := "sidecar: defaultEndpoint must be of form 127.0.0.1:<port>,0.0.0.0:<port>,[::1]:port,[::]:port,unix://filepath or unset"
						errs = appendValidation(errs, fmt.Errorf(errMsg))
					}
					port, err := strconv.Atoi(sPort)
					if err != nil {
						errs = appendValidation(errs, fmt.Errorf("sidecar: defaultEndpoint port (%s) is not a number: %v", sPort, err))
					} else {
						errs = appendValidation(errs, ValidatePort(port))
					}
				}
			}
			// Ingress TLS supports only a restricted subset of server TLS settings.
			if i.Tls != nil {
				if len(i.Tls.SubjectAltNames) > 0 {
					errs = appendValidation(errs, fmt.Errorf("sidecar: subjectAltNames is not supported in ingress tls"))
				}
				if i.Tls.HttpsRedirect {
					errs = appendValidation(errs, fmt.Errorf("sidecar: httpsRedirect is not supported"))
				}
				if i.Tls.CredentialName != "" {
					errs = appendValidation(errs, fmt.Errorf("sidecar: credentialName is not currently supported"))
				}
				if i.Tls.Mode == networking.ServerTLSSettings_ISTIO_MUTUAL || i.Tls.Mode == networking.ServerTLSSettings_AUTO_PASSTHROUGH {
					errs = appendValidation(errs, fmt.Errorf("configuration is invalid: cannot set mode to %s in sidecar ingress tls", i.Tls.Mode.String()))
				}
				protocol := protocol.Parse(i.Port.Protocol)
				if !protocol.IsTLS() {
					errs = appendValidation(errs, fmt.Errorf("server cannot have TLS settings for non HTTPS/TLS ports"))
				}
				errs = appendValidation(errs, validateTLSOptions(i.Tls))
			}
			// Validate per-port connection pool settings
			errs = appendValidation(errs, validateConnectionPool(i.ConnectionPool))
			if i.ConnectionPool != nil && i.ConnectionPool.Http != nil && i.Port != nil && !protocol.Parse(i.Port.Protocol).IsHTTP() {
				errs = appendWarningf(errs,
					"sidecar: HTTP connection pool settings are configured for port %d (%q) but its protocol is not HTTP (%s); only TCP settings will apply",
					i.Port.Number, i.Port.Name, i.Port.Protocol)
			}
		}
		// Validate top-level connection pool setting
		errs = appendValidation(errs, validateConnectionPool(rule.InboundConnectionPool))
		// Reuse portMap for egress listeners; udsMap tracks unix socket binds.
		portMap = sets.Set[uint32]{}
		udsMap := sets.String{}
		catchAllEgressListenerFound := false
		for index, egress := range rule.Egress {
			if egress == nil {
				errs = appendValidation(errs, errors.New("egress listener may not be null"))
				continue
			}
			// there can be only one catch all egress listener with empty port, and it should be the last listener.
			if egress.Port == nil {
				if !catchAllEgressListenerFound {
					if index == len(rule.Egress)-1 {
						catchAllEgressListenerFound = true
					} else {
						errs = appendValidation(errs, fmt.Errorf("sidecar: the egress listener with empty port should be the last listener in the list"))
					}
				} else {
					errs = appendValidation(errs, fmt.Errorf("sidecar: egress can have only one listener with empty port"))
					continue
				}
			} else {
				// nolint: staticcheck
				if egress.Port.TargetPort > 0 {
					errs = appendValidation(errs, fmt.Errorf("targetPort has no impact on Sidecars"))
				}
				bind := egress.GetBind()
				captureMode := egress.GetCaptureMode()
				errs = appendValidation(errs, validateSidecarEgressPortBindAndCaptureMode(egress.Port, bind, captureMode))
				// Port number 0 means the listener is a unix domain socket.
				if egress.Port.Number == 0 {
					if _, found := udsMap[bind]; found {
						errs = appendValidation(errs, fmt.Errorf("sidecar: unix domain socket values for listeners must be unique"))
					}
					udsMap[bind] = struct{}{}
				} else {
					if portMap.Contains(egress.Port.Number) {
						errs = appendValidation(errs, fmt.Errorf("sidecar: ports on IP bound listeners must be unique"))
					}
					portMap.Insert(egress.Port.Number)
				}
			}
			// validate that the hosts field is a slash separated value
			// of form ns1/host, or */host, or */*, or ns1/*, or ns1/*.example.com
			if len(egress.Hosts) == 0 {
				errs = appendValidation(errs, fmt.Errorf("sidecar: egress listener must contain at least one host"))
			} else {
				// nssSvcs maps namespace -> set of service hosts seen, used to warn
				// on duplicate or subsumed entries within this listener.
				nssSvcs := map[string]map[string]bool{}
				for _, hostname := range egress.Hosts {
					parts := strings.SplitN(hostname, "/", 2)
					if len(parts) == 2 {
						ns := parts[0]
						svc := parts[1]
						if ns == "." {
							ns = cfg.Namespace
						}
						if _, ok := nssSvcs[ns]; !ok {
							nssSvcs[ns] = map[string]bool{}
						}
						// test/a
						// test/a
						// test/*
						if svc != "*" {
							if _, ok := nssSvcs[ns][svc]; ok || nssSvcs[ns]["*"] {
								// already exists
								// TODO: prevent this invalid setting, maybe in 1.12+
								errs = appendValidation(errs, WrapWarning(fmt.Errorf("duplicated egress host: %s", hostname)))
							}
						} else {
							if len(nssSvcs[ns]) != 0 {
								errs = appendValidation(errs, WrapWarning(fmt.Errorf("duplicated egress host: %s", hostname)))
							}
						}
						nssSvcs[ns][svc] = true
					}
					errs = appendValidation(errs, validateNamespaceSlashWildcardHostname(hostname, false, false))
				}
				// */*
				// test/a
				if nssSvcs["*"]["*"] && len(nssSvcs) != 1 {
					errs = appendValidation(errs, WrapWarning(fmt.Errorf("`*/*` host select all resources, no other hosts can be added")))
				}
			}
		}
		errs = appendValidation(errs, validateSidecarOutboundTrafficPolicy(rule.OutboundTrafficPolicy))
		return errs.Unwrap()
	})
// validateSidecarOutboundTrafficPolicy validates a Sidecar outboundTrafficPolicy.
// egressProxy is only legal with ALLOW_ANY mode and must carry a valid FQDN host
// and a non-nil port.
func validateSidecarOutboundTrafficPolicy(tp *networking.OutboundTrafficPolicy) (errs error) {
	if tp == nil || tp.EgressProxy == nil {
		return
	}
	if tp.GetMode() != networking.OutboundTrafficPolicy_ALLOW_ANY {
		return appendErrors(errs, fmt.Errorf("sidecar: egress_proxy must be set only with ALLOW_ANY outbound_traffic_policy mode"))
	}
	errs = appendErrors(errs, ValidateFQDN(tp.EgressProxy.GetHost()))
	if tp.EgressProxy.Port == nil {
		return appendErrors(errs, fmt.Errorf("sidecar: egress_proxy port must be non-nil"))
	}
	return appendErrors(errs, validateDestination(tp.EgressProxy))
}
// validateSidecarEgressPortBindAndCaptureMode validates an egress listener's
// port, bind address, and capture mode. Port number 0 denotes a unix domain
// socket listener, which requires a unix:// bind and DEFAULT/NONE capture mode;
// any other port number is an IP listener whose bind (if set) must be an IP.
func validateSidecarEgressPortBindAndCaptureMode(port *networking.SidecarPort, bind string,
	captureMode networking.CaptureMode,
) (errs error) {
	// Port name is optional. Validate if exists.
	if port.Name != "" {
		errs = appendErrors(errs, ValidatePortName(port.Name))
	}
	if port.Number != 0 {
		// Regular IP-bound listener.
		errs = appendErrors(errs,
			ValidateProtocol(port.Protocol),
			ValidatePort(int(port.Number)))
		if bind != "" {
			errs = appendErrors(errs, ValidateIPAddress(bind))
		}
		return
	}
	// Unix domain socket listener: bind must carry the unix:// prefix.
	errs = appendErrors(errs, ValidateProtocol(port.Protocol))
	if strings.HasPrefix(bind, UnixAddressPrefix) {
		errs = appendErrors(errs, ValidateUnixAddress(strings.TrimPrefix(bind, UnixAddressPrefix)))
	} else {
		errs = appendErrors(errs, fmt.Errorf("sidecar: ports with 0 value must have a unix domain socket bind address"))
	}
	if captureMode != networking.CaptureMode_DEFAULT && captureMode != networking.CaptureMode_NONE {
		errs = appendErrors(errs, fmt.Errorf("sidecar: captureMode must be DEFAULT/NONE for unix domain socket listeners"))
	}
	return
}
// validateSidecarIngressPortAndBind validates an ingress listener's port and
// optional bind address (which must be an IP when present).
func validateSidecarIngressPortAndBind(port *networking.SidecarPort, bind string) (errs error) {
	// Port name is optional. Validate if exists.
	if port.Name != "" {
		errs = appendErrors(errs, ValidatePortName(port.Name))
	}
	errs = appendErrors(errs, ValidateProtocol(port.Protocol))
	errs = appendErrors(errs, ValidatePort(int(port.Number)))
	if bind != "" {
		errs = appendErrors(errs, ValidateIPAddress(bind))
	}
	return
}
// validateTrafficPolicy validates a DestinationRule traffic policy: it must set
// at least one sub-policy, may not combine tunnel with proxyProtocol, and each
// sub-policy is validated by its dedicated helper.
func validateTrafficPolicy(policy *networking.TrafficPolicy) Validation {
	if policy == nil {
		return Validation{}
	}
	allUnset := policy.OutlierDetection == nil && policy.ConnectionPool == nil &&
		policy.LoadBalancer == nil && policy.Tls == nil && policy.PortLevelSettings == nil &&
		policy.Tunnel == nil && policy.ProxyProtocol == nil
	if allUnset {
		return WrapError(fmt.Errorf("traffic policy must have at least one field"))
	}
	if policy.Tunnel != nil && policy.ProxyProtocol != nil {
		return WrapError(fmt.Errorf("tunnel and proxyProtocol must not be set together"))
	}
	return appendValidation(
		validateOutlierDetection(policy.OutlierDetection),
		validateConnectionPool(policy.ConnectionPool),
		validateLoadBalancer(policy.LoadBalancer, policy.OutlierDetection),
		validateTLS(policy.Tls),
		validatePortTrafficPolicies(policy.PortLevelSettings),
		validateTunnelSettings(policy.Tunnel),
		validateProxyProtocol(policy.ProxyProtocol))
}
// validateProxyProtocol validates proxy protocol settings; only versions 0 (V1)
// and 1 (V2) of the enum are accepted.
func validateProxyProtocol(proxyProtocol *networking.TrafficPolicy_ProxyProtocol) (errs error) {
	if proxyProtocol == nil {
		return
	}
	switch proxyProtocol.Version {
	case 0, 1:
		// supported versions
	default:
		errs = appendErrors(errs, fmt.Errorf("proxy protocol version is invalid: %d", proxyProtocol.Version))
	}
	return
}
// validateTunnelSettings validates tunnel settings: protocol must be empty,
// CONNECT, or POST; the target host must be an FQDN or IP; the target port must
// be a valid port number.
func validateTunnelSettings(tunnel *networking.TrafficPolicy_TunnelSettings) (errs error) {
	if tunnel == nil {
		return
	}
	switch tunnel.Protocol {
	case "", "CONNECT", "POST":
		// allowed values (empty means default)
	default:
		errs = appendErrors(errs, fmt.Errorf("tunnel protocol must be \"CONNECT\" or \"POST\""))
	}
	// The host is acceptable if EITHER check passes; report both failures together.
	fqdnErr := ValidateFQDN(tunnel.TargetHost)
	ipErr := ValidateIPAddress(tunnel.TargetHost)
	if fqdnErr != nil && ipErr != nil {
		errs = appendErrors(errs, fmt.Errorf("tunnel target host must be valid FQDN or IP address: %s; %s", fqdnErr, ipErr))
	}
	if err := ValidatePort(int(tunnel.TargetPort)); err != nil {
		errs = appendErrors(errs, fmt.Errorf("tunnel target port is invalid: %s", err))
	}
	return
}
// validateOutlierDetection validates outlier detection settings: durations must
// be well-formed, percents in range, the deprecated consecutiveErrors field is
// flagged as a warning, and consecutiveLocalOriginFailures requires
// splitExternalLocalOriginErrors to be enabled.
func validateOutlierDetection(outlier *networking.OutlierDetection) (errs Validation) {
	if outlier == nil {
		return
	}
	if d := outlier.BaseEjectionTime; d != nil {
		errs = appendValidation(errs, ValidateDuration(d))
	}
	// nolint: staticcheck
	if outlier.ConsecutiveErrors != 0 {
		warn := "outlier detection consecutive errors is deprecated, use consecutiveGatewayErrors or consecutive5xxErrors instead"
		scope.Warnf(warn)
		errs = appendValidation(errs, WrapWarning(errors.New(warn)))
	}
	localOriginConfigured := outlier.ConsecutiveLocalOriginFailures.GetValue() > 0
	if localOriginConfigured && !outlier.SplitExternalLocalOriginErrors {
		err := "outlier detection consecutive local origin failures is specified, but split external local origin errors is set to false"
		errs = appendValidation(errs, errors.New(err))
	}
	if d := outlier.Interval; d != nil {
		errs = appendValidation(errs, ValidateDuration(d))
	}
	errs = appendValidation(errs, ValidatePercent(outlier.MaxEjectionPercent))
	errs = appendValidation(errs, ValidatePercent(outlier.MinHealthPercent))
	return
}
// validateConnectionPool validates connection pool settings. At least one of
// the HTTP or TCP sections must be present; numeric limits must be
// non-negative, durations well-formed, and useClientProtocol is incompatible
// with the UPGRADE h2 policy.
func validateConnectionPool(settings *networking.ConnectionPoolSettings) (errs error) {
	if settings == nil {
		return
	}
	if settings.Http == nil && settings.Tcp == nil {
		return fmt.Errorf("connection pool must have at least one field")
	}
	if h := settings.Http; h != nil {
		if h.Http1MaxPendingRequests < 0 {
			errs = appendErrors(errs, fmt.Errorf("http1 max pending requests must be non-negative"))
		}
		if h.Http2MaxRequests < 0 {
			errs = appendErrors(errs, fmt.Errorf("http2 max requests must be non-negative"))
		}
		if h.MaxRequestsPerConnection < 0 {
			errs = appendErrors(errs, fmt.Errorf("max requests per connection must be non-negative"))
		}
		if h.MaxRetries < 0 {
			errs = appendErrors(errs, fmt.Errorf("max retries must be non-negative"))
		}
		if h.IdleTimeout != nil {
			errs = appendErrors(errs, ValidateDuration(h.IdleTimeout))
		}
		// Upgrading to h2 while also echoing the client protocol is contradictory.
		if h.H2UpgradePolicy == networking.ConnectionPoolSettings_HTTPSettings_UPGRADE && h.UseClientProtocol {
			errs = appendErrors(errs, fmt.Errorf("use client protocol must not be true when H2UpgradePolicy is UPGRADE"))
		}
		if h.MaxConcurrentStreams < 0 {
			errs = appendErrors(errs, fmt.Errorf("max concurrent streams must be non-negative"))
		}
	}
	if t := settings.Tcp; t != nil {
		if t.MaxConnections < 0 {
			errs = appendErrors(errs, fmt.Errorf("max connections must be non-negative"))
		}
		if t.ConnectTimeout != nil {
			errs = appendErrors(errs, ValidateDuration(t.ConnectTimeout))
		}
		if t.MaxConnectionDuration != nil {
			errs = appendErrors(errs, ValidateDuration(t.MaxConnectionDuration))
		}
	}
	return
}
// validateLoadBalancer validates load balancer settings. Simple LB policies are
// always valid; consistent-hash settings require a cookie name when an HTTP
// cookie is used, and the deprecated minimumRingSize is flagged (and must not
// be combined with an explicit hash algorithm). Locality LB settings are
// validated against the outlier detection config.
func validateLoadBalancer(settings *networking.LoadBalancerSettings, outlier *networking.OutlierDetection) (errs Validation) {
	if settings == nil {
		return
	}
	// simple load balancing is always valid
	if ch := settings.GetConsistentHash(); ch != nil {
		if cookie := ch.GetHttpCookie(); cookie != nil && cookie.GetName() == "" {
			errs = appendValidation(errs, fmt.Errorf("name required for HttpCookie"))
		}
		// nolint: staticcheck
		if ch.MinimumRingSize != 0 {
			warn := "consistent hash MinimumRingSize is deprecated, use ConsistentHashLB's RingHash configuration instead"
			scope.Warnf(warn)
			errs = appendValidation(errs, WrapWarning(errors.New(warn)))
		}
		// nolint: staticcheck
		if ch.MinimumRingSize != 0 && ch.GetHashAlgorithm() != nil {
			errs = appendValidation(errs, fmt.Errorf("only one of MinimumRingSize or Maglev/Ringhash can be specified"))
		}
	}
	errs = appendValidation(errs, validateLocalityLbSetting(settings.LocalityLbSetting, outlier))
	return
}
// validateTLS validates client TLS settings: insecureSkipVerify is incompatible
// with CA/SAN pinning; credentialName replaces file-based certificate fields;
// and MUTUAL mode without a credentialName requires both a client certificate
// and a private key.
func validateTLS(settings *networking.ClientTLSSettings) (errs error) {
	if settings == nil {
		return
	}
	mode := settings.Mode
	if settings.GetInsecureSkipVerify().GetValue() {
		switch mode {
		case networking.ClientTLSSettings_SIMPLE:
			// In tls simple mode, we can specify ca cert by CaCertificates or CredentialName.
			if settings.CaCertificates != "" || settings.CredentialName != "" || settings.SubjectAltNames != nil {
				errs = appendErrors(errs, fmt.Errorf("cannot specify CaCertificates or CredentialName or SubjectAltNames when InsecureSkipVerify is set true"))
			}
		case networking.ClientTLSSettings_MUTUAL:
			// In tls mutual mode, we can specify both client cert and ca cert by CredentialName.
			// However, here we can not distinguish whether user specify ca cert by CredentialName or not.
			if settings.CaCertificates != "" || settings.SubjectAltNames != nil {
				errs = appendErrors(errs, fmt.Errorf("cannot specify CaCertificates or SubjectAltNames when InsecureSkipVerify is set true"))
			}
		}
	}
	if settings.CredentialName != "" &&
		(mode == networking.ClientTLSSettings_SIMPLE || mode == networking.ClientTLSSettings_MUTUAL) {
		if settings.ClientCertificate != "" || settings.CaCertificates != "" || settings.PrivateKey != "" {
			errs = appendErrors(errs,
				fmt.Errorf("cannot specify client certificates or CA certificate If credentialName is set"))
		}
		// If tls mode is SIMPLE or MUTUAL, and CredentialName is specified, credentials are fetched
		// remotely. ServerCertificate and CaCertificates fields are not required.
		return
	}
	if mode == networking.ClientTLSSettings_MUTUAL {
		if settings.ClientCertificate == "" {
			errs = appendErrors(errs, fmt.Errorf("client certificate required for mutual tls"))
		}
		if settings.PrivateKey == "" {
			errs = appendErrors(errs, fmt.Errorf("private key required for mutual tls"))
		}
	}
	return
}
// validateSubset verifies a DestinationRule subset: its name, its label
// selector, and any subset-level traffic policy.
func validateSubset(subset *networking.Subset) error {
	nameErr := validateSubsetName(subset.Name)
	labelErr := labels.Instance(subset.Labels).Validate()
	policyErr := validateTrafficPolicy(subset.TrafficPolicy)
	return appendErrors(nameErr, labelErr, policyErr)
}
// validatePortTrafficPolicies checks each per-port traffic policy: entries
// must be non-nil, name a port, and configure at least one policy field.
func validatePortTrafficPolicies(pls []*networking.TrafficPolicy_PortTrafficPolicy) (errs error) {
	for _, policy := range pls {
		if policy == nil {
			errs = appendErrors(errs, fmt.Errorf("traffic policy may not be null"))
			continue
		}
		if policy.Port == nil {
			errs = appendErrors(errs, fmt.Errorf("portTrafficPolicy must have valid port"))
		}
		configured := policy.OutlierDetection != nil || policy.ConnectionPool != nil ||
			policy.LoadBalancer != nil || policy.Tls != nil
		if !configured {
			// An entry with only a port set would silently do nothing.
			errs = appendErrors(errs, fmt.Errorf("port traffic policy must have at least one field"))
			continue
		}
		errs = appendErrors(errs, validateOutlierDetection(policy.OutlierDetection),
			validateConnectionPool(policy.ConnectionPool),
			validateLoadBalancer(policy.LoadBalancer, policy.OutlierDetection),
			validateTLS(policy.Tls))
	}
	return
}
// ValidateProxyAddress checks that a network address is a well-formed
// "host:port" pair whose host is an FQDN or IP address and whose port is in
// the valid range.
func ValidateProxyAddress(hostAddr string) error {
	hostname, portStr, err := net.SplitHostPort(hostAddr)
	if err != nil {
		return fmt.Errorf("unable to split %q: %v", hostAddr, err)
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return fmt.Errorf("port (%s) is not a number: %v", portStr, err)
	}
	if err := ValidatePort(port); err != nil {
		return err
	}
	// The host part is accepted if it is either a valid FQDN or a valid IP.
	if ValidateFQDN(hostname) != nil && !netutil.IsValidIPAddress(hostname) {
		return fmt.Errorf("%q is not a valid hostname or an IP address", hostname)
	}
	return nil
}
// ValidateDuration checks that a proto duration is well-formed: at least one
// millisecond and with no sub-millisecond component.
func ValidateDuration(pd *durationpb.Duration) error {
	d := pd.AsDuration()
	switch {
	case d < time.Millisecond:
		return errors.New("duration must be greater than 1ms")
	case d%time.Millisecond != 0:
		return errors.New("only durations to ms precision are supported")
	default:
		return nil
	}
}
// ValidateDurationRange verifies that dur lies within [min, max] inclusive.
func ValidateDurationRange(dur, min, max time.Duration) error {
	if min <= dur && dur <= max {
		return nil
	}
	return fmt.Errorf("time %v must be >%v and <%v", dur.String(), min.String(), max.String())
}
// ValidateDrainDuration checks that the drain duration is valid: a legal
// proto duration, expressed in whole seconds, and no longer than drainTimeMax.
func ValidateDrainDuration(drainTime *durationpb.Duration) (errs error) {
	if err := ValidateDuration(drainTime); err != nil {
		// A malformed duration makes the remaining checks meaningless.
		return multierror.Append(errs, multierror.Prefix(err, "invalid drain duration:"))
	}
	d := drainTime.AsDuration()
	if d%time.Second != 0 {
		errs = multierror.Append(errs,
			errors.New("drain time only supports durations to seconds precision"))
	}
	if d > drainTimeMax {
		errs = multierror.Append(errs,
			fmt.Errorf("drain time %v must be <%v", d.String(), drainTimeMax.String()))
	}
	return
}
// ValidateLightstepCollector validates the configuration for sending envoy
// spans to LightStep: both a collector address and an access token are required.
func ValidateLightstepCollector(ls *meshconfig.Tracing_Lightstep) error {
	var errs error
	addr := ls.GetAddress()
	if addr == "" {
		errs = multierror.Append(errs, errors.New("address is required"))
	}
	// An empty address also fails the proxy-address check below, producing a
	// second, more specific error — matching the original behavior.
	if err := ValidateProxyAddress(addr); err != nil {
		errs = multierror.Append(errs, multierror.Prefix(err, "invalid lightstep address:"))
	}
	if ls.GetAccessToken() == "" {
		errs = multierror.Append(errs, errors.New("access token is required"))
	}
	return errs
}
// validateCustomTags rejects a tracing CustomTags map that contains a nil
// value; the first offending tag name is reported.
func validateCustomTags(tags map[string]*meshconfig.Tracing_CustomTag) error {
	for name, value := range tags {
		if value == nil {
			return fmt.Errorf("encountered nil value for custom tag: %s", name)
		}
	}
	return nil
}
// ValidateZipkinCollector validates the configuration for sending envoy spans to Zipkin.
func ValidateZipkinCollector(z *meshconfig.Tracing_Zipkin) error {
	// $(HOST_IP) is substituted at runtime; swap in a valid IP for validation.
	addr := strings.Replace(z.GetAddress(), "$(HOST_IP)", "127.0.0.1", 1)
	return ValidateProxyAddress(addr)
}
// ValidateDatadogCollector validates the configuration for sending envoy spans to Datadog.
func ValidateDatadogCollector(d *meshconfig.Tracing_Datadog) error {
	// $(HOST_IP) is substituted at runtime; swap in a valid IP for validation.
	addr := strings.Replace(d.GetAddress(), "$(HOST_IP)", "127.0.0.1", 1)
	return ValidateProxyAddress(addr)
}
// ValidateConnectTimeout validates the envoy connection timeout: it must be a
// well-formed duration that falls within [connectTimeoutMin, connectTimeoutMax].
func ValidateConnectTimeout(timeout *durationpb.Duration) error {
	if err := ValidateDuration(timeout); err != nil {
		return err
	}
	return ValidateDurationRange(timeout.AsDuration(), connectTimeoutMin, connectTimeoutMax)
}
// ValidateProtocolDetectionTimeout validates the envoy protocol detection
// timeout; zero is accepted as "detection disabled".
func ValidateProtocolDetectionTimeout(timeout *durationpb.Duration) error {
	d := timeout.AsDuration()
	// 0s is a valid value if trying to disable protocol detection timeout
	if d == 0 {
		return nil
	}
	if d%time.Millisecond != 0 {
		return errors.New("only durations to ms precision are supported")
	}
	return nil
}
// ValidateMaxServerConnectionAge rejects a negative keepalive max-age duration.
func ValidateMaxServerConnectionAge(in time.Duration) error {
	err := IsNegativeDuration(in)
	if err == nil {
		return nil
	}
	return fmt.Errorf("%v: --keepaliveMaxServerConnectionAge only accepts positive duration eg: 30m", err)
}
// IsNegativeDuration returns an error when the duration is negative, nil otherwise.
func IsNegativeDuration(in time.Duration) error {
	if in >= 0 {
		return nil
	}
	return fmt.Errorf("invalid duration: %s", in.String())
}
// ValidateMeshConfig checks that the mesh config is well-formed: ports,
// timeouts, the default proxy config, locality LB, service settings, trust
// domains, and mesh-wide TLS settings.
func ValidateMeshConfig(mesh *meshconfig.MeshConfig) (Warning, error) {
	validation := Validation{}
	if err := ValidatePort(int(mesh.ProxyListenPort)); err != nil {
		validation = appendValidation(validation, multierror.Prefix(err, "invalid proxy listen port:"))
	}
	if err := ValidateConnectTimeout(mesh.ConnectTimeout); err != nil {
		validation = appendValidation(validation, multierror.Prefix(err, "invalid connect timeout:"))
	}
	if err := ValidateProtocolDetectionTimeout(mesh.ProtocolDetectionTimeout); err != nil {
		validation = appendValidation(validation, multierror.Prefix(err, "invalid protocol detection timeout:"))
	}
	if mesh.DefaultConfig != nil {
		validation = appendValidation(validation, ValidateMeshConfigProxyConfig(mesh.DefaultConfig))
	} else {
		validation = appendValidation(validation, errors.New("missing default config"))
	}
	validation = appendValidation(validation, validateLocalityLbSetting(mesh.LocalityLbSetting, &networking.OutlierDetection{}))
	validation = appendValidation(validation, validateServiceSettings(mesh))
	validation = appendValidation(validation, validateTrustDomainConfig(mesh))
	// Extension-provider problems are only logged: the offending provider may
	// never actually be referenced by a policy.
	if err := validateExtensionProvider(mesh); err != nil {
		scope.Warnf("found invalid extension provider (can be ignored if the given extension provider is not used): %v", err)
	}
	validation = appendValidation(validation, ValidateMeshTLSConfig(mesh))
	validation = appendValidation(validation, ValidateMeshTLSDefaults(mesh))
	return validation.Unwrap()
}
// validateTrustDomainConfig verifies the mesh trust domain and each of its aliases.
func validateTrustDomainConfig(config *meshconfig.MeshConfig) (errs error) {
	if err := ValidateTrustDomain(config.TrustDomain); err != nil {
		errs = multierror.Append(errs, fmt.Errorf("trustDomain: %v", err))
	}
	for idx, alias := range config.TrustDomainAliases {
		if err := ValidateTrustDomain(alias); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("trustDomainAliases[%d], domain `%s` : %v", idx, alias, err))
		}
	}
	return
}
// ValidateMeshTLSConfig rejects unsupported fields in the mesh-wide mTLS settings.
func ValidateMeshTLSConfig(mesh *meshconfig.MeshConfig) (errs error) {
	meshMTLS := mesh.MeshMTLS
	if meshMTLS == nil {
		return nil
	}
	// ECDH curves are configured through TlsDefaults, not MeshMTLS.
	if meshMTLS.EcdhCurves != nil {
		errs = multierror.Append(errs, errors.New("mesh TLS does not support ECDH curves configuration"))
	}
	return errs
}
// ValidateMeshTLSDefaults warns about unrecognized or repeated ECDH curves in
// the mesh-wide TLS defaults.
func ValidateMeshTLSDefaults(mesh *meshconfig.MeshConfig) (v Validation) {
	unrecognized := sets.New[string]()
	seen := sets.New[string]()
	duplicated := sets.New[string]()
	if tlsDefaults := mesh.TlsDefaults; tlsDefaults != nil {
		for _, curve := range tlsDefaults.EcdhCurves {
			switch {
			case !security.IsValidECDHCurve(curve):
				unrecognized.Insert(curve)
			case seen.InsertContains(curve):
				// InsertContains reports whether the curve was already present.
				duplicated.Insert(curve)
			}
		}
	}
	if len(unrecognized) > 0 {
		v = appendWarningf(v, "detected unrecognized ECDH curves: %v", sets.SortedList(unrecognized))
	}
	if len(duplicated) > 0 {
		v = appendWarningf(v, "detected duplicate ECDH curves: %v", sets.SortedList(duplicated))
	}
	return
}
// validateServiceSettings checks every host listed in each service-settings
// entry against wildcard-domain syntax.
func validateServiceSettings(config *meshconfig.MeshConfig) (errs error) {
	for idx, setting := range config.ServiceSettings {
		for _, hostname := range setting.Hosts {
			if err := ValidateWildcardDomain(hostname); err != nil {
				errs = multierror.Append(errs, fmt.Errorf("serviceSettings[%d], host `%s`: %v", idx, hostname, err))
			}
		}
	}
	return
}
// validatePrivateKeyProvider checks a proxy private key provider config: a
// known provider variant must be set, and its pollDelay must be present and
// non-zero. The cryptomb and qat backends share the same pollDelay contract,
// so the duplicated check is factored into one local helper.
func validatePrivateKeyProvider(pkpConf *meshconfig.PrivateKeyProvider) error {
	var errs error
	if pkpConf.GetProvider() == nil {
		errs = multierror.Append(errs, errors.New("private key provider configuration is required"))
	}
	// checkPollDelay validates the pollDelay shared by all provider backends.
	checkPollDelay := func(pollDelay *durationpb.Duration) error {
		if pollDelay == nil {
			return errors.New("pollDelay is required")
		}
		if pollDelay.GetSeconds() == 0 && pollDelay.GetNanos() == 0 {
			return errors.New("pollDelay must be non zero")
		}
		return nil
	}
	// A nil provider also lands in the default branch below, adding the
	// "unknown private key provider" error (same as the original behavior).
	switch pkpConf.GetProvider().(type) {
	case *meshconfig.PrivateKeyProvider_Cryptomb:
		if cryptomb := pkpConf.GetCryptomb(); cryptomb == nil {
			errs = multierror.Append(errs, errors.New("cryptomb configuration is required"))
		} else if err := checkPollDelay(cryptomb.GetPollDelay()); err != nil {
			errs = multierror.Append(errs, err)
		}
	case *meshconfig.PrivateKeyProvider_Qat:
		if qatConf := pkpConf.GetQat(); qatConf == nil {
			errs = multierror.Append(errs, errors.New("qat configuration is required"))
		} else if err := checkPollDelay(qatConf.GetPollDelay()); err != nil {
			errs = multierror.Append(errs, err)
		}
	default:
		errs = multierror.Append(errs, errors.New("unknown private key provider"))
	}
	return errs
}
// ValidateMeshConfigProxyConfig checks that the mesh config is well-formed
func ValidateMeshConfigProxyConfig(config *meshconfig.ProxyConfig) (errs error) {
	if config.ConfigPath == "" {
		errs = multierror.Append(errs, errors.New("config path must be set"))
	}
	if config.BinaryPath == "" {
		errs = multierror.Append(errs, errors.New("binary path must be set"))
	}
	// Exactly one cluster-naming variant of the oneof must be populated.
	clusterName := config.GetClusterName()
	switch naming := clusterName.(type) {
	case *meshconfig.ProxyConfig_ServiceCluster:
		if naming.ServiceCluster == "" {
			errs = multierror.Append(errs, errors.New("service cluster must be specified"))
		}
	case *meshconfig.ProxyConfig_TracingServiceName_: // intentionally left empty for now
	default:
		// Also reached when the oneof is unset (clusterName == nil).
		errs = multierror.Append(errs, errors.New("oneof service cluster or tracing service name must be specified"))
	}
	if err := ValidateDrainDuration(config.DrainDuration); err != nil {
		errs = multierror.Append(errs, err)
	}
	// discovery address is mandatory since mutual TLS relies on CDS.
	// strictly speaking, proxies can operate without RDS/CDS and with hot restarts
	// but that requires additional test validation
	if config.DiscoveryAddress == "" {
		errs = multierror.Append(errs, errors.New("discovery address must be set to the proxy discovery service"))
	} else if err := ValidateProxyAddress(config.DiscoveryAddress); err != nil {
		errs = multierror.Append(errs, multierror.Prefix(err, "invalid discovery address:"))
	}
	// Each tracing backend is validated only when it is actually configured.
	if tracer := config.GetTracing().GetLightstep(); tracer != nil {
		if err := ValidateLightstepCollector(tracer); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid lightstep config:"))
		}
	}
	if tracer := config.GetTracing().GetZipkin(); tracer != nil {
		if err := ValidateZipkinCollector(tracer); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid zipkin config:"))
		}
	}
	if tracer := config.GetTracing().GetDatadog(); tracer != nil {
		if err := ValidateDatadogCollector(tracer); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid datadog config:"))
		}
	}
	if tracer := config.GetTracing().GetTlsSettings(); tracer != nil {
		if err := validateTLS(tracer); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid tracing TLS config:"))
		}
	}
	if tracerCustomTags := config.GetTracing().GetCustomTags(); tracerCustomTags != nil {
		if err := validateCustomTags(tracerCustomTags); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid tracing custom tags:"))
		}
	}
	if config.StatsdUdpAddress != "" {
		if err := ValidateProxyAddress(config.StatsdUdpAddress); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid statsd udp address %q:", config.StatsdUdpAddress)))
		}
	}
	// EnvoyMetricsServiceAddress is deprecated in favor of EnvoyMetricsService;
	// it is still validated, and a deprecation warning is logged when valid.
	// nolint: staticcheck
	if config.EnvoyMetricsServiceAddress != "" {
		if err := ValidateProxyAddress(config.EnvoyMetricsServiceAddress); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid envoy metrics service address %q:", config.EnvoyMetricsServiceAddress)))
		} else {
			scope.Warnf("EnvoyMetricsServiceAddress is deprecated, use EnvoyMetricsService instead.") // nolint: stylecheck
		}
	}
	if config.EnvoyMetricsService != nil && config.EnvoyMetricsService.Address != "" {
		if err := ValidateProxyAddress(config.EnvoyMetricsService.Address); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid envoy metrics service address %q:", config.EnvoyMetricsService.Address)))
		}
	}
	if config.EnvoyAccessLogService != nil && config.EnvoyAccessLogService.Address != "" {
		if err := ValidateProxyAddress(config.EnvoyAccessLogService.Address); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid envoy access log service address %q:", config.EnvoyAccessLogService.Address)))
		}
	}
	if err := ValidatePort(int(config.ProxyAdminPort)); err != nil {
		errs = multierror.Append(errs, multierror.Prefix(err, "invalid proxy admin port:"))
	}
	if err := ValidateControlPlaneAuthPolicy(config.ControlPlaneAuthPolicy); err != nil {
		errs = multierror.Append(errs, multierror.Prefix(err, "invalid authentication policy:"))
	}
	if err := ValidatePort(int(config.StatusPort)); err != nil {
		errs = multierror.Append(errs, multierror.Prefix(err, "invalid status port:"))
	}
	if pkpConf := config.GetPrivateKeyProvider(); pkpConf != nil {
		if err := validatePrivateKeyProvider(pkpConf); err != nil {
			errs = multierror.Append(errs, multierror.Prefix(err, "invalid private key provider configuration:"))
		}
	}
	return
}
// ValidateControlPlaneAuthPolicy checks that the control plane auth policy is
// one of the recognized values (NONE or MUTUAL_TLS).
func ValidateControlPlaneAuthPolicy(policy meshconfig.AuthenticationPolicy) error {
	switch policy {
	case meshconfig.AuthenticationPolicy_NONE, meshconfig.AuthenticationPolicy_MUTUAL_TLS:
		return nil
	default:
		return fmt.Errorf("unrecognized control plane auth policy %q", policy)
	}
}
// validatePolicyTargetReference checks a policy targetRef: the name must be
// set, the namespace must be empty, and the group/kind must match the
// Kubernetes Gateway API Gateway resource.
func validatePolicyTargetReference(targetRef *type_beta.PolicyTargetReference) (v Validation) {
	if targetRef == nil {
		return
	}
	if targetRef.Name == "" {
		v = appendErrorf(v, "targetRef name must be set")
	}
	if targetRef.Namespace != "" {
		// Cross-namespace references are not supported.
		v = appendErrorf(v, "targetRef namespace must not be set")
	}
	// Currently, gateway.networking.k8s.io is the only valid Group and gateway.networking.k8s.io/Gateway the only valid Kind.
	groupOK := targetRef.Group == gvk.KubernetesGateway.Group
	kindOK := targetRef.Kind == gvk.KubernetesGateway.Kind
	if !groupOK || !kindOK {
		v = appendErrorf(v, "targetRef Group and/or Kind don't match; expected: [Group: %s, Kind: %s], got: [Group: %s, Kind: %s]",
			gvk.KubernetesGateway.Group, gvk.KubernetesGateway.Kind, targetRef.Group, targetRef.Kind)
	}
	return
}
// validateWorkloadSelector rejects empty keys and wildcard characters in a
// workload selector's match labels, and warns when a selector is present but
// carries no labels at all.
func validateWorkloadSelector(selector *type_beta.WorkloadSelector) Validation {
	v := Validation{}
	if selector == nil {
		return v
	}
	for key, value := range selector.MatchLabels {
		pair := fmt.Sprintf("%s=%s", key, value)
		if key == "" {
			v = appendValidation(v, fmt.Errorf("empty key is not supported in selector: %q", pair))
		}
		if strings.Contains(key, "*") || strings.Contains(value, "*") {
			v = appendValidation(v, fmt.Errorf("wildcard is not supported in selector: %q", pair))
		}
	}
	if len(selector.MatchLabels) == 0 {
		// A label-less selector matches everything, which is usually a mistake.
		warning := fmt.Errorf("workload selector specified without labels") // nolint: stylecheck
		v = appendValidation(v, WrapWarning(warning))
	}
	return v
}
// validateOneOfSelectorType rejects policies that set both a workload
// selector and a targetRef; the two scoping mechanisms are mutually exclusive.
func validateOneOfSelectorType(selector *type_beta.WorkloadSelector, targetRef *type_beta.PolicyTargetReference) (v Validation) {
	if selector == nil || targetRef == nil {
		return
	}
	return appendErrorf(v, "only one of targetRef or workloadSelector can be set")
}
// ValidateAuthorizationPolicy checks that AuthorizationPolicy is well-formed.
var ValidateAuthorizationPolicy = registerValidateFunc("ValidateAuthorizationPolicy",
	func(cfg config.Config) (Warning, error) {
		in, ok := cfg.Spec.(*security_beta.AuthorizationPolicy)
		if !ok {
			return nil, fmt.Errorf("cannot cast to AuthorizationPolicy")
		}
		var errs error
		var warnings Warning
		// selector and targetRef are mutually exclusive scoping mechanisms;
		// each is also validated on its own.
		selectorTypeValidation := validateOneOfSelectorType(in.GetSelector(), in.GetTargetRef())
		workloadSelectorValidation := validateWorkloadSelector(in.GetSelector())
		targetRefValidation := validatePolicyTargetReference(in.GetTargetRef())
		errs = appendErrors(errs, selectorTypeValidation, workloadSelectorValidation, targetRefValidation)
		warnings = appendErrors(warnings, workloadSelectorValidation.Warning)
		// CUSTOM actions delegate to an external provider and support only a
		// subset of the matching attributes.
		if in.Action == security_beta.AuthorizationPolicy_CUSTOM {
			if in.Rules == nil {
				errs = appendErrors(errs, fmt.Errorf("CUSTOM action without `rules` is meaningless as it will never be triggered, "+
					"add an empty rule `{}` if you want it be triggered for every request"))
			} else {
				if in.GetProvider() == nil || in.GetProvider().GetName() == "" {
					errs = appendErrors(errs, fmt.Errorf("`provider.name` must not be empty"))
				}
			}
			// TODO(yangminzhu): Add support for more matching rules.
			for _, rule := range in.GetRules() {
				// check reports an error when a field unsupported by CUSTOM is set.
				check := func(invalid bool, name string) error {
					if invalid {
						return fmt.Errorf("%s is currently not supported with CUSTOM action", name)
					}
					return nil
				}
				for _, from := range rule.GetFrom() {
					if src := from.GetSource(); src != nil {
						errs = appendErrors(errs, check(len(src.Namespaces) != 0, "From.Namespaces"))
						errs = appendErrors(errs, check(len(src.NotNamespaces) != 0, "From.NotNamespaces"))
						errs = appendErrors(errs, check(len(src.Principals) != 0, "From.Principals"))
						errs = appendErrors(errs, check(len(src.NotPrincipals) != 0, "From.NotPrincipals"))
						errs = appendErrors(errs, check(len(src.RequestPrincipals) != 0, "From.RequestPrincipals"))
						errs = appendErrors(errs, check(len(src.NotRequestPrincipals) != 0, "From.NotRequestPrincipals"))
					}
				}
				for _, when := range rule.GetWhen() {
					if when == nil {
						errs = appendErrors(errs, fmt.Errorf("when field cannot be nil"))
						continue
					}
					errs = appendErrors(errs, check(when.Key == "source.namespace", when.Key))
					errs = appendErrors(errs, check(when.Key == "source.principal", when.Key))
					errs = appendErrors(errs, check(strings.HasPrefix(when.Key, "request.auth."), when.Key))
				}
			}
		}
		if in.GetProvider() != nil && in.Action != security_beta.AuthorizationPolicy_CUSTOM {
			errs = appendErrors(errs, fmt.Errorf("`provider` must not be with non CUSTOM action, found %s", in.Action))
		}
		if in.Action == security_beta.AuthorizationPolicy_DENY && in.Rules == nil {
			errs = appendErrors(errs, fmt.Errorf("DENY action without `rules` is meaningless as it will never be triggered, "+
				"add an empty rule `{}` if you want it be triggered for every request"))
		}
		for i, rule := range in.GetRules() {
			if rule == nil {
				errs = appendErrors(errs, fmt.Errorf("`rule` must not be nil, found at rule %d", i))
				continue
			}
			if rule.From != nil && len(rule.From) == 0 {
				errs = appendErrors(errs, fmt.Errorf("`from` must not be empty, found at rule %d", i))
			}
			// Track whether the rule uses TCP-compatible attributes; a DENY rule
			// built only from HTTP attributes silently denies all TCP traffic,
			// which is surfaced as a warning at the end of the loop.
			tcpRulesInFrom := false
			tcpRulesInTo := false
			fromRuleExist := false
			toRuleExist := false
			for _, from := range rule.From {
				if from == nil {
					errs = appendErrors(errs, fmt.Errorf("`from` must not be nil, found at rule %d", i))
					continue
				}
				if from.Source == nil {
					errs = appendErrors(errs, fmt.Errorf("`from.source` must not be nil, found at rule %d", i))
				} else {
					fromRuleExist = true
					src := from.Source
					if len(src.Principals) == 0 && len(src.RequestPrincipals) == 0 && len(src.Namespaces) == 0 && len(src.IpBlocks) == 0 &&
						len(src.RemoteIpBlocks) == 0 && len(src.NotPrincipals) == 0 && len(src.NotRequestPrincipals) == 0 && len(src.NotNamespaces) == 0 &&
						len(src.NotIpBlocks) == 0 && len(src.NotRemoteIpBlocks) == 0 {
						errs = appendErrors(errs, fmt.Errorf("`from.source` must not be empty, found at rule %d", i))
					}
					errs = appendErrors(errs, security.ValidateIPs(from.Source.GetIpBlocks()))
					errs = appendErrors(errs, security.ValidateIPs(from.Source.GetNotIpBlocks()))
					errs = appendErrors(errs, security.ValidateIPs(from.Source.GetRemoteIpBlocks()))
					errs = appendErrors(errs, security.ValidateIPs(from.Source.GetNotRemoteIpBlocks()))
					errs = appendErrors(errs, security.CheckEmptyValues("Principals", src.Principals))
					errs = appendErrors(errs, security.CheckEmptyValues("RequestPrincipals", src.RequestPrincipals))
					errs = appendErrors(errs, security.CheckEmptyValues("Namespaces", src.Namespaces))
					errs = appendErrors(errs, security.CheckEmptyValues("IpBlocks", src.IpBlocks))
					errs = appendErrors(errs, security.CheckEmptyValues("RemoteIpBlocks", src.RemoteIpBlocks))
					errs = appendErrors(errs, security.CheckEmptyValues("NotPrincipals", src.NotPrincipals))
					errs = appendErrors(errs, security.CheckEmptyValues("NotRequestPrincipals", src.NotRequestPrincipals))
					errs = appendErrors(errs, security.CheckEmptyValues("NotNamespaces", src.NotNamespaces))
					errs = appendErrors(errs, security.CheckEmptyValues("NotIpBlocks", src.NotIpBlocks))
					errs = appendErrors(errs, security.CheckEmptyValues("NotRemoteIpBlocks", src.NotRemoteIpBlocks))
					// These source fields are enforceable at the TCP layer.
					if src.NotPrincipals != nil || src.Principals != nil || src.IpBlocks != nil ||
						src.NotIpBlocks != nil || src.Namespaces != nil ||
						src.NotNamespaces != nil || src.RemoteIpBlocks != nil || src.NotRemoteIpBlocks != nil {
						tcpRulesInFrom = true
					}
				}
			}
			if rule.To != nil && len(rule.To) == 0 {
				errs = appendErrors(errs, fmt.Errorf("`to` must not be empty, found at rule %d", i))
			}
			for _, to := range rule.To {
				if to == nil {
					errs = appendErrors(errs, fmt.Errorf("`to` must not be nil, found at rule %d", i))
					continue
				}
				if to.Operation == nil {
					errs = appendErrors(errs, fmt.Errorf("`to.operation` must not be nil, found at rule %d", i))
				} else {
					toRuleExist = true
					op := to.Operation
					if len(op.Ports) == 0 && len(op.Methods) == 0 && len(op.Paths) == 0 && len(op.Hosts) == 0 &&
						len(op.NotPorts) == 0 && len(op.NotMethods) == 0 && len(op.NotPaths) == 0 && len(op.NotHosts) == 0 {
						errs = appendErrors(errs, fmt.Errorf("`to.operation` must not be empty, found at rule %d", i))
					}
					errs = appendErrors(errs, security.ValidatePorts(to.Operation.GetPorts()))
					errs = appendErrors(errs, security.ValidatePorts(to.Operation.GetNotPorts()))
					errs = appendErrors(errs, security.CheckEmptyValues("Ports", op.Ports))
					errs = appendErrors(errs, security.CheckEmptyValues("Methods", op.Methods))
					errs = appendErrors(errs, security.CheckEmptyValues("Paths", op.Paths))
					errs = appendErrors(errs, security.CheckEmptyValues("Hosts", op.Hosts))
					errs = appendErrors(errs, security.CheckEmptyValues("NotPorts", op.NotPorts))
					errs = appendErrors(errs, security.CheckEmptyValues("NotMethods", op.NotMethods))
					errs = appendErrors(errs, security.CheckEmptyValues("NotPaths", op.NotPaths))
					errs = appendErrors(errs, security.CheckEmptyValues("NotHosts", op.NotHosts))
					// Port matches are the only operation fields enforceable for TCP.
					if op.Ports != nil || op.NotPorts != nil {
						tcpRulesInTo = true
					}
				}
			}
			for _, condition := range rule.GetWhen() {
				key := condition.GetKey()
				if key == "" {
					errs = appendErrors(errs, fmt.Errorf("`key` must not be empty"))
				} else {
					if len(condition.GetValues()) == 0 && len(condition.GetNotValues()) == 0 {
						errs = appendErrors(errs, fmt.Errorf("at least one of `values` or `notValues` must be set for key %s",
							key))
					} else {
						if err := security.ValidateAttribute(key, condition.GetValues()); err != nil {
							errs = appendErrors(errs, fmt.Errorf("invalid `value` for `key` %s: %v", key, err))
						}
						if err := security.ValidateAttribute(key, condition.GetNotValues()); err != nil {
							errs = appendErrors(errs, fmt.Errorf("invalid `notValue` for `key` %s: %v", key, err))
						}
					}
				}
			}
			// Warn when a DENY rule only matches HTTP attributes: at the TCP
			// layer those attributes are absent and the rule denies everything.
			if ((fromRuleExist && !toRuleExist && !tcpRulesInFrom) || (toRuleExist && !tcpRulesInTo)) &&
				in.Action == security_beta.AuthorizationPolicy_DENY {
				warning := fmt.Errorf("configured AuthorizationPolicy will deny all traffic " +
					"to TCP ports under its scope due to the use of only HTTP attributes in a DENY rule; " +
					"it is recommended to explicitly specify the port")
				warnings = appendErrors(warnings, warning)
			}
		}
		return warnings, multierror.Prefix(errs, fmt.Sprintf("invalid policy %s.%s:", cfg.Name, cfg.Namespace))
	})
// ValidateRequestAuthentication checks that request authentication spec is well-formed.
var ValidateRequestAuthentication = registerValidateFunc("ValidateRequestAuthentication",
	func(cfg config.Config) (Warning, error) {
		in, ok := cfg.Spec.(*security_beta.RequestAuthentication)
		if !ok {
			return nil, errors.New("cannot cast to RequestAuthentication")
		}
		v := Validation{}
		// Scoping checks are shared with the other policy kinds.
		v = appendValidation(v,
			validateOneOfSelectorType(in.GetSelector(), in.GetTargetRef()),
			validateWorkloadSelector(in.GetSelector()),
			validatePolicyTargetReference(in.GetTargetRef()),
		)
		for _, rule := range in.JwtRules {
			v = appendValidation(v, validateJwtRule(rule))
		}
		return v.Unwrap()
	})
// validateJwtRule checks a single JWTRule: the issuer must be set, audiences
// and token locations must be non-empty strings, the JWKS URI must parse, and
// any inline JWKS document must be a valid key set.
func validateJwtRule(rule *security_beta.JWTRule) (errs error) {
	if rule == nil {
		return nil
	}
	if rule.Issuer == "" {
		errs = multierror.Append(errs, errors.New("issuer must be set"))
	}
	for _, audience := range rule.Audiences {
		if audience == "" {
			errs = multierror.Append(errs, errors.New("audience must be non-empty string"))
		}
	}
	if rule.JwksUri != "" {
		// Only the parse result matters here; the URI itself is consumed elsewhere.
		if _, err := security.ParseJwksURI(rule.JwksUri); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	if rule.Jwks != "" {
		if _, err := jwk.Parse([]byte(rule.Jwks)); err != nil {
			errs = multierror.Append(errs, fmt.Errorf("jwks parse error: %v", err))
		}
	}
	for _, header := range rule.FromHeaders {
		if header == nil {
			errs = multierror.Append(errs, errors.New("location header name must be non-null"))
			continue
		}
		if header.Name == "" {
			errs = multierror.Append(errs, errors.New("location header name must be non-empty string"))
		}
	}
	for _, param := range rule.FromParams {
		if param == "" {
			errs = multierror.Append(errs, errors.New("location query must be non-empty string"))
		}
	}
	for _, cookie := range rule.FromCookies {
		if cookie == "" {
			errs = multierror.Append(errs, errors.New("cookie name must be non-empty string"))
		}
	}
	for _, mapping := range rule.OutputClaimToHeaders {
		switch {
		case mapping == nil:
			errs = multierror.Append(errs, errors.New("outputClaimToHeaders must not be null"))
		case mapping.Claim == "" || mapping.Header == "":
			errs = multierror.Append(errs, errors.New("outputClaimToHeaders header and claim value must be non-empty string"))
		default:
			if err := ValidateHTTPHeaderValue(mapping.Header); err != nil {
				errs = multierror.Append(errs, err)
			}
		}
	}
	return
}
// ValidatePeerAuthentication checks that peer authentication spec is well-formed.
var ValidatePeerAuthentication = registerValidateFunc("ValidatePeerAuthentication",
	func(cfg config.Config) (Warning, error) {
		in, ok := cfg.Spec.(*security_beta.PeerAuthentication)
		if !ok {
			return nil, errors.New("cannot cast to PeerAuthentication")
		}
		var errs error
		hasSelector := in.Selector != nil && len(in.Selector.MatchLabels) > 0
		if !hasSelector && len(in.PortLevelMtls) != 0 {
			// Port-level mTLS only makes sense on workload-scoped policies.
			errs = appendErrors(errs,
				fmt.Errorf("mesh/namespace peer authentication cannot have port level mTLS"))
		}
		if in.PortLevelMtls != nil && len(in.PortLevelMtls) == 0 {
			errs = appendErrors(errs,
				fmt.Errorf("port level mTLS, if defined, must have at least one element"))
		}
		for port := range in.PortLevelMtls {
			if port == 0 {
				errs = appendErrors(errs, fmt.Errorf("port cannot be 0"))
			}
		}
		validation := validateWorkloadSelector(in.Selector)
		errs = appendErrors(errs, validation)
		return validation.Warning, errs
	})
// ValidateVirtualService checks that a v1alpha3 route rule is well-formed.
var ValidateVirtualService = registerValidateFunc("ValidateVirtualService",
	func(cfg config.Config) (Warning, error) {
		virtualService, ok := cfg.Spec.(*networking.VirtualService)
		if !ok {
			return nil, errors.New("cannot cast to virtual service")
		}
		errs := Validation{}
		if len(virtualService.Hosts) == 0 {
			// This must be delegate - enforce delegate validations.
			if len(virtualService.Gateways) != 0 {
				// meaningless to specify gateways in delegate
				errs = appendValidation(errs, fmt.Errorf("delegate virtual service must have no gateways specified"))
			}
			if len(virtualService.Tls) != 0 {
				// meaningless to specify tls in delegate, we do not support tls delegate
				errs = appendValidation(errs, fmt.Errorf("delegate virtual service must have no tls route specified"))
			}
			if len(virtualService.Tcp) != 0 {
				// meaningless to specify tls in delegate, we do not support tcp delegate
				errs = appendValidation(errs, fmt.Errorf("delegate virtual service must have no tcp route specified"))
			}
		}
		// Determine the scope the service binds to: mesh, gateways, or both.
		// No gateways at all implies the implicit mesh gateway.
		appliesToMesh := false
		appliesToGateway := false
		if len(virtualService.Gateways) == 0 {
			appliesToMesh = true
		} else {
			errs = appendValidation(errs, validateGatewayNames(virtualService.Gateways))
			for _, gatewayName := range virtualService.Gateways {
				if gatewayName == constants.IstioMeshGateway {
					appliesToMesh = true
				} else {
					appliesToGateway = true
				}
			}
		}
		if !appliesToGateway {
			// JWT claim based routing is a gateway-only feature; reject claim
			// keys in header matches for mesh-scoped services.
			validateJWTClaimRoute := func(headers map[string]*networking.StringMatch) {
				for key := range headers {
					if jwt.ToRoutingClaim(key).Match {
						msg := fmt.Sprintf("JWT claim based routing (key: %s) is only supported for gateway, found no gateways: %v", key, virtualService.Gateways)
						errs = appendValidation(errs, errors.New(msg))
					}
				}
			}
			for _, http := range virtualService.GetHttp() {
				for _, m := range http.GetMatch() {
					validateJWTClaimRoute(m.GetHeaders())
					validateJWTClaimRoute(m.GetWithoutHeaders())
				}
			}
		}
		// A host is accepted as either a wildcard domain or an IP address;
		// "*" alone is rejected for mesh-bound services.
		allHostsValid := true
		for _, virtualHost := range virtualService.Hosts {
			if err := ValidateWildcardDomain(virtualHost); err != nil {
				if !netutil.IsValidIPAddress(virtualHost) {
					errs = appendValidation(errs, err)
					allHostsValid = false
				}
			} else if appliesToMesh && virtualHost == "*" {
				errs = appendValidation(errs, fmt.Errorf("wildcard host * is not allowed for virtual services bound to the mesh gateway"))
				allHostsValid = false
			}
		}
		// Check for duplicate hosts
		// Duplicates include literal duplicates as well as wildcard duplicates
		// E.g., *.foo.com, and *.com are duplicates in the same virtual service
		if allHostsValid {
			for i := 0; i < len(virtualService.Hosts); i++ {
				hostI := host.Name(virtualService.Hosts[i])
				for j := i + 1; j < len(virtualService.Hosts); j++ {
					hostJ := host.Name(virtualService.Hosts[j])
					if hostI.Matches(hostJ) {
						errs = appendValidation(errs, fmt.Errorf("duplicate hosts in virtual service: %s & %s", hostI, hostJ))
					}
				}
			}
		}
		if len(virtualService.Http) == 0 && len(virtualService.Tcp) == 0 && len(virtualService.Tls) == 0 {
			errs = appendValidation(errs, errors.New("http, tcp or tls must be provided in virtual service"))
		}
		// Routes synthesized from Gateway API resources follow slightly
		// different validation semantics, signaled via an internal annotation.
		gatewaySemantics := cfg.Annotations[constants.InternalRouteSemantics] == constants.RouteSemanticsGateway
		for _, httpRoute := range virtualService.Http {
			if httpRoute == nil {
				errs = appendValidation(errs, errors.New("http route may not be null"))
				continue
			}
			errs = appendValidation(errs, validateHTTPRoute(httpRoute, len(virtualService.Hosts) == 0, gatewaySemantics))
		}
		for _, tlsRoute := range virtualService.Tls {
			errs = appendValidation(errs, validateTLSRoute(tlsRoute, virtualService, gatewaySemantics))
		}
		for _, tcpRoute := range virtualService.Tcp {
			errs = appendValidation(errs, validateTCPRoute(tcpRoute, gatewaySemantics))
		}
		errs = appendValidation(errs, validateExportTo(cfg.Namespace, virtualService.ExportTo, false, false))
		// warnUnused / warnIneffective surface route-reachability analysis
		// results as warnings rather than hard errors.
		warnUnused := func(ruleno, reason string) {
			errs = appendValidation(errs, WrapWarning(&AnalysisAwareError{
				Type:       "VirtualServiceUnreachableRule",
				Msg:        fmt.Sprintf("virtualService rule %v not used (%s)", ruleno, reason),
				Parameters: []any{ruleno, reason},
			}))
		}
		warnIneffective := func(ruleno, matchno, dupno string) {
			errs = appendValidation(errs, WrapWarning(&AnalysisAwareError{
				Type:       "VirtualServiceIneffectiveMatch",
				Msg:        fmt.Sprintf("virtualService rule %v match %v is not used (duplicate/overlapping match in rule %v)", ruleno, matchno, dupno),
				Parameters: []any{ruleno, matchno, dupno},
			}))
		}
		analyzeUnreachableHTTPRules(virtualService.Http, warnUnused, warnIneffective)
		analyzeUnreachableTCPRules(virtualService.Tcp, warnUnused, warnIneffective)
		analyzeUnreachableTLSRules(virtualService.Tls, warnUnused, warnIneffective)
		return errs.Unwrap()
	})
// assignExactOrPrefix renders a StringMatch as a single comparable string,
// tagged with the kind of match it came from. Exact matches take precedence
// over prefix matches; the empty string means neither was set.
func assignExactOrPrefix(exact, prefix string) string {
	switch {
	case exact != "":
		return matchExact + exact
	case prefix != "":
		return matchPrefix + prefix
	default:
		return ""
	}
}
// genMatchHTTPRoutes build the match rules into struct OverlappingMatchValidationForHTTPRoute
// based on particular HTTPMatchRequest, according to comments on https://github.com/istio/istio/pull/32701
// only support Match's port, method, authority, headers, query params and nonheaders for now.
//
// Returns nil when the match is absent, has no URI clause, or its URI clause
// is not a prefix match — only prefix-URI matches participate in the
// overlapping-match analysis.
func genMatchHTTPRoutes(route *networking.HTTPRoute, match *networking.HTTPMatchRequest,
	rulen, matchn int,
) (matchHTTPRoutes *OverlappingMatchValidationForHTTPRoute) {
	// skip current match if no match field for current route
	if match == nil {
		return nil
	}
	// skip current match if no URI field
	if match.Uri == nil {
		return nil
	}
	// store all httproute with prefix match uri
	tmpPrefix := match.Uri.GetPrefix()
	if tmpPrefix != "" {
		// set Method; proto getters are nil-safe so a missing Method yields ""
		methodExact := match.Method.GetExact()
		methodPrefix := match.Method.GetPrefix()
		methodMatch := assignExactOrPrefix(methodExact, methodPrefix)
		// if no method information, it should be GET by default
		if methodMatch == "" {
			methodMatch = matchExact + "GET"
		}
		// set Authority
		authorityExact := match.Authority.GetExact()
		authorityPrefix := match.Authority.GetPrefix()
		authorityMatch := assignExactOrPrefix(authorityExact, authorityPrefix)
		// set Headers: each header value is normalized to its exact/prefix form
		headerMap := make(map[string]string)
		for hkey, hvalue := range match.Headers {
			hvalueExact := hvalue.GetExact()
			hvaluePrefix := hvalue.GetPrefix()
			hvalueMatch := assignExactOrPrefix(hvalueExact, hvaluePrefix)
			headerMap[hkey] = hvalueMatch
		}
		// set QueryParams, normalized the same way as headers
		QPMap := make(map[string]string)
		for qpkey, qpvalue := range match.QueryParams {
			qpvalueExact := qpvalue.GetExact()
			qpvaluePrefix := qpvalue.GetPrefix()
			qpvalueMatch := assignExactOrPrefix(qpvalueExact, qpvaluePrefix)
			QPMap[qpkey] = qpvalueMatch
		}
		// set WithoutHeaders (headers that must NOT match)
		noHeaderMap := make(map[string]string)
		for nhkey, nhvalue := range match.WithoutHeaders {
			nhvalueExact := nhvalue.GetExact()
			nhvaluePrefix := nhvalue.GetPrefix()
			nhvalueMatch := assignExactOrPrefix(nhvalueExact, nhvaluePrefix)
			noHeaderMap[nhkey] = nhvalueMatch
		}
		// NOTE: positional struct literal — field order here must track the
		// declaration order of OverlappingMatchValidationForHTTPRoute.
		matchHTTPRoutes = &OverlappingMatchValidationForHTTPRoute{
			routeName(route, rulen),
			requestName(match, matchn),
			tmpPrefix,
			match.Port,
			methodMatch,
			authorityMatch,
			headerMap,
			QPMap,
			noHeaderMap,
		}
		return
	}
	return nil
}
// coveredValidation reports whether match vA "covers" match vB: every request
// that would hit vB's prefix-URI match would already have been caught by vA.
// The two matches are compared on URI prefix, port, method, authority,
// headers, query params, and without-headers.
func coveredValidation(vA, vB *OverlappingMatchValidationForHTTPRoute) bool {
	// vA can only shadow vB when vB's URI prefix extends vA's
	// (e.g. vA.Prefix '/debug' covers vB.Prefix '/debugs').
	if !strings.HasPrefix(vB.Prefix, vA.Prefix) {
		return false
	}
	// Ports must agree exactly.
	if vB.MatchPort != vA.MatchPort {
		return false
	}
	// A normalized match string covers another when it is identical to it or
	// extends it (prefix relation). Note HasPrefix is true for equal strings.
	covers := func(a, b string) bool {
		return a == b || strings.HasPrefix(a, b)
	}
	if !covers(vA.MatchMethod, vB.MatchMethod) {
		return false
	}
	if !covers(vA.MatchAuthority, vB.MatchAuthority) {
		return false
	}
	// Two match maps are compatible when they have the same number of
	// entries and every key of a exists in b with a covering value.
	mapCovers := func(a, b map[string]string) bool {
		if len(a) != len(b) {
			return false
		}
		for key, aVal := range a {
			bVal, ok := b[key]
			if !ok || !covers(aVal, bVal) {
				return false
			}
		}
		return true
	}
	return mapCovers(vA.MatchHeaders, vB.MatchHeaders) &&
		mapCovers(vA.MatchQueryParams, vB.MatchQueryParams) &&
		mapCovers(vA.MatchNonHeaders, vB.MatchNonHeaders)
}
// analyzeUnreachableHTTPRules reports HTTP rules and matches that can never
// take effect:
//   - any rule appearing after a match-less (catch-all) rule is unreachable;
//   - a match clause identical (by JSON serialization) to one in an earlier
//     rule is ineffective; a rule whose clauses are ALL duplicates is
//     unreachable;
//   - a prefix-URI match fully covered by an earlier, broader prefix match
//     (see coveredValidation) is reported as ineffective.
func analyzeUnreachableHTTPRules(routes []*networking.HTTPRoute,
	reportUnreachable func(ruleno, reason string), reportIneffective func(ruleno, matchno, dupno string),
) {
	// matchesEncountered maps a match's JSON form to the index of the first
	// rule that used it.
	matchesEncountered := make(map[string]int)
	// emptyMatchEncountered holds the index of a previously seen catch-all
	// rule, or -1 if none has been seen yet.
	emptyMatchEncountered := -1
	var matchHTTPRoutes []*OverlappingMatchValidationForHTTPRoute
	for rulen, route := range routes {
		if route == nil {
			continue
		}
		if len(route.Match) == 0 {
			// A second catch-all rule can never be reached.
			if emptyMatchEncountered >= 0 {
				reportUnreachable(routeName(route, rulen), "only the last rule can have no matches")
			}
			emptyMatchEncountered = rulen
			continue
		}
		duplicateMatches := 0
		for matchn, match := range route.Match {
			dupn, ok := matchesEncountered[asJSON(match)]
			if ok {
				reportIneffective(routeName(route, rulen), requestName(match, matchn), routeName(routes[dupn], dupn))
				duplicateMatches++
				// no need to handle for totally duplicated match rules
				continue
			}
			matchesEncountered[asJSON(match)] = rulen
			// build the match rules into struct OverlappingMatchValidationForHTTPRoute based on current match
			matchHTTPRoute := genMatchHTTPRoutes(route, match, rulen, matchn)
			if matchHTTPRoute != nil {
				matchHTTPRoutes = append(matchHTTPRoutes, matchHTTPRoute)
			}
		}
		if duplicateMatches == len(route.Match) {
			reportUnreachable(routeName(route, rulen), "all matches used by prior rules")
		}
	}
	// at least 2 prefix matched routes for overlapping match validation
	if len(matchHTTPRoutes) > 1 {
		// check the overlapping match from the first prefix information;
		// O(n^2) pairwise comparison over the collected prefix matches.
		for routeIndex, routePrefix := range matchHTTPRoutes {
			for rIndex := routeIndex + 1; rIndex < len(matchHTTPRoutes); rIndex++ {
				// exclude the duplicate-match cases which have been validated above
				if strings.Compare(matchHTTPRoutes[rIndex].Prefix, routePrefix.Prefix) == 0 {
					continue
				}
				// Validate former prefix match does not cover the latter one.
				if coveredValidation(routePrefix, matchHTTPRoutes[rIndex]) {
					prefixMatchA := matchHTTPRoutes[rIndex].MatchStr + " of prefix " + matchHTTPRoutes[rIndex].Prefix
					prefixMatchB := routePrefix.MatchStr + " of prefix " + routePrefix.Prefix + " on " + routePrefix.RouteStr
					reportIneffective(matchHTTPRoutes[rIndex].RouteStr, prefixMatchA, prefixMatchB)
				}
			}
		}
	}
}
// analyzeUnreachableTCPRules reports TCP rules that can never take effect:
// rules following a match-less (catch-all) rule, and rules whose match
// clauses all duplicate those of earlier rules.
// NOTE: This method identical to analyzeUnreachableHTTPRules.
func analyzeUnreachableTCPRules(routes []*networking.TCPRoute,
	reportUnreachable func(ruleno, reason string), reportIneffective func(ruleno, matchno, dupno string),
) {
	seen := make(map[string]int) // JSON form of a match -> index of the rule that introduced it
	catchAll := -1               // index of a previously seen match-less rule, or -1
	for rulen, route := range routes {
		if route == nil {
			continue
		}
		if len(route.Match) == 0 {
			// Only one catch-all is useful; a second one is dead.
			if catchAll >= 0 {
				reportUnreachable(routeName(route, rulen), "only the last rule can have no matches")
			}
			catchAll = rulen
			continue
		}
		shadowed := 0
		for matchn, match := range route.Match {
			key := asJSON(match)
			if first, dup := seen[key]; dup {
				reportIneffective(routeName(route, rulen), requestName(match, matchn), routeName(routes[first], first))
				shadowed++
			} else {
				seen[key] = rulen
			}
		}
		if shadowed == len(route.Match) {
			reportUnreachable(routeName(route, rulen), "all matches used by prior rules")
		}
	}
}
// analyzeUnreachableTLSRules reports TLS rules that can never take effect:
// rules following a match-less (catch-all) rule, and rules whose match
// clauses all duplicate those of earlier rules.
// NOTE: This method identical to analyzeUnreachableHTTPRules.
func analyzeUnreachableTLSRules(routes []*networking.TLSRoute,
	reportUnreachable func(ruleno, reason string), reportIneffective func(ruleno, matchno, dupno string),
) {
	firstUse := make(map[string]int) // JSON form of a match -> rule index that first used it
	emptyRule := -1                  // index of an earlier match-less rule, or -1
	for rulen, route := range routes {
		if route == nil {
			continue
		}
		if len(route.Match) == 0 {
			// Anything after a catch-all can never be reached.
			if emptyRule >= 0 {
				reportUnreachable(routeName(route, rulen), "only the last rule can have no matches")
			}
			emptyRule = rulen
			continue
		}
		duplicates := 0
		for matchn, match := range route.Match {
			key := asJSON(match)
			if prior, ok := firstUse[key]; ok {
				reportIneffective(routeName(route, rulen), requestName(match, matchn), routeName(routes[prior], prior))
				duplicates++
			} else {
				firstUse[key] = rulen
			}
		}
		if duplicates == len(route.Match) {
			reportUnreachable(routeName(route, rulen), "all matches used by prior rules")
		}
	}
}
// asJSON creates a JSON serialization of a match, used purely as a key for
// match-equality comparison. We don't use the JSON text itself.
func asJSON(data any) string {
	// Strip the (purely cosmetic) name from HTTP matches so that two matches
	// differing only by name still serialize identically.
	if mr, ok := data.(*networking.HTTPMatchRequest); ok && mr != nil && mr.Name != "" {
		cl := &networking.HTTPMatchRequest{}
		protomarshal.ShallowCopy(cl, mr)
		cl.Name = ""
		data = cl
	}
	b, err := json.Marshal(data)
	if err != nil {
		// Fall back to the error text; still deterministic per input.
		return err.Error()
	}
	return string(b)
}
// routeName returns a human-readable identifier for a route: its quoted name
// for named HTTP routes, otherwise its positional index ("#3").
func routeName(route any, routen int) string {
	if r, ok := route.(*networking.HTTPRoute); ok && r.Name != "" {
		return fmt.Sprintf("%q", r.Name)
	}
	// TCP and TLS routes have no names
	return fmt.Sprintf("#%d", routen)
}
// requestName returns a human-readable identifier for a match clause: its
// quoted name for named HTTP matches, otherwise its positional index.
func requestName(match any, matchn int) string {
	if mr, ok := match.(*networking.HTTPMatchRequest); ok && mr != nil && mr.Name != "" {
		return fmt.Sprintf("%q", mr.Name)
	}
	// TCP and TLS matches have no names
	return fmt.Sprintf("#%d", matchn)
}
// validateTLSRoute validates one TLS route of a VirtualService: it must have
// at least one match condition and at least one destination, and each match
// and destination must itself be valid.
func validateTLSRoute(tls *networking.TLSRoute, context *networking.VirtualService, gatewaySemantics bool) (errs Validation) {
	if tls == nil {
		return
	}
	if len(tls.Match) == 0 {
		errs = appendValidation(errs, errors.New("TLS route must have at least one match condition"))
	}
	for _, m := range tls.Match {
		errs = appendValidation(errs, validateTLSMatch(m, context))
	}
	if len(tls.Route) == 0 {
		errs = appendValidation(errs, errors.New("TLS route is required"))
	}
	errs = appendValidation(errs, validateRouteDestinations(tls.Route, gatewaySemantics))
	return errs
}
// validateTLSMatch validates a single TLS match clause: SNI hosts are
// mandatory and must be compatible with the virtual service's hosts, and
// subnets, port, source labels and gateway references must be well-formed.
func validateTLSMatch(match *networking.TLSMatchAttributes, context *networking.VirtualService) (errs Validation) {
	if match == nil {
		return appendValidation(errs, errors.New("TLS match may not be null"))
	}
	if len(match.SniHosts) == 0 {
		errs = appendValidation(errs, fmt.Errorf("TLS match must have at least one SNI host"))
	} else {
		for _, sni := range match.SniHosts {
			errs = appendValidation(errs, validateSniHost(sni, context))
		}
	}
	for _, subnet := range match.DestinationSubnets {
		errs = appendValidation(errs, ValidateIPSubnet(subnet))
	}
	// Port 0 means "unset"; only explicit ports are range-checked.
	if match.Port != 0 {
		errs = appendValidation(errs, ValidatePort(int(match.Port)))
	}
	errs = appendValidation(errs, labels.Instance(match.SourceLabels).Validate())
	errs = appendValidation(errs, validateGatewayNames(match.Gateways))
	return
}
// validateSniHost checks that an SNI host is a valid (wildcard) domain name
// — an IP address is accepted with a warning since it violates the SNI spec
// — and that it is a subset of one of the virtual service's hosts.
func validateSniHost(sniHost string, context *networking.VirtualService) (errs Validation) {
	if err := ValidateWildcardDomain(sniHost); err != nil {
		// Could also be an IP
		if netutil.IsValidIPAddress(sniHost) {
			return appendValidation(errs, WrapWarning(fmt.Errorf("using an IP address (%q) goes against SNI spec and most clients do not support this", sniHost)))
		}
		return appendValidation(errs, err)
	}
	sniHostname := host.Name(sniHost)
	for _, h := range context.Hosts {
		if sniHostname.SubsetOf(host.Name(h)) {
			return
		}
	}
	return appendValidation(errs, fmt.Errorf("SNI host %q is not a compatible subset of any of the virtual service hosts: [%s]",
		sniHost, strings.Join(context.Hosts, ", ")))
}
// validateTCPRoute validates one TCP route of a VirtualService: each match
// clause must be valid and at least one destination must be present.
func validateTCPRoute(tcp *networking.TCPRoute, gatewaySemantics bool) (errs error) {
	if tcp == nil {
		return nil
	}
	for _, m := range tcp.Match {
		errs = appendErrors(errs, validateTCPMatch(m))
	}
	if len(tcp.Route) == 0 {
		errs = appendErrors(errs, errors.New("TCP route is required"))
	}
	errs = appendErrors(errs, validateRouteDestinations(tcp.Route, gatewaySemantics))
	return errs
}
// validateTCPMatch validates a single L4 match clause: destination subnets,
// explicit port, source labels and gateway references.
func validateTCPMatch(match *networking.L4MatchAttributes) (errs error) {
	if match == nil {
		return multierror.Append(errs, errors.New("tcp match may not be nil"))
	}
	for _, subnet := range match.DestinationSubnets {
		errs = appendErrors(errs, ValidateIPSubnet(subnet))
	}
	// Port 0 means "unset"; only explicit ports are range-checked.
	if match.Port != 0 {
		errs = appendErrors(errs, ValidatePort(int(match.Port)))
	}
	errs = appendErrors(errs, labels.Instance(match.SourceLabels).Validate())
	errs = appendErrors(errs, validateGatewayNames(match.Gateways))
	return
}
// validateStringMatchRegexp validates sm only when it is a regex-type match;
// exact and prefix matches are accepted as-is.
func validateStringMatchRegexp(sm *networking.StringMatch, where string) error {
	// Proto getters are nil-safe, so a nil sm simply fails the assertion.
	if _, isRegex := sm.GetMatchType().(*networking.StringMatch_Regex); !isRegex {
		return nil
	}
	re := sm.GetRegex()
	if re == "" {
		return fmt.Errorf("%q: regex string match should not be empty", where)
	}
	return validateStringRegexp(re, where)
}
// validateStringRegexp checks that a user-supplied regex is bounded in size
// and compiles as an RE2 expression.
func validateStringRegexp(re string, where string) error {
	// Envoy enforces a re2.max_program_size.error_level limit. An re2
	// program's size is not the same as its length, but it is always
	// *larger* than the length, so length is used here as a conservative
	// approximation (Go has no way to evaluate the actual program size).
	// Envoy itself is configured to allow very large regexes to avoid NACKs.
	// See https://github.com/jpeach/snippets/blob/889fda84cc8713af09205438b33553eb69dd5355/re2sz.cc
	// to evaluate program size.
	const maxRegexLength = 1024
	if len(re) > maxRegexLength {
		return fmt.Errorf("%q: regex is too large, max length allowed is 1024", where)
	}
	if _, err := regexp.Compile(re); err != nil {
		return fmt.Errorf("%q: %w; Istio uses RE2 style regex-based match (https://github.com/google/re2/wiki/Syntax)", where, err)
	}
	return nil
}
// validateGatewayNames validates a list of gateway references. The canonical
// form is <namespace>/<name>; a legacy FQDN form containing dots is accepted
// with a deprecation warning and validated as an FQDN instead.
func validateGatewayNames(gatewayNames []string) (errs Validation) {
	for _, gatewayName := range gatewayNames {
		parts := strings.SplitN(gatewayName, "/", 2)
		if len(parts) != 2 {
			if strings.Contains(gatewayName, ".") {
				// Legacy FQDN style: suggest the <namespace>/<name> rewrite
				// (name.namespace... -> namespace/name).
				parts := strings.Split(gatewayName, ".")
				recommended := fmt.Sprintf("%s/%s", parts[1], parts[0])
				errs = appendValidation(errs, WrapWarning(fmt.Errorf(
					"using legacy gatewayName format %q; prefer the <namespace>/<name> format: %q", gatewayName, recommended)))
			}
			errs = appendValidation(errs, ValidateFQDN(gatewayName))
			// NOTE(review): this return exits the whole function after the
			// first non-namespaced name, leaving any remaining entries
			// unvalidated — confirm whether this short-circuit is intended.
			return
		}
		if len(parts[0]) == 0 || len(parts[1]) == 0 {
			errs = appendValidation(errs, fmt.Errorf("config namespace and gateway name cannot be empty"))
		}
		// namespace and name must be DNS labels
		if !labels.IsDNS1123Label(parts[0]) {
			errs = appendValidation(errs, fmt.Errorf("invalid value for namespace: %q", parts[0]))
		}
		if !labels.IsDNS1123Label(parts[1]) {
			errs = appendValidation(errs, fmt.Errorf("invalid value for gateway name: %q", parts[1]))
		}
	}
	return
}
// validateHTTPRouteDestinations validates the weighted destinations of an
// HTTP route, including any per-destination request/response header
// manipulations. When gatewaySemantics is set (gateway-API translated
// config), the destination host itself is not validated.
func validateHTTPRouteDestinations(weights []*networking.HTTPRouteDestination, gatewaySemantics bool) (errs error) {
	var totalWeight int32
	for _, weight := range weights {
		if weight == nil {
			errs = multierror.Append(errs, errors.New("weight may not be nil"))
			continue
		}
		if weight.Destination == nil {
			errs = multierror.Append(errs, errors.New("destination is required"))
		}
		// header manipulations: request add/set may touch :authority, so they
		// use the authority-aware name validator; all others use the plain one.
		for name, val := range weight.Headers.GetRequest().GetAdd() {
			errs = appendErrors(errs, ValidateHTTPHeaderWithAuthorityOperationName(name))
			errs = appendErrors(errs, ValidateHTTPHeaderValue(val))
		}
		for name, val := range weight.Headers.GetRequest().GetSet() {
			errs = appendErrors(errs, ValidateHTTPHeaderWithAuthorityOperationName(name))
			errs = appendErrors(errs, ValidateHTTPHeaderValue(val))
		}
		for _, name := range weight.Headers.GetRequest().GetRemove() {
			errs = appendErrors(errs, ValidateHTTPHeaderOperationName(name))
		}
		for name, val := range weight.Headers.GetResponse().GetAdd() {
			errs = appendErrors(errs, ValidateHTTPHeaderOperationName(name))
			errs = appendErrors(errs, ValidateHTTPHeaderValue(val))
		}
		for name, val := range weight.Headers.GetResponse().GetSet() {
			errs = appendErrors(errs, ValidateHTTPHeaderOperationName(name))
			errs = appendErrors(errs, ValidateHTTPHeaderValue(val))
		}
		for _, name := range weight.Headers.GetResponse().GetRemove() {
			errs = appendErrors(errs, ValidateHTTPHeaderOperationName(name))
		}
		if !gatewaySemantics {
			errs = appendErrors(errs, validateDestination(weight.Destination))
		}
		errs = appendErrors(errs, validateWeight(weight.Weight))
		totalWeight += weight.Weight
	}
	// With multiple destinations, at least one must carry traffic.
	if len(weights) > 1 && totalWeight == 0 {
		errs = appendErrors(errs, fmt.Errorf("total destination weight = 0"))
	}
	return
}
// validateRouteDestinations validates the weighted destinations of a TCP/TLS
// route. When gatewaySemantics is set (gateway-API translated config), the
// destination host itself is not validated.
func validateRouteDestinations(weights []*networking.RouteDestination, gatewaySemantics bool) (errs error) {
	var totalWeight int32
	for _, w := range weights {
		if w == nil {
			errs = multierror.Append(errs, errors.New("weight may not be nil"))
			continue
		}
		if w.Destination == nil {
			errs = multierror.Append(errs, errors.New("destination is required"))
		}
		if !gatewaySemantics {
			errs = appendErrors(errs, validateDestination(w.Destination))
		}
		errs = appendErrors(errs, validateWeight(w.Weight))
		totalWeight += w.Weight
	}
	// With multiple destinations, at least one must carry traffic.
	if len(weights) > 1 && totalWeight == 0 {
		errs = appendErrors(errs, fmt.Errorf("total destination weight = 0"))
	}
	return
}
// validateCORSPolicy validates a CORS policy: origins, methods, header
// names, and the max-age duration.
func validateCORSPolicy(policy *networking.CorsPolicy) (errs error) {
	if policy == nil {
		return
	}
	for _, origin := range policy.AllowOrigins {
		errs = appendErrors(errs, validateAllowOrigins(origin))
	}
	for _, method := range policy.AllowMethods {
		errs = appendErrors(errs, validateHTTPMethod(method))
	}
	for _, header := range policy.AllowHeaders {
		errs = appendErrors(errs, ValidateHTTPHeaderName(header))
	}
	for _, header := range policy.ExposeHeaders {
		errs = appendErrors(errs, ValidateHTTPHeaderName(header))
	}
	if maxAge := policy.MaxAge; maxAge != nil {
		errs = appendErrors(errs, ValidateDuration(maxAge))
		// Sub-second precision is silently lost downstream, so reject it.
		if maxAge.Nanos > 0 {
			errs = multierror.Append(errs, errors.New("max_age duration is accurate only to seconds precision"))
		}
	}
	return
}
// validateAllowOrigins validates a single CORS allow-origin entry: it must
// carry a non-empty exact, prefix, or regex match, and a regex match must be
// a valid bounded RE2 expression.
func validateAllowOrigins(origin *networking.StringMatch) error {
	// Guard against a null list entry: the type switch below dereferences
	// origin, which previously panicked on nil.
	if origin == nil {
		return fmt.Errorf("'' is not a valid match type for CORS allow origins")
	}
	var match string
	switch origin.MatchType.(type) {
	case *networking.StringMatch_Exact:
		match = origin.GetExact()
	case *networking.StringMatch_Prefix:
		match = origin.GetPrefix()
	case *networking.StringMatch_Regex:
		match = origin.GetRegex()
	}
	// An unset oneof or an empty value both end up here.
	if match == "" {
		return fmt.Errorf("'%v' is not a valid match type for CORS allow origins", match)
	}
	return validateStringMatchRegexp(origin, "corsPolicy.allowOrigins")
}
// validateHTTPMethod checks the method against the allow-list of supported
// HTTP methods.
func validateHTTPMethod(method string) error {
	if supportedMethods[method] {
		return nil
	}
	return fmt.Errorf("%q is not a supported HTTP method", method)
}
// validateHTTPFaultInjection validates a fault-injection spec: at least one
// of abort/delay must be configured, and each configured part must be valid.
func validateHTTPFaultInjection(fault *networking.HTTPFaultInjection) (errs error) {
	if fault == nil {
		return
	}
	// An entirely empty fault spec is almost certainly a user error.
	if fault.Abort == nil && fault.Delay == nil {
		errs = multierror.Append(errs, errors.New("HTTP fault injection must have an abort and/or a delay"))
	}
	errs = appendErrors(errs, validateHTTPFaultInjectionAbort(fault.Abort))
	errs = appendErrors(errs, validateHTTPFaultInjectionDelay(fault.Delay))
	return
}
// validateHTTPFaultInjectionAbort validates the abort half of a fault spec:
// the percentage must be valid and the error type (gRPC status, HTTP/2
// error, or HTTP status) must be set and well-formed. A missing error type
// is only a warning, not an error.
func validateHTTPFaultInjectionAbort(abort *networking.HTTPFaultInjection_Abort) (errs Validation) {
	if abort == nil {
		return
	}
	errs = appendValidation(errs, validatePercentage(abort.Percentage))
	switch abort.ErrorType.(type) {
	case *networking.HTTPFaultInjection_Abort_GrpcStatus:
		errs = appendValidation(errs, validateGRPCStatus(abort.GetGrpcStatus()))
	case *networking.HTTPFaultInjection_Abort_Http2Error:
		// TODO: HTTP2 error validation
		errs = appendValidation(errs, errors.New("HTTP/2 abort fault injection not supported yet"))
	case *networking.HTTPFaultInjection_Abort_HttpStatus:
		errs = appendValidation(errs, validateHTTPStatus(abort.GetHttpStatus()))
	default:
		// Unset oneof: warn rather than reject.
		errs = appendWarningf(errs, "abort configured, but error type not set")
	}
	return
}
// validateHTTPStatus checks that status is a usable HTTP response code in
// the range [200, 599], matching both this function's error message and
// Envoy's own proto validation for fault/direct-response statuses
// (gte: 200, lt: 600).
func validateHTTPStatus(status int32) error {
	// The upper bound was previously "> 600", which accepted 600 even though
	// the error message (and Envoy) only permit 200-599.
	if status < 200 || status > 599 {
		return fmt.Errorf("HTTP status %d is not in range 200-599", status)
	}
	return nil
}
// validateGRPCStatus checks status against the set of canonical gRPC status
// code names (e.g. "NOT_FOUND").
func validateGRPCStatus(status string) error {
	if _, ok := grpc.SupportedGRPCStatus[status]; ok {
		return nil
	}
	return fmt.Errorf("gRPC status %q is not supported. See https://github.com/grpc/grpc/blob/master/doc/statuscodes.md "+
		"for a list of supported codes, for example 'NOT_FOUND'", status)
}
// validateHTTPFaultInjectionDelay validates the delay half of a fault spec:
// percentage plus the configured delay type. Exponential delays are
// duration-checked but still rejected as unimplemented.
func validateHTTPFaultInjectionDelay(delay *networking.HTTPFaultInjection_Delay) (errs error) {
	if delay == nil {
		return
	}
	errs = appendErrors(errs, validatePercentage(delay.Percentage))
	switch d := delay.HttpDelayType.(type) {
	case *networking.HTTPFaultInjection_Delay_FixedDelay:
		errs = appendErrors(errs, ValidateDuration(d.FixedDelay))
	case *networking.HTTPFaultInjection_Delay_ExponentialDelay:
		errs = appendErrors(errs, ValidateDuration(d.ExponentialDelay))
		errs = multierror.Append(errs, fmt.Errorf("exponentialDelay not supported yet"))
	}
	return
}
// validateDestination checks a route destination: the host must be a valid
// (possibly wildcard) domain — but not the bare "*" — and subset and port,
// when set, must be well-formed.
func validateDestination(destination *networking.Destination) (errs error) {
	if destination == nil {
		return
	}
	switch h := destination.Host; h {
	case "*":
		errs = appendErrors(errs, fmt.Errorf("invalid destination host %s", h))
	default:
		errs = appendErrors(errs, ValidateWildcardDomain(h))
	}
	if subset := destination.Subset; subset != "" {
		errs = appendErrors(errs, validateSubsetName(subset))
	}
	if port := destination.Port; port != nil {
		errs = appendErrors(errs, validatePortSelector(port))
	}
	return
}
// validateSubsetName checks that a DestinationRule subset name is a
// non-empty DNS-1123 label.
func validateSubsetName(name string) error {
	switch {
	case len(name) == 0:
		return fmt.Errorf("subset name cannot be empty")
	case !labels.IsDNS1123Label(name):
		return fmt.Errorf("subset name is invalid: %s", name)
	default:
		return nil
	}
}
// validatePortSelector validates a numeric port selector; a nil selector is
// accepted (port unset).
func validatePortSelector(selector *networking.PortSelector) (errs error) {
	if selector == nil {
		return nil
	}
	// Only numeric port selection is supported.
	return appendErrors(errs, ValidatePort(int(selector.GetNumber())))
}
// validateHTTPRetry validates a retry policy: a non-negative attempt count,
// no stray retry settings when retries are disabled (attempts == 0), a valid
// per-try timeout, and a retryOn list of known policy names and/or HTTP
// status codes.
func validateHTTPRetry(retries *networking.HTTPRetry) (errs error) {
	if retries == nil {
		return
	}
	if retries.Attempts < 0 {
		errs = multierror.Append(errs, errors.New("attempts cannot be negative"))
	}
	if retries.Attempts == 0 && (retries.PerTryTimeout != nil || retries.RetryOn != "" || retries.RetryRemoteLocalities != nil) {
		errs = appendErrors(errs, errors.New("http retry policy configured when attempts are set to 0 (disabled)"))
	}
	if retries.PerTryTimeout != nil {
		errs = appendErrors(errs, ValidateDuration(retries.PerTryTimeout))
	}
	if retries.RetryOn != "" {
		for _, policy := range strings.Split(retries.RetryOn, ",") {
			// Each entry is either a named retry policy or a numeric HTTP
			// status code. Atoi's error can be ignored: a non-numeric policy
			// yields 0, and StatusText(0) is "" so the name check still runs.
			code, _ := strconv.Atoi(policy)
			if http.StatusText(code) == "" && !supportedRetryOnPolicies[policy] {
				errs = appendErrors(errs, fmt.Errorf("%q is not a valid retryOn policy", policy))
			}
		}
	}
	return
}
// validateHTTPRedirect validates a redirect action: it must change at least
// one component of the request, use a 3xx code (when explicit), a http/https
// scheme, and a valid port.
func validateHTTPRedirect(redirect *networking.HTTPRedirect) error {
	if redirect == nil {
		return nil
	}
	// A redirect that changes nothing would be a no-op.
	if redirect.Uri == "" && redirect.Authority == "" && redirect.RedirectPort == nil && redirect.Scheme == "" {
		return errors.New("redirect must specify URI, authority, scheme, or port")
	}
	// 0 means "use the default code"; anything else must be in the 3xx class.
	if code := redirect.RedirectCode; code != 0 && (code < 300 || code > 399) {
		return fmt.Errorf("%d is not a valid redirect code, must be 3xx", code)
	}
	switch redirect.Scheme {
	case "", "http", "https":
		// ok
	default:
		return fmt.Errorf(`invalid redirect scheme, must be "http" or "https"`)
	}
	if port := redirect.GetPort(); port > 0 {
		if err := ValidatePort(int(port)); err != nil {
			return err
		}
	}
	return nil
}
// validateHTTPDirectResponse validates a direct-response action: the body is
// size-limited (warning above 100KB, error above 1MB) because it is carried
// through the control plane, and the status must be a valid HTTP status.
func validateHTTPDirectResponse(directResponse *networking.HTTPDirectResponse) (errs Validation) {
	if directResponse == nil {
		return
	}
	if directResponse.Body != nil {
		var size int
		switch body := directResponse.Body.Specifier.(type) {
		case *networking.HTTPBody_String_:
			size = len(body.String_)
		case *networking.HTTPBody_Bytes:
			size = len(body.Bytes)
		}
		switch {
		case size > 1*mb:
			errs = appendValidation(errs, WrapError(fmt.Errorf("large direct_responses may impact control plane performance, must be less than 1MB")))
		case size > 100*kb:
			errs = appendValidation(errs, WrapWarning(fmt.Errorf("large direct_responses may impact control plane performance")))
		}
	}
	errs = appendValidation(errs, WrapError(validateHTTPStatus(int32(directResponse.Status))))
	return
}
// validateHTTPMirrors validates a list of traffic-mirroring policies: each
// entry must have a valid destination and, when set, a percentage in
// [0, 100].
func validateHTTPMirrors(mirrors []*networking.HTTPMirrorPolicy) error {
	errs := Validation{}
	for _, mirror := range mirrors {
		if mirror == nil {
			errs = appendValidation(errs, errors.New("mirror cannot be null"))
			continue
		}
		if mirror.Destination == nil {
			errs = appendValidation(errs, errors.New("destination is required for mirrors"))
			continue
		}
		errs = appendValidation(errs, validateDestination(mirror.Destination))
		if pct := mirror.Percentage; pct != nil {
			v := pct.GetValue()
			if v > 100 {
				errs = appendValidation(errs, fmt.Errorf("mirror percentage must have a max value of 100 (it has %f)", v))
			}
			if v < 0 {
				errs = appendValidation(errs, fmt.Errorf("mirror percentage must have a min value of 0 (it has %f)", v))
			}
		}
	}
	return errs
}
// validateHTTPRewrite validates a rewrite action: URI and UriRegexRewrite
// are mutually exclusive, and at least one of URI, UriRegexRewrite, or
// authority must be set.
func validateHTTPRewrite(rewrite *networking.HTTPRewrite) error {
	if rewrite == nil {
		return nil
	}
	hasURI := rewrite.Uri != ""
	hasRegex := rewrite.UriRegexRewrite != nil
	if hasURI && hasRegex {
		return errors.New("rewrite may only contain one of URI or UriRegexRewrite")
	}
	if !hasURI && !hasRegex && rewrite.Authority == "" {
		return errors.New("rewrite must specify at least one of URI, UriRegexRewrite, or authority. Only one of URI or UriRegexRewrite may be specified")
	}
	if err := validateURIRegexRewrite(rewrite.UriRegexRewrite); err != nil {
		return errors.Join(errors.New("UriRegexRewrite has errors"), err)
	}
	return nil
}
// validateURIRegexRewrite validates a regex-based rewrite: both Match and
// Rewrite are mandatory and Match must be a valid RE2 regex.
func validateURIRegexRewrite(regexRewrite *networking.RegexRewrite) error {
	if regexRewrite == nil {
		return nil
	}
	if regexRewrite.Match == "" || regexRewrite.Rewrite == "" {
		return errors.New("UriRegexRewrite requires both Rewrite and Match fields to be specified")
	}
	return validateStringRegexp(regexRewrite.Match, "HTTPRewrite.UriRegexRewrite.Match")
}
// ValidateWorkloadEntry validates a workload entry.
var ValidateWorkloadEntry = registerValidateFunc("ValidateWorkloadEntry",
	func(cfg config.Config) (Warning, error) {
		we, ok := cfg.Spec.(*networking.WorkloadEntry)
		if !ok {
			return nil, fmt.Errorf("cannot cast to workload entry")
		}
		// TODO: We currently don't validate if we port is part of service port, as that is tricky to do without ServiceEntry
		// servicePorts=nil skips the port cross-check; allowFQDNAddresses=true
		// permits DNS-name addresses for standalone entries.
		return validateWorkloadEntry(we, nil, true).Unwrap()
	})
// validateWorkloadEntry validates a single workload entry.
//
// servicePorts, when non-nil, is the set of port names defined by the owning
// ServiceEntry; endpoint ports must then be drawn from it. allowFQDNAddresses
// permits the address to be a DNS name rather than an IP or unix socket.
func validateWorkloadEntry(we *networking.WorkloadEntry, servicePorts map[string]bool, allowFQDNAddresses bool) Validation {
	errs := Validation{}
	unixEndpoint := false
	addr := we.GetAddress()
	if addr == "" {
		if we.Network == "" {
			return appendErrorf(errs, "address is required")
		}
		// An empty address is tolerated (with a warning) when a network is set.
		errs = appendWarningf(errs, "address is unset with network %q", we.Network)
	}
	// Since we don't know if it's meant to be DNS or STATIC type without association with a ServiceEntry,
	// check based on content and try validations.
	// First check if it is a Unix endpoint - this will be specified for STATIC.
	if strings.HasPrefix(addr, UnixAddressPrefix) {
		unixEndpoint = true
		errs = appendValidation(errs, ValidateUnixAddress(strings.TrimPrefix(addr, UnixAddressPrefix)))
		if len(we.Ports) != 0 {
			errs = appendValidation(errs, fmt.Errorf("unix endpoint %s must not include ports", addr))
		}
	} else if addr != "" && !netutil.IsValidIPAddress(addr) { // This could be IP (in STATIC resolution) or DNS host name (for DNS).
		if !allowFQDNAddresses {
			errs = appendValidation(errs, fmt.Errorf("endpoint address %q is not a valid IP address", addr))
		} else if err := ValidateFQDN(addr); err != nil { // Otherwise could be an FQDN
			errs = appendValidation(errs, fmt.Errorf("endpoint address %q is not a valid FQDN or an IP address", addr))
		}
	}
	// Validate the labels exactly once. (Previously this validation was
	// appended twice — before and after the ports loop — duplicating any
	// label errors in the output.)
	errs = appendValidation(errs, labels.Instance(we.Labels).Validate())
	for name, port := range we.Ports {
		if servicePorts != nil && !servicePorts[name] {
			errs = appendValidation(errs, fmt.Errorf("endpoint port %v is not defined by the service entry", port))
		}
		errs = appendValidation(errs,
			ValidatePortName(name),
			ValidatePort(int(port)),
		)
	}
	// A unix-socket endpoint can only back a single service port.
	if unixEndpoint && servicePorts != nil && len(servicePorts) != 1 {
		errs = appendValidation(errs, errors.New("exactly 1 service port required for unix endpoints"))
	}
	return errs
}
// ValidateWorkloadGroup validates a workload group.
var ValidateWorkloadGroup = registerValidateFunc("ValidateWorkloadGroup",
	func(cfg config.Config) (warnings Warning, errs error) {
		wg, ok := cfg.Spec.(*networking.WorkloadGroup)
		if !ok {
			return nil, fmt.Errorf("cannot cast to workload entry")
		}
		// The template is the only mandatory field.
		if wg.Template == nil {
			return nil, fmt.Errorf("template is required")
		}
		// Do not call validateWorkloadEntry. Some fields, such as address, are required in WorkloadEntry
		// but not in the template since they are auto populated
		if wg.Metadata != nil {
			if err := labels.Instance(wg.Metadata.Labels).Validate(); err != nil {
				return nil, fmt.Errorf("invalid labels: %v", err)
			}
		}
		return nil, validateReadinessProbe(wg.Probe)
	})
// validateReadinessProbe validates a workload readiness probe: all timing
// fields must be non-negative, and the configured health-check method
// (httpGet / tcpSocket / exec) must be well-formed. A nil probe is valid.
func validateReadinessProbe(probe *networking.ReadinessProbe) (errs error) {
	if probe == nil {
		return nil
	}
	if probe.PeriodSeconds < 0 {
		errs = appendErrors(errs, fmt.Errorf("periodSeconds must be non-negative"))
	}
	if probe.InitialDelaySeconds < 0 {
		errs = appendErrors(errs, fmt.Errorf("initialDelaySeconds must be non-negative"))
	}
	if probe.TimeoutSeconds < 0 {
		errs = appendErrors(errs, fmt.Errorf("timeoutSeconds must be non-negative"))
	}
	if probe.SuccessThreshold < 0 {
		errs = appendErrors(errs, fmt.Errorf("successThreshold must be non-negative"))
	}
	if probe.FailureThreshold < 0 {
		errs = appendErrors(errs, fmt.Errorf("failureThreshold must be non-negative"))
	}
	// Exactly one health-check method is carried in the proto oneof.
	switch m := probe.HealthCheckMethod.(type) {
	case *networking.ReadinessProbe_HttpGet:
		h := m.HttpGet
		if h == nil {
			errs = appendErrors(errs, fmt.Errorf("httpGet may not be nil"))
			break
		}
		errs = appendErrors(errs, ValidatePort(int(h.Port)))
		// Scheme, when set, must be http or https (compared against the
		// mirrored Kubernetes URIScheme constants).
		if h.Scheme != "" && h.Scheme != string(apimirror.URISchemeHTTPS) && h.Scheme != string(apimirror.URISchemeHTTP) {
			errs = appendErrors(errs, fmt.Errorf(`httpGet.scheme must be one of "http", "https"`))
		}
		for _, header := range h.HttpHeaders {
			if header == nil {
				errs = appendErrors(errs, fmt.Errorf("invalid nil header"))
				continue
			}
			errs = appendErrors(errs, ValidateHTTPHeaderName(header.Name))
		}
	case *networking.ReadinessProbe_TcpSocket:
		h := m.TcpSocket
		if h == nil {
			errs = appendErrors(errs, fmt.Errorf("tcpSocket may not be nil"))
			break
		}
		errs = appendErrors(errs, ValidatePort(int(h.Port)))
	case *networking.ReadinessProbe_Exec:
		h := m.Exec
		if h == nil {
			errs = appendErrors(errs, fmt.Errorf("exec may not be nil"))
			break
		}
		if len(h.Command) == 0 {
			errs = appendErrors(errs, fmt.Errorf("exec.command is required"))
		}
	default:
		// Includes an unset oneof (m == nil).
		errs = appendErrors(errs, fmt.Errorf("unknown health check method %T", m))
	}
	return errs
}
// ValidateServiceEntry validates a service entry.
//
// Checks performed, in order: workload selector vs. endpoints exclusivity,
// host syntax (no full wildcard), address/CIDR syntax and compatibility with
// the resolution mode, service port uniqueness (by name and number), and
// per-resolution-mode endpoint constraints. Returns warnings and errors via
// Validation.Unwrap().
var ValidateServiceEntry = registerValidateFunc("ValidateServiceEntry",
	func(cfg config.Config) (Warning, error) {
		serviceEntry, ok := cfg.Spec.(*networking.ServiceEntry)
		if !ok {
			return nil, fmt.Errorf("cannot cast to service entry")
		}
		errs := Validation{}
		warning, err := validateAlphaWorkloadSelector(serviceEntry.WorkloadSelector)
		if err != nil {
			return nil, err
		}
		// If workloadSelector is defined and labels are not set, it is most likely
		// a user error. Marking it as a warning to keep it backwards compatible.
		if warning != nil {
			errs = appendValidation(errs, WrapWarning(fmt.Errorf("service entry: %s, will be applied to all services in namespace",
				warning))) // nolint: stylecheck
		}
		// A workload selector already determines which workloads back the service,
		// so an explicit endpoint list is mutually exclusive with it.
		if serviceEntry.WorkloadSelector != nil && serviceEntry.Endpoints != nil {
			errs = appendValidation(errs, fmt.Errorf("only one of WorkloadSelector or Endpoints is allowed in Service Entry"))
		}
		if len(serviceEntry.Hosts) == 0 {
			errs = appendValidation(errs, fmt.Errorf("service entry must have at least one host"))
		}
		for _, hostname := range serviceEntry.Hosts {
			// Full wildcard is not allowed in the service entry.
			if hostname == "*" {
				errs = appendValidation(errs, fmt.Errorf("invalid host %s", hostname))
			} else {
				errs = appendValidation(errs, ValidateWildcardDomain(hostname))
				// Partial wildcards (e.g. "foo*.com") only produce a warning for
				// backwards compatibility.
				errs = appendValidation(errs, WrapWarning(validatePartialWildCard(hostname)))
			}
		}
		cidrFound := false
		for _, address := range serviceEntry.Addresses {
			cidrFound = cidrFound || strings.Contains(address, "/")
			errs = appendValidation(errs, ValidateIPSubnet(address))
		}
		if cidrFound {
			if serviceEntry.Resolution != networking.ServiceEntry_NONE && serviceEntry.Resolution != networking.ServiceEntry_STATIC {
				errs = appendValidation(errs, fmt.Errorf("CIDR addresses are allowed only for NONE/STATIC resolution types"))
			}
		}
		// Service ports must be unique by both name and number. servicePorts is
		// reused below to verify that endpoint ports reference declared ports.
		servicePortNumbers := make(map[uint32]bool)
		servicePorts := make(map[string]bool, len(serviceEntry.Ports))
		for _, port := range serviceEntry.Ports {
			if port == nil {
				errs = appendValidation(errs, fmt.Errorf("service entry port may not be null"))
				continue
			}
			if servicePorts[port.Name] {
				errs = appendValidation(errs, fmt.Errorf("service entry port name %q already defined", port.Name))
			}
			servicePorts[port.Name] = true
			if servicePortNumbers[port.Number] {
				errs = appendValidation(errs, fmt.Errorf("service entry port %d already defined", port.Number))
			}
			servicePortNumbers[port.Number] = true
			if port.TargetPort != 0 {
				errs = appendValidation(errs, ValidatePort(int(port.TargetPort)))
				// With resolution NONE the proxy forwards to the original
				// destination, so a targetPort is never consulted.
				if serviceEntry.Resolution == networking.ServiceEntry_NONE {
					errs = appendWarningf(errs, "targetPort has no effect when resolution mode is NONE")
				}
			}
			// Without addresses (VIPs), plain TCP traffic cannot be attributed to
			// this entry — warn rather than error for compatibility.
			if len(serviceEntry.Addresses) == 0 {
				if port.Protocol == "" || port.Protocol == "TCP" {
					errs = appendValidation(errs, WrapWarning(fmt.Errorf("addresses are required for ports serving TCP (or unset) protocol")))
				}
			}
			errs = appendValidation(errs,
				ValidatePortName(port.Name),
				ValidateProtocol(port.Protocol),
				ValidatePort(int(port.Number)))
		}
		// Endpoint constraints depend on the resolution mode.
		switch serviceEntry.Resolution {
		case networking.ServiceEntry_NONE:
			if len(serviceEntry.Endpoints) != 0 {
				errs = appendValidation(errs, fmt.Errorf("no endpoints should be provided for resolution type none"))
			}
		case networking.ServiceEntry_STATIC:
			for _, endpoint := range serviceEntry.Endpoints {
				if endpoint == nil {
					errs = appendValidation(errs, errors.New("endpoint cannot be nil"))
					continue
				}
				errs = appendValidation(errs, validateWorkloadEntry(endpoint, servicePorts, false))
			}
		case networking.ServiceEntry_DNS, networking.ServiceEntry_DNS_ROUND_ROBIN:
			// With no explicit endpoints, the hosts themselves are resolved via
			// DNS and therefore must be fully-qualified domain names.
			if len(serviceEntry.Endpoints) == 0 {
				for _, hostname := range serviceEntry.Hosts {
					if err := ValidateFQDN(hostname); err != nil {
						errs = appendValidation(errs,
							fmt.Errorf("hosts must be FQDN if no endpoints are provided for resolution mode %s", serviceEntry.Resolution))
					}
				}
			}
			if serviceEntry.Resolution == networking.ServiceEntry_DNS_ROUND_ROBIN && len(serviceEntry.Endpoints) > 1 {
				errs = appendValidation(errs,
					fmt.Errorf("there must only be 0 or 1 endpoint for resolution mode %s", serviceEntry.Resolution))
			}
			for _, endpoint := range serviceEntry.Endpoints {
				if endpoint == nil {
					errs = appendValidation(errs, errors.New("endpoint cannot be nil"))
					continue
				}
				if !netutil.IsValidIPAddress(endpoint.Address) {
					if err := ValidateFQDN(endpoint.Address); err != nil { // Otherwise could be an FQDN
						errs = appendValidation(errs,
							fmt.Errorf("endpoint address %q is not a valid FQDN or an IP address", endpoint.Address))
					}
				}
				errs = appendValidation(errs,
					labels.Instance(endpoint.Labels).Validate())
				// Endpoint ports must reference a declared service port name.
				for name, port := range endpoint.Ports {
					if !servicePorts[name] {
						errs = appendValidation(errs, fmt.Errorf("endpoint port %v is not defined by the service entry", port))
					}
					errs = appendValidation(errs,
						ValidatePortName(name),
						ValidatePort(int(port)))
				}
			}
			// A VIP plus a TCP port gives no way to demux between multiple hosts,
			// so warn on that combination.
			if len(serviceEntry.Addresses) > 0 {
				for _, port := range serviceEntry.Ports {
					if port == nil {
						errs = appendValidation(errs, errors.New("ports cannot be nil"))
						continue
					}
					p := protocol.Parse(port.Protocol)
					if p.IsTCP() {
						if len(serviceEntry.Hosts) > 1 {
							// TODO: prevent this invalid setting, maybe in 1.11+
							errs = appendValidation(errs, WrapWarning(fmt.Errorf("service entry can not have more than one host specified "+
								"simultaneously with address and tcp port")))
						}
						break
					}
				}
			}
		default:
			errs = appendValidation(errs, fmt.Errorf("unsupported resolution type %s",
				networking.ServiceEntry_Resolution_name[int32(serviceEntry.Resolution)]))
		}
		// multiple hosts and TCP is invalid unless the resolution type is NONE.
		// depending on the protocol, we can differentiate between hosts when proxying:
		// - with HTTP, the authority header can be used
		// - with HTTPS/TLS with SNI, the ServerName can be used
		// however, for plain TCP there is no way to differentiate between the
		// hosts so we consider it invalid, unless the resolution type is NONE
		// (because the hosts are ignored).
		if serviceEntry.Resolution != networking.ServiceEntry_NONE && len(serviceEntry.Hosts) > 1 {
			for _, port := range serviceEntry.Ports {
				if port == nil {
					errs = appendValidation(errs, errors.New("ports cannot be nil"))
					continue
				}
				p := protocol.Parse(port.Protocol)
				if !p.IsHTTP() && !p.IsTLS() {
					errs = appendValidation(errs, fmt.Errorf("multiple hosts provided with non-HTTP, non-TLS ports"))
					break
				}
			}
		}
		errs = appendValidation(errs, validateExportTo(cfg.Namespace, serviceEntry.ExportTo, true, false))
		return errs.Unwrap()
	})
// ValidatePortName checks that a port name is a valid DNS-1123 label.
func ValidatePortName(name string) error {
	if labels.IsDNS1123Label(name) {
		return nil
	}
	return fmt.Errorf("invalid port name: %s", name)
}
// ValidateProtocol validates that a protocol name is known.
// An empty string is accepted: it is used to request protocol sniffing.
func ValidateProtocol(protocolStr string) error {
	// Empty string is used for protocol sniffing.
	if protocolStr != "" && protocol.Parse(protocolStr) == protocol.Unsupported {
		return fmt.Errorf("unsupported protocol: %s", protocolStr)
	}
	return nil
}
// appendValidation merges additional validation results into v. It is a
// wrapper around multierror.Append that enforces the invariant that if all
// input errors are nil, the output error is nil (allowing validation without
// branching). Validation values are split into their error and warning parts;
// plain errors are appended to the error side only.
func appendValidation(v Validation, vs ...error) Validation {
	merge := func(a, b error) error {
		switch {
		case a == nil:
			return b
		case b == nil:
			return a
		default:
			return multierror.Append(a, b)
		}
	}
	for _, e := range vs {
		if val, ok := e.(Validation); ok {
			v.Err = merge(v.Err, val.Err)
			v.Warning = merge(v.Warning, val.Warning)
		} else {
			v.Err = merge(v.Err, e)
		}
	}
	return v
}
// appendErrorf records a formatted error on v and returns the result.
// nolint: unparam
func appendErrorf(v Validation, format string, a ...any) Validation {
	err := fmt.Errorf(format, a...)
	return appendValidation(v, err)
}
// appendWarningf records a formatted warning on v and returns the result.
// nolint: unparam
func appendWarningf(v Validation, format string, a ...any) Validation {
	w := Warningf(format, a...)
	return appendValidation(v, w)
}
// appendErrors folds errs into err. It is a wrapper around multierror.Append
// that enforces the invariant that if all input errors are nil, the output
// error is nil (allowing validation without branching). A Validation value
// contributes only its error part; its warnings are intentionally dropped.
func appendErrors(err error, errs ...error) error {
	merge := func(a, b error) error {
		switch {
		case a == nil:
			return b
		case b == nil:
			return a
		default:
			return multierror.Append(a, b)
		}
	}
	for _, e := range errs {
		if val, ok := e.(Validation); ok {
			err = merge(err, val.Err)
		} else {
			err = merge(err, e)
		}
	}
	return err
}
// validateLocalityLbSetting checks the LocalityLbSetting of MeshConfig.
//
// Rules enforced: 'distribute' and 'failover' are mutually exclusive;
// 'failover' cannot be combined with topology labels in 'failover_priority';
// each distribute entry's weights must be in [1, 100] and sum to exactly 100;
// source and destination localities must not overlap; failover entries must
// name two distinct plain regions (no '/' hierarchy, no '*' wildcard).
// Several checks return early on the first violation.
func validateLocalityLbSetting(lb *networking.LocalityLoadBalancerSetting, outlier *networking.OutlierDetection) (errs Validation) {
	if lb == nil {
		return
	}
	if len(lb.GetDistribute()) > 0 && len(lb.GetFailover()) > 0 {
		errs = appendValidation(errs, fmt.Errorf("can not simultaneously specify 'distribute' and 'failover'"))
		return
	}
	// failover and topology-label-based failover_priority are redundant;
	// reject the combination on the first topology label found.
	if len(lb.GetFailover()) > 0 && len(lb.GetFailoverPriority()) > 0 {
		for _, priorityLabel := range lb.GetFailoverPriority() {
			switch priorityLabel {
			case label.LabelTopologyRegion, label.LabelTopologyZone, label.LabelTopologySubzone:
				errs = appendValidation(errs, fmt.Errorf("can not simultaneously set 'failover' and topology label '%s' in 'failover_priority'", priorityLabel))
				return
			}
		}
	}
	srcLocalities := make([]string, 0, len(lb.GetDistribute()))
	for _, locality := range lb.GetDistribute() {
		srcLocalities = append(srcLocalities, locality.From)
		var totalWeight uint32
		destLocalities := make([]string, 0)
		for loc, weight := range locality.To {
			destLocalities = append(destLocalities, loc)
			if weight <= 0 || weight > 100 {
				errs = appendValidation(errs, fmt.Errorf("locality weight must be in range [1, 100]"))
				return
			}
			totalWeight += weight
		}
		// Weights of each distribute entry must add up to exactly 100.
		if totalWeight != 100 {
			errs = appendValidation(errs, fmt.Errorf("total locality weight %v != 100", totalWeight))
			return
		}
		errs = appendValidation(errs, validateLocalities(destLocalities))
	}
	errs = appendValidation(errs, validateLocalities(srcLocalities))
	// Failover requires outlier detection to mark endpoints unhealthy;
	// without it the setting silently has no effect, so warn.
	if (len(lb.GetFailover()) != 0 || len(lb.GetFailoverPriority()) != 0) && outlier == nil {
		errs = appendValidation(errs, WrapWarning(fmt.Errorf("outlier detection policy must be provided for failover")))
	}
	for _, failover := range lb.GetFailover() {
		if failover.From == failover.To {
			errs = appendValidation(errs, fmt.Errorf("locality lb failover settings must specify different regions"))
		}
		if strings.Contains(failover.From, "/") || strings.Contains(failover.To, "/") {
			errs = appendValidation(errs, fmt.Errorf("locality lb failover only specify region"))
		}
		if strings.Contains(failover.To, "*") || strings.Contains(failover.From, "*") {
			errs = appendValidation(errs, fmt.Errorf("locality lb failover region should not contain '*' wildcard"))
		}
	}
	return
}
// validateLocalities checks a list of region/zone/subzone locality strings for
// syntactic validity and for overlap among entries.
//
// A locality may contain at most one '*' wildcard, and only as its final
// segment. regionZoneSubZoneMap records, per region → zone → subzone, which
// scopes have already been claimed; a "*" key at any level means the whole
// level is covered, so any later entry under it overlaps. Returns the first
// violation found, or nil.
func validateLocalities(localities []string) error {
	regionZoneSubZoneMap := map[string]map[string]map[string]bool{}
	for _, locality := range localities {
		if n := strings.Count(locality, "*"); n > 0 {
			if n > 1 || !strings.HasSuffix(locality, "*") {
				return fmt.Errorf("locality %s wildcard '*' number can not exceed 1 and must be in the end", locality)
			}
		}
		// A previous bare "*" region covers everything, so any further entry overlaps.
		if _, exist := regionZoneSubZoneMap["*"]; exist {
			return fmt.Errorf("locality %s overlap with previous specified ones", locality)
		}
		region, zone, subZone, localityIndex, err := getLocalityParam(locality)
		if err != nil {
			return fmt.Errorf("locality %s must not contain empty region/zone/subzone info", locality)
		}
		switch localityIndex {
		case regionIndex:
			// Region-only entry: claims the whole region (marked by "*" sub-maps).
			if _, exist := regionZoneSubZoneMap[region]; exist {
				return fmt.Errorf("locality %s overlap with previous specified ones", locality)
			}
			regionZoneSubZoneMap[region] = map[string]map[string]bool{"*": {"*": true}}
		case zoneIndex:
			// Region/zone entry: conflicts with a full-region claim or the same zone.
			if _, exist := regionZoneSubZoneMap[region]; exist {
				if _, exist := regionZoneSubZoneMap[region]["*"]; exist {
					return fmt.Errorf("locality %s overlap with previous specified ones", locality)
				}
				if _, exist := regionZoneSubZoneMap[region][zone]; exist {
					return fmt.Errorf("locality %s overlap with previous specified ones", locality)
				}
				regionZoneSubZoneMap[region][zone] = map[string]bool{"*": true}
			} else {
				regionZoneSubZoneMap[region] = map[string]map[string]bool{zone: {"*": true}}
			}
		case subZoneIndex:
			// Region/zone/subzone entry: conflicts with any broader claim above it
			// or a duplicate subzone.
			if _, exist := regionZoneSubZoneMap[region]; exist {
				if _, exist := regionZoneSubZoneMap[region]["*"]; exist {
					return fmt.Errorf("locality %s overlap with previous specified ones", locality)
				}
				if _, exist := regionZoneSubZoneMap[region][zone]; exist {
					if regionZoneSubZoneMap[region][zone]["*"] {
						return fmt.Errorf("locality %s overlap with previous specified ones", locality)
					}
					if regionZoneSubZoneMap[region][zone][subZone] {
						return fmt.Errorf("locality %s overlap with previous specified ones", locality)
					}
					regionZoneSubZoneMap[region][zone][subZone] = true
				} else {
					regionZoneSubZoneMap[region][zone] = map[string]bool{subZone: true}
				}
			} else {
				regionZoneSubZoneMap[region] = map[string]map[string]bool{zone: {subZone: true}}
			}
		}
	}
	return nil
}
// getLocalityParam splits a "region/zone/subzone" locality string into its
// components. It returns the region, zone, and subzone (empty when absent),
// the index of the deepest populated level, and an error if any present
// segment is empty.
func getLocalityParam(locality string) (string, string, string, int, error) {
	var region, zone, subZone string
	parts := strings.SplitN(locality, "/", 3)
	for idx, part := range parts {
		if part == "" {
			return "", "", "", -1, errors.New("item is nil")
		}
		switch idx {
		case regionIndex:
			region = part
		case zoneIndex:
			zone = part
		case subZoneIndex:
			subZone = part
		}
	}
	return region, zone, subZone, len(parts) - 1, nil
}
// ValidateMeshNetworks validates meshnetworks: each network is checked
// individually, and failures are accumulated with the network name prefixed.
func ValidateMeshNetworks(meshnetworks *meshconfig.MeshNetworks) (errs error) {
	// TODO validate using the same gateway on multiple networks?
	for name, network := range meshnetworks.Networks {
		err := validateNetwork(network)
		if err == nil {
			continue
		}
		errs = multierror.Append(errs, multierror.Prefix(err, fmt.Sprintf("invalid network %v:", name)))
	}
	return
}
// validateNetwork validates a single mesh network: each endpoint's CIDR or
// registry name, and each gateway's registry service name or address plus its
// port. All failures are accumulated and returned as a multierror.
func validateNetwork(network *meshconfig.Network) (errs error) {
	for _, n := range network.Endpoints {
		switch e := n.Ne.(type) {
		case *meshconfig.Network_NetworkEndpoints_FromCidr:
			if err := ValidateIPSubnet(e.FromCidr); err != nil {
				errs = multierror.Append(errs, err)
			}
		case *meshconfig.Network_NetworkEndpoints_FromRegistry:
			if ok := labels.IsDNS1123Label(e.FromRegistry); !ok {
				errs = multierror.Append(errs, fmt.Errorf("invalid registry name: %v", e.FromRegistry))
			}
		}
	}
	for _, n := range network.Gateways {
		switch g := n.Gw.(type) {
		case *meshconfig.Network_IstioNetworkGateway_RegistryServiceName:
			if err := ValidateFQDN(g.RegistryServiceName); err != nil {
				errs = multierror.Append(errs, err)
			}
		case *meshconfig.Network_IstioNetworkGateway_Address:
			// A gateway address must be an IP; a hostname is accepted only when
			// RESOLVE_HOSTNAME_GATEWAYS is enabled, and then must be a valid FQDN.
			if ipErr := ValidateIPAddress(g.Address); ipErr != nil {
				if !features.ResolveHostnameGateways {
					err := fmt.Errorf("%v (hostname is allowed if RESOLVE_HOSTNAME_GATEWAYS is enabled)", ipErr)
					errs = multierror.Append(errs, err)
				} else if fqdnErr := ValidateFQDN(g.Address); fqdnErr != nil {
					// BUGFIX: multierror.Append was previously called without the
					// `errs` accumulator here, discarding all earlier errors.
					errs = multierror.Append(errs, fmt.Errorf("%v is not a valid IP address or DNS name", g.Address))
				}
			}
		}
		if err := ValidatePort(int(n.Port)); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return
}
// Error implements the error interface by returning the stored message.
func (aae *AnalysisAwareError) Error() string {
	return aae.Msg
}
// ValidateProxyConfig validates a ProxyConfig CR (as opposed to the MeshConfig field).
// It checks the workload selector and the concurrency setting.
var ValidateProxyConfig = registerValidateFunc("ValidateProxyConfig",
	func(cfg config.Config) (Warning, error) {
		pc, ok := cfg.Spec.(*networkingv1beta1.ProxyConfig)
		if !ok {
			return nil, fmt.Errorf("cannot cast to proxyconfig")
		}
		v := Validation{}
		v = appendValidation(v, validateWorkloadSelector(pc.Selector))
		v = appendValidation(v, validateConcurrency(pc.Concurrency.GetValue()))
		return v.Unwrap()
	})
// validateConcurrency rejects negative proxy concurrency values; zero and
// positive values are valid.
func validateConcurrency(concurrency int32) (v Validation) {
	if concurrency >= 0 {
		return
	}
	return appendErrorf(v, "concurrency must be greater than or equal to 0")
}
// ValidateTelemetry validates a Telemetry resource: its selector/targetRef
// (mutually exclusive), and its metrics, tracing, and access-logging sections.
var ValidateTelemetry = registerValidateFunc("ValidateTelemetry",
	func(cfg config.Config) (Warning, error) {
		t, ok := cfg.Spec.(*telemetry.Telemetry)
		if !ok {
			return nil, fmt.Errorf("cannot cast to telemetry")
		}
		v := Validation{}
		v = appendValidation(v, validateOneOfSelectorType(t.GetSelector(), t.GetTargetRef()))
		v = appendValidation(v, validateWorkloadSelector(t.GetSelector()))
		v = appendValidation(v, validatePolicyTargetReference(t.GetTargetRef()))
		v = appendValidation(v, validateTelemetryMetrics(t.Metrics))
		v = appendValidation(v, validateTelemetryTracing(t.Tracing))
		v = appendValidation(v, validateTelemetryAccessLogging(t.AccessLogging))
		return v.Unwrap()
	})
// validateTelemetryAccessLogging validates each access-logging entry's CEL
// filter expression (when present) and its provider references. Nil entries
// are skipped.
func validateTelemetryAccessLogging(logging []*telemetry.AccessLogging) (v Validation) {
	for _, entry := range logging {
		if entry == nil {
			continue
		}
		if f := entry.Filter; f != nil {
			v = appendValidation(v, validateTelemetryFilter(f))
		}
		v = appendValidation(v, validateTelemetryProviders(entry.Providers))
	}
	return
}
// validateTelemetryTracing validates the tracing section of a Telemetry spec:
// provider references, the sampling percentage range, and custom tag
// definitions. Multiple tracing blocks and multiple providers are accepted
// but flagged as warnings because they are not currently supported.
func validateTelemetryTracing(tracing []*telemetry.Tracing) (v Validation) {
	if len(tracing) > 1 {
		v = appendWarningf(v, "multiple tracing is not currently supported")
	}
	for _, l := range tracing {
		if l == nil {
			continue
		}
		if len(l.Providers) > 1 {
			v = appendWarningf(v, "multiple providers is not currently supported")
		}
		v = appendValidation(v, validateTelemetryProviders(l.Providers))
		// Sampling is a percentage; values outside [0, 100] are invalid.
		if l.RandomSamplingPercentage.GetValue() < 0 || l.RandomSamplingPercentage.GetValue() > 100 {
			v = appendErrorf(v, "randomSamplingPercentage must be in range [0.0, 100.0]")
		}
		for name, tag := range l.CustomTags {
			if name == "" {
				v = appendErrorf(v, "tag name may not be empty")
			}
			if tag == nil {
				v = appendErrorf(v, "tag '%s' may not have a nil value", name)
				continue
			}
			// Each tag variant must have its defining field non-empty.
			switch t := tag.Type.(type) {
			case *telemetry.Tracing_CustomTag_Literal:
				if t.Literal.GetValue() == "" {
					v = appendErrorf(v, "literal tag value may not be empty")
				}
			case *telemetry.Tracing_CustomTag_Header:
				if t.Header.GetName() == "" {
					v = appendErrorf(v, "header tag name may not be empty")
				}
			case *telemetry.Tracing_CustomTag_Environment:
				if t.Environment.GetName() == "" {
					v = appendErrorf(v, "environment tag name may not be empty")
				}
			}
		}
	}
	return
}
// validateTelemetryMetrics validates the metrics section of a Telemetry spec:
// provider references, tag override operations (UPSERT requires a value,
// REMOVE forbids one), and custom metric match names. Nil entries are
// reported or skipped as appropriate.
func validateTelemetryMetrics(metrics []*telemetry.Metrics) (v Validation) {
	for _, l := range metrics {
		if l == nil {
			continue
		}
		v = appendValidation(v, validateTelemetryProviders(l.Providers))
		for _, o := range l.Overrides {
			if o == nil {
				v = appendErrorf(v, "tagOverrides may not be null")
				continue
			}
			for tagName, to := range o.TagOverrides {
				// An empty tag name is tolerated with a warning only.
				if tagName == "" {
					v = appendWarningf(v, "tagOverrides.name may not be empty")
				}
				if to == nil {
					v = appendErrorf(v, "tagOverrides may not be null")
					continue
				}
				// The value field is meaningful only for UPSERT.
				switch to.Operation {
				case telemetry.MetricsOverrides_TagOverride_UPSERT:
					if to.Value == "" {
						v = appendErrorf(v, "tagOverrides.value must be set when operation is UPSERT")
					}
				case telemetry.MetricsOverrides_TagOverride_REMOVE:
					if to.Value != "" {
						v = appendErrorf(v, "tagOverrides.value may only be set when operation is UPSERT")
					}
				}
			}
			if o.Match != nil {
				switch mm := o.Match.MetricMatch.(type) {
				case *telemetry.MetricSelector_CustomMetric:
					if mm.CustomMetric == "" {
						v = appendErrorf(v, "customMetric may not be empty")
					}
				}
			}
		}
	}
	return
}
// validateTelemetryProviders checks that every provider reference is non-nil
// and has a non-empty name, returning an error on the first violation.
func validateTelemetryProviders(providers []*telemetry.ProviderRef) error {
	for _, ref := range providers {
		if ref != nil && ref.Name != "" {
			continue
		}
		return fmt.Errorf("providers.name may not be empty")
	}
	return nil
}
// ValidateWasmPlugin validates a WasmPlugin: selector/targetRef exclusivity,
// the plugin URL and SHA-256 digest, the VM configuration, and traffic match
// selectors.
var ValidateWasmPlugin = registerValidateFunc("ValidateWasmPlugin",
	func(cfg config.Config) (Warning, error) {
		plugin, ok := cfg.Spec.(*extensions.WasmPlugin)
		if !ok {
			return nil, fmt.Errorf("cannot cast to wasmplugin")
		}
		// figure out how to add check for targetRef and workload selector is not nil
		v := Validation{}
		v = appendValidation(v, validateOneOfSelectorType(plugin.GetSelector(), plugin.GetTargetRef()))
		v = appendValidation(v, validateWorkloadSelector(plugin.GetSelector()))
		v = appendValidation(v, validatePolicyTargetReference(plugin.GetTargetRef()))
		v = appendValidation(v, validateWasmPluginURL(plugin.Url))
		v = appendValidation(v, validateWasmPluginSHA(plugin))
		v = appendValidation(v, validateWasmPluginVMConfig(plugin.VmConfig))
		v = appendValidation(v, validateWasmPluginMatch(plugin.Match))
		return v.Unwrap()
	})
// validateWasmPluginURL checks that the plugin URL is set, parseable, and
// uses one of the supported schemes (empty, file, http, https, oci).
func validateWasmPluginURL(pluginURL string) error {
	if pluginURL == "" {
		return fmt.Errorf("url field needs to be set")
	}
	u, err := url.Parse(pluginURL)
	if err != nil {
		return fmt.Errorf("failed to parse url: %s", err)
	}
	switch u.Scheme {
	case "", "file", "http", "https", "oci":
		return nil
	}
	return fmt.Errorf("url contains unsupported scheme: %s", u.Scheme)
}
// validateWasmPluginSHA checks that the optional sha256 field, when present,
// is exactly 64 lowercase hexadecimal characters.
func validateWasmPluginSHA(plugin *extensions.WasmPlugin) error {
	sha := plugin.Sha256
	if sha == "" {
		return nil
	}
	if len(sha) != 64 {
		return fmt.Errorf("sha256 field must be 64 characters long")
	}
	for _, c := range sha {
		isHex := ('0' <= c && c <= '9') || ('a' <= c && c <= 'f')
		if !isHex {
			return fmt.Errorf("sha256 field must match [a-f0-9]{64} pattern")
		}
	}
	return nil
}
// validateWasmPluginVMConfig checks that every VM environment variable has a
// non-empty, unique name. Nil entries are skipped; a nil or empty config is
// valid.
func validateWasmPluginVMConfig(vm *extensions.VmConfig) error {
	if vm == nil || len(vm.Env) == 0 {
		return nil
	}
	seen := sets.New[string]()
	for _, e := range vm.Env {
		switch {
		case e == nil:
			continue
		case e.Name == "":
			return fmt.Errorf("spec.vmConfig.env invalid")
		case seen.InsertContains(e.Name):
			return fmt.Errorf("duplicate env")
		}
	}
	return nil
}
// validateWasmPluginMatch checks each traffic selector and its ports:
// selectors and ports must be non-nil, and port numbers must fall in
// (0, 65535].
func validateWasmPluginMatch(selectors []*extensions.WasmPlugin_TrafficSelector) error {
	for selIdx, sel := range selectors {
		if sel == nil {
			return fmt.Errorf("spec.Match[%d] is nil", selIdx)
		}
		for portIdx, port := range sel.Ports {
			switch {
			case port == nil:
				return fmt.Errorf("spec.Match[%d].Ports[%d] is nil", selIdx, portIdx)
			case port.GetNumber() <= 0 || port.GetNumber() > 65535:
				return fmt.Errorf("spec.Match[%d].Ports[%d] is out of range: %d", selIdx, portIdx, port.GetNumber())
			}
		}
	}
	return nil
}
// validatePartialWildCard rejects hosts that use a wildcard anywhere except
// as the bare "*" or as a leading "*." label (e.g. "foo*.com" is invalid).
func validatePartialWildCard(host string) error {
	if !strings.Contains(host, "*") {
		return nil
	}
	if host == "*" || strings.HasPrefix(host, "*.") {
		return nil
	}
	return fmt.Errorf("partial wildcard %q not allowed", host)
}
//go:build !agent
// +build !agent
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"fmt"
"github.com/google/cel-go/cel"
telemetry "istio.io/api/telemetry/v1alpha1"
)
// validateTelemetryFilter checks that an access-logging filter expression is
// a syntactically valid CEL expression.
func validateTelemetryFilter(filter *telemetry.AccessLogging_Filter) error {
	expr := filter.Expression
	// BUGFIX: the error from cel.NewEnv was previously discarded; a failed
	// environment construction would leave env nil and panic at env.Parse.
	env, err := cel.NewEnv()
	if err != nil {
		return fmt.Errorf("failed to create CEL environment: %w", err)
	}
	_, issue := env.Parse(expr)
	if issue.Err() != nil {
		return fmt.Errorf("must be a valid CEL expression, %w", issue.Err())
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package validation
import (
"errors"
"fmt"
"strings"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/config/labels"
)
// HTTPRouteType classifies the role an HTTPRoute plays in VirtualService
// delegation.
type HTTPRouteType int

const (
	// IndependentRoute is a route that neither delegates nor is delegated to.
	// Previously these constants were untyped ints; typing them as
	// HTTPRouteType gives compile-time safety without changing their values.
	IndependentRoute HTTPRouteType = iota
	// RootRoute is a route that delegates to another HTTPRoute.
	RootRoute
	// DelegateRoute is a route referenced by a root route.
	DelegateRoute
)
// getHTTPRouteType classifies an HTTPRoute: a delegate target, a root route
// (one that has a Delegate set), or an independent route.
func getHTTPRouteType(http *networking.HTTPRoute, isDelegate bool) HTTPRouteType {
	switch {
	case isDelegate:
		return DelegateRoute
	case http.Delegate != nil:
		// root vs's http route
		return RootRoute
	default:
		return IndependentRoute
	}
}
// validateHTTPRoute validates a single HTTPRoute of a VirtualService.
//
// delegate marks the route as the target of a delegation; gatewaySemantics is
// forwarded to destination validation. Checks cover route-type conflicts,
// match rules, request/response header manipulation, CORS, fault injection,
// mirroring percentages, redirect/direct-response/retry/rewrite settings,
// destinations, and timeout syntax. Warnings and errors accumulate in errs.
func validateHTTPRoute(http *networking.HTTPRoute, delegate, gatewaySemantics bool) (errs Validation) {
	routeType := getHTTPRouteType(http, delegate)
	// check for conflicts
	errs = WrapError(validateHTTPRouteConflict(http, routeType))
	// check http route match requests
	errs = appendValidation(errs, validateHTTPRouteMatchRequest(http, routeType))
	// header manipulation: request add/set may touch the authority header,
	// hence the WithAuthority variant of the name check on those two.
	for name, val := range http.Headers.GetRequest().GetAdd() {
		errs = appendValidation(errs, ValidateHTTPHeaderWithAuthorityOperationName(name))
		errs = appendValidation(errs, ValidateHTTPHeaderValue(val))
	}
	for name, val := range http.Headers.GetRequest().GetSet() {
		errs = appendValidation(errs, ValidateHTTPHeaderWithAuthorityOperationName(name))
		errs = appendValidation(errs, ValidateHTTPHeaderValue(val))
	}
	for _, name := range http.Headers.GetRequest().GetRemove() {
		errs = appendValidation(errs, ValidateHTTPHeaderOperationName(name))
	}
	for name, val := range http.Headers.GetResponse().GetAdd() {
		errs = appendValidation(errs, ValidateHTTPHeaderOperationName(name))
		errs = appendValidation(errs, ValidateHTTPHeaderValue(val))
	}
	for name, val := range http.Headers.GetResponse().GetSet() {
		errs = appendValidation(errs, ValidateHTTPHeaderOperationName(name))
		errs = appendValidation(errs, ValidateHTTPHeaderValue(val))
	}
	for _, name := range http.Headers.GetResponse().GetRemove() {
		errs = appendValidation(errs, ValidateHTTPHeaderOperationName(name))
	}
	errs = appendValidation(errs, validateCORSPolicy(http.CorsPolicy))
	errs = appendValidation(errs, validateHTTPFaultInjection(http.Fault))
	// MirrorPercent is deprecated in favor of MirrorPercentage; accept it but warn.
	// nolint: staticcheck
	if http.MirrorPercent != nil {
		if value := http.MirrorPercent.GetValue(); value > 100 {
			errs = appendValidation(errs, fmt.Errorf("mirror_percent must have a max value of 100 (it has %d)", value))
		}
		errs = appendValidation(errs, WrapWarning(errors.New(`using deprecated setting "mirrorPercent", use "mirrorPercentage" instead`)))
	}
	// MirrorPercentage is a float and must be within [0, 100].
	if http.MirrorPercentage != nil {
		value := http.MirrorPercentage.GetValue()
		if value > 100 {
			errs = appendValidation(errs, fmt.Errorf("mirror_percentage must have a max value of 100 (it has %f)", value))
		}
		if value < 0 {
			errs = appendValidation(errs, fmt.Errorf("mirror_percentage must have a min value of 0 (it has %f)", value))
		}
	}
	errs = appendValidation(errs, validateDestination(http.Mirror))
	errs = appendValidation(errs, validateHTTPMirrors(http.Mirrors))
	errs = appendValidation(errs, validateHTTPRedirect(http.Redirect))
	errs = appendValidation(errs, validateHTTPDirectResponse(http.DirectResponse))
	errs = appendValidation(errs, validateHTTPRetry(http.Retries))
	errs = appendValidation(errs, validateHTTPRewrite(http.Rewrite))
	errs = appendValidation(errs, validateAuthorityRewrite(http.Rewrite, http.Headers))
	errs = appendValidation(errs, validateHTTPRouteDestinations(http.Route, gatewaySemantics))
	if http.Timeout != nil {
		errs = appendValidation(errs, ValidateDuration(http.Timeout))
	}
	return
}
// validateAuthorityRewrite ensures we only attempt rewrite authority in a single place.
func validateAuthorityRewrite(rewrite *networking.HTTPRewrite, headers *networking.Headers) error {
current := rewrite.GetAuthority()
for k, v := range headers.GetRequest().GetSet() {
if !isAuthorityHeader(k) {
continue
}
if current != "" {
return fmt.Errorf("authority header cannot be set multiple times: have %q, attempting to set %q", current, v)
}
current = v
}
for k, v := range headers.GetRequest().GetAdd() {
if !isAuthorityHeader(k) {
continue
}
if current != "" {
return fmt.Errorf("authority header cannot be set multiple times: have %q, attempting to set %q", current, v)
}
current = v
}
return nil
}
// validateHTTPRouteMatchRequest validates the match clauses of an HTTPRoute.
//
// Independent routes get full validation including regex syntax for URI,
// scheme, method, authority, headers, and query params. Root/delegate routes
// skip regex validation but still check header names, prefix-match
// non-emptiness, query params, and withoutHeaders. The trailing loop applies
// checks common to all route types: port range, source labels, gateway names,
// and source namespace.
func validateHTTPRouteMatchRequest(http *networking.HTTPRoute, routeType HTTPRouteType) (errs error) {
	if routeType == IndependentRoute {
		for _, match := range http.Match {
			if match != nil {
				for name, header := range match.Headers {
					if header == nil {
						errs = appendErrors(errs, fmt.Errorf("header match %v cannot be null", name))
					}
					// An empty prefix match would match everything — likely a
					// user error.
					if _, ok := header.GetMatchType().(*networking.StringMatch_Prefix); ok {
						if header.GetPrefix() == "" {
							errs = appendErrors(errs, fmt.Errorf("header prefix match %v may not be empty", name))
						}
					}
					errs = appendErrors(errs, ValidateHTTPHeaderName(name))
					errs = appendErrors(errs, validateStringMatchRegexp(header, "headers"))
				}
				errs = appendErrors(errs, validateStringMatchRegexp(match.GetUri(), "uri"))
				errs = appendErrors(errs, validateStringMatchRegexp(match.GetScheme(), "scheme"))
				errs = appendErrors(errs, validateStringMatchRegexp(match.GetMethod(), "method"))
				errs = appendErrors(errs, validateStringMatchRegexp(match.GetAuthority(), "authority"))
				for _, qp := range match.GetQueryParams() {
					errs = appendErrors(errs, validateStringMatchRegexp(qp, "queryParams"))
				}
			}
		}
	} else {
		// Root/delegate routes: same structural checks, but no regex validation.
		for _, match := range http.Match {
			if match != nil {
				for name, header := range match.Headers {
					if header == nil {
						errs = appendErrors(errs, fmt.Errorf("header match %v cannot be null", name))
					}
					if _, ok := header.GetMatchType().(*networking.StringMatch_Prefix); ok {
						if header.GetPrefix() == "" {
							errs = appendErrors(errs, fmt.Errorf("header prefix match %v may not be empty", name))
						}
					}
					errs = appendErrors(errs, ValidateHTTPHeaderName(name))
				}
				for name, param := range match.QueryParams {
					if param == nil {
						errs = appendErrors(errs, fmt.Errorf("query param match %v cannot be null", name))
					}
				}
				for name, header := range match.WithoutHeaders {
					if header == nil {
						errs = appendErrors(errs, fmt.Errorf("withoutHeaders match %v cannot be null", name))
					}
					errs = appendErrors(errs, ValidateHTTPHeaderName(name))
				}
			}
		}
	}
	// Checks shared by all route types.
	for _, match := range http.Match {
		if match != nil {
			if match.Port != 0 {
				errs = appendErrors(errs, ValidatePort(int(match.Port)))
			}
			errs = appendErrors(errs, labels.Instance(match.SourceLabels).Validate())
			errs = appendErrors(errs, validateGatewayNames(match.Gateways))
			if match.SourceNamespace != "" {
				if !labels.IsDNS1123Label(match.SourceNamespace) {
					errs = appendErrors(errs, fmt.Errorf("sourceNamespace match %s is invalid", match.SourceNamespace))
				}
			}
		}
	}
	return
}
// validateHTTPRouteConflict checks for mutually exclusive settings on an
// HTTPRoute according to its delegation role.
//
// Root routes may only delegate (no redirect/route); delegate routes may not
// themselves delegate. For all routes: redirect, direct_response, and route
// are mutually exclusive (with fault and rewrite also excluded alongside
// redirect/direct_response), at least one of them must be present, and
// mirror/mirrors cannot be combined.
func validateHTTPRouteConflict(http *networking.HTTPRoute, routeType HTTPRouteType) (errs error) {
	if routeType == RootRoute {
		// This is to check root conflict
		// only delegate can be specified
		if http.Redirect != nil {
			errs = appendErrors(errs, fmt.Errorf("root HTTP route %s must not specify redirect", http.Name))
		}
		if http.Route != nil {
			errs = appendErrors(errs, fmt.Errorf("root HTTP route %s must not specify route", http.Name))
		}
		// Root routes skip the shared checks below.
		return errs
	}
	// This is to check delegate conflict
	if routeType == DelegateRoute {
		if http.Delegate != nil {
			errs = appendErrors(errs, errors.New("delegate HTTP route cannot contain delegate"))
		}
	}
	// check for conflicts
	if http.Redirect != nil {
		if len(http.Route) > 0 {
			errs = appendErrors(errs, errors.New("HTTP route cannot contain both route and redirect"))
		}
		if http.Fault != nil {
			errs = appendErrors(errs, errors.New("HTTP route cannot contain both fault and redirect"))
		}
		if http.Rewrite != nil {
			errs = appendErrors(errs, errors.New("HTTP route rule cannot contain both rewrite and redirect"))
		}
		if http.DirectResponse != nil {
			errs = appendErrors(errs, errors.New("HTTP route rule cannot contain both direct_response and redirect"))
		}
	} else if http.DirectResponse != nil {
		if len(http.Route) > 0 {
			errs = appendErrors(errs, errors.New("HTTP route cannot contain both route and direct_response"))
		}
		if http.Fault != nil {
			errs = appendErrors(errs, errors.New("HTTP route cannot contain both fault and direct_response"))
		}
		if http.Rewrite != nil {
			errs = appendErrors(errs, errors.New("HTTP route rule cannot contain both rewrite and direct_response"))
		}
		if http.Redirect != nil {
			errs = appendErrors(errs, errors.New("HTTP route rule cannot contain both redirect and direct_response"))
		}
	} else if len(http.Route) == 0 {
		// Neither redirect nor direct_response: an explicit route is required.
		errs = appendErrors(errs, errors.New("HTTP route, redirect or direct_response is required"))
	}
	if http.Mirror != nil && len(http.Mirrors) > 0 {
		errs = appendErrors(errs, errors.New("HTTP route cannot contain both mirror and mirrors"))
	}
	return errs
}
// isInternalHeader returns true if a header refers to an internal value that cannot be modified by Envoy.
// That is any pseudo-header (":"-prefixed) or the "host" header (case-insensitive).
func isInternalHeader(headerKey string) bool {
	if strings.HasPrefix(headerKey, ":") {
		return true
	}
	return strings.EqualFold(headerKey, "host")
}
// isAuthorityHeader returns true if a header refers to the authority header,
// i.e. ":authority" or "host" (case-insensitive).
func isAuthorityHeader(headerKey string) bool {
	switch {
	case strings.EqualFold(headerKey, ":authority"):
		return true
	case strings.EqualFold(headerKey, "host"):
		return true
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package visibility
import (
"fmt"
"istio.io/istio/pkg/config/labels"
)
// Instance defines whether a given config or service is exported to local namespace, some set of namespaces, or
// all namespaces or none. Besides the sentinel values below, an Instance may
// hold a specific namespace name (validated as a DNS-1123 label).
type Instance string

const (
	// Private implies namespace local config
	Private Instance = "."
	// Public implies config is visible to all
	Public Instance = "*"
	// None implies service is visible to no one. Used for services only
	None Instance = "~"
)
// Validate a visibility value ( ./*/~/some namespace name which is DNS1123 label)
func (v Instance) Validate() (errs error) {
	// Private (".") and Public ("*") are always legal for config objects.
	if v == Private || v == Public {
		return nil
	}
	// None ("~") only makes sense for services, not configuration.
	if v == None {
		return fmt.Errorf("exportTo ~ (none) is not allowed for Istio configuration objects")
	}
	// Anything else must be a concrete namespace name.
	if !labels.IsDNS1123Label(string(v)) {
		return fmt.Errorf("only .,*, or a valid DNS 1123 label is allowed as exportTo entry")
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import "istio.io/istio/pkg/wellknown"
var (
	// DeprecatedFilterNames is to support both canonical filter names
	// and deprecated filter names for backward compatibility. Istiod
	// generates canonical filter names.
	DeprecatedFilterNames = map[string]string{
		wellknown.Buffer:                        "envoy.buffer",
		wellknown.CORS:                          "envoy.cors",
		"envoy.filters.http.csrf":               "envoy.csrf",
		wellknown.Dynamo:                        "envoy.http_dynamo_filter",
		wellknown.HTTPExternalAuthorization:     "envoy.ext_authz",
		wellknown.Fault:                         "envoy.fault",
		wellknown.GRPCHTTP1Bridge:               "envoy.grpc_http1_bridge",
		wellknown.GRPCJSONTranscoder:            "envoy.grpc_json_transcoder",
		wellknown.GRPCWeb:                       "envoy.grpc_web",
		wellknown.Gzip:                          "envoy.gzip",
		wellknown.HealthCheck:                   "envoy.health_check",
		wellknown.IPTagging:                     "envoy.ip_tagging",
		wellknown.Lua:                           "envoy.lua",
		wellknown.HTTPRateLimit:                 "envoy.rate_limit",
		wellknown.Router:                        "envoy.router",
		wellknown.Squash:                        "envoy.squash",
		wellknown.HTTPInspector:                 "envoy.listener.http_inspector",
		wellknown.OriginalDestination:           "envoy.listener.original_dst",
		"envoy.filters.listener.original_src":   "envoy.listener.original_src",
		wellknown.ProxyProtocol:                 "envoy.listener.proxy_protocol",
		wellknown.TLSInspector:                  "envoy.listener.tls_inspector",
		wellknown.ClientSSLAuth:                 "envoy.client_ssl_auth",
		wellknown.ExternalAuthorization:         "envoy.ext_authz",
		wellknown.HTTPConnectionManager:         "envoy.http_connection_manager",
		wellknown.MongoProxy:                    "envoy.mongo_proxy",
		wellknown.RateLimit:                     "envoy.ratelimit",
		wellknown.RedisProxy:                    "envoy.redis_proxy",
		wellknown.TCPProxy:                      "envoy.tcp_proxy",
	}
	// ReverseDeprecatedFilterNames maps each deprecated name back to a canonical one.
	// NOTE(review): "envoy.ext_authz" appears as the value of two entries above
	// (HTTPExternalAuthorization and ExternalAuthorization), so the reverse map keeps
	// whichever key map iteration yields last — confirm this collision is intended.
	ReverseDeprecatedFilterNames = reverse(DeprecatedFilterNames)
)
// reverse builds the value->key inversion of the given map. If two keys share
// a value, only one of them (iteration-order dependent) survives.
func reverse(names map[string]string) map[string]string {
	inverted := make(map[string]string, len(names))
	for canonical, deprecated := range names {
		inverted[deprecated] = canonical
	}
	return inverted
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds
import (
"errors"
"fmt"
bootstrap "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3"
cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
listener "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
route "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
hcm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/structpb"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pkg/util/protomarshal"
)
// nolint: interfacer
// BuildXDSObjectFromStruct converts an EnvoyFilter patch value into the typed xDS
// proto corresponding to the patch's applyTo location. A nil value (used by remove
// operations) yields a nil message with no error.
func BuildXDSObjectFromStruct(applyTo networking.EnvoyFilter_ApplyTo, value *structpb.Struct, strict bool) (proto.Message, error) {
	if value == nil {
		// for remove ops
		return nil, nil
	}
	// Each applyTo location corresponds to exactly one concrete proto type.
	factories := map[networking.EnvoyFilter_ApplyTo]func() proto.Message{
		networking.EnvoyFilter_CLUSTER:             func() proto.Message { return &cluster.Cluster{} },
		networking.EnvoyFilter_LISTENER:            func() proto.Message { return &listener.Listener{} },
		networking.EnvoyFilter_ROUTE_CONFIGURATION: func() proto.Message { return &route.RouteConfiguration{} },
		networking.EnvoyFilter_FILTER_CHAIN:        func() proto.Message { return &listener.FilterChain{} },
		networking.EnvoyFilter_HTTP_FILTER:         func() proto.Message { return &hcm.HttpFilter{} },
		networking.EnvoyFilter_NETWORK_FILTER:      func() proto.Message { return &listener.Filter{} },
		networking.EnvoyFilter_VIRTUAL_HOST:        func() proto.Message { return &route.VirtualHost{} },
		networking.EnvoyFilter_HTTP_ROUTE:          func() proto.Message { return &route.Route{} },
		networking.EnvoyFilter_EXTENSION_CONFIG:    func() proto.Message { return &core.TypedExtensionConfig{} },
		networking.EnvoyFilter_BOOTSTRAP:           func() proto.Message { return &bootstrap.Bootstrap{} },
		networking.EnvoyFilter_LISTENER_FILTER:     func() proto.Message { return &listener.ListenerFilter{} },
	}
	newMessage, known := factories[applyTo]
	if !known {
		return nil, fmt.Errorf("Envoy filter: unknown object type for applyTo %s", applyTo.String()) // nolint: stylecheck
	}
	obj := newMessage()
	if err := StructToMessage(value, obj, strict); err != nil {
		return nil, fmt.Errorf("Envoy filter: %v", err) // nolint: stylecheck
	}
	return obj, nil
}
// StructToMessage unmarshals a protobuf Struct into the given typed message.
// When strict is false, unknown fields in the struct are tolerated so that
// newer proto versions than the one compiled in can still be accepted.
func StructToMessage(pbst *structpb.Struct, out proto.Message, strict bool) error {
	if pbst == nil {
		return errors.New("nil struct")
	}
	data, err := protomarshal.MarshalProtoNames(pbst)
	if err != nil {
		return err
	}
	if !strict {
		// Ignore unknown fields as they may be sending versions of
		// the proto we are not internally using.
		return protomarshal.UnmarshalAllowUnknown(data, out)
	}
	return protomarshal.Unmarshal(data, out)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assets
import (
"embed"
"html/template"
)
// FS embeds the templates and static assets served by ControlZ.
//
//go:embed templates/* static/*
var FS embed.FS
// ParseTemplate reads the named file from the embedded filesystem and parses
// it into l. It panics if the file is missing or fails to parse, which is
// acceptable because all inputs are compiled-in assets.
func ParseTemplate(l *template.Template, name string) *template.Template {
	data, err := FS.ReadFile(name)
	if err != nil {
		panic(err)
	}
	parsed, err := l.Parse(string(data))
	if err != nil {
		panic(err)
	}
	return parsed
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ctrlz implements Istio's introspection facility. When components
// integrate with ControlZ, they automatically gain an IP port which allows operators
// to visualize and control a number of aspects of each process, including controlling
// logging scopes, viewing command-line options, memory use, etc. Additionally,
// the port implements a REST API allowing access and control over the same state.
//
// ControlZ is designed around the idea of "topics". A topic corresponds to the different
// parts of the UI. There are a set of built-in topics representing the core introspection
// functionality, and each component that uses ControlZ can add new topics specialized
// for their purpose.
package ctrlz
import (
"fmt"
"html/template"
"net"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
"istio.io/istio/pkg/ctrlz/assets"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics"
"istio.io/istio/pkg/log"
)
// coreTopics is the built-in set of topics every ControlZ instance serves.
var coreTopics = []fw.Topic{
	topics.ScopeTopic(),
	topics.MemTopic(),
	topics.EnvTopic(),
	topics.ProcTopic(),
	topics.ArgsTopic(),
	topics.VersionTopic(),
	topics.SignalsTopic(),
}
var (
	// allTopics accumulates every topic to be served (core, custom, and those
	// added via RegisterTopic); guarded by topicMutex.
	allTopics []fw.Topic
	// topicMutex protects allTopics.
	topicMutex sync.Mutex
	// listeningTestProbe, when non-nil, is invoked once the server starts
	// serving; used by tests to detect readiness.
	listeningTestProbe func()
)
// Server represents a running ControlZ instance.
type Server struct {
	listener   net.Listener   // the accepted TCP listener the HTTP server runs on
	shutdown   sync.WaitGroup // released when the serving goroutine exits
	httpServer http.Server
}
// augmentLayout parses the named embedded template file into layout,
// panicking on error (all inputs are compiled-in assets).
func augmentLayout(layout *template.Template, page string) *template.Template {
	return assets.ParseTemplate(layout, page)
}
// registerTopic wires a topic into the router: the "<prefix>z" subrouter serves
// HTML and the "<prefix>j" subrouter serves JSON, then the topic is activated
// with a layout that has its title defined.
func registerTopic(router *mux.Router, layout *template.Template, t fw.Topic) {
	htmlSub := router.NewRoute().PathPrefix("/" + t.Prefix() + "z").Subrouter()
	jsonSub := router.NewRoute().PathPrefix("/" + t.Prefix() + "j").Subrouter()
	base := template.Must(layout.Clone())
	titled := template.Must(base.Parse(`{{ define "title" }}` + t.Title() + `{{ end }}`))
	t.Activate(fw.NewContext(htmlSub, jsonSub, titled))
}
// getLocalIP returns a non loopback local IPv4 address of the host, or ""
// if none can be determined.
func getLocalIP() string {
	addrs, err := net.InterfaceAddrs()
	if err != nil {
		return ""
	}
	for _, addr := range addrs {
		ipNet, ok := addr.(*net.IPNet)
		if !ok || ipNet.IP.IsLoopback() {
			continue
		}
		// Only report IPv4 addresses.
		if v4 := ipNet.IP.To4(); v4 != nil {
			return v4.String()
		}
	}
	return ""
}
// topic is the name/URL pair rendered in the ControlZ sidebar.
type topic struct {
	Name string
	URL  string
}
// getTopics snapshots the registered topics as name/URL pairs for use by the
// sidebar template.
func getTopics() []topic {
	topicMutex.Lock()
	defer topicMutex.Unlock()
	result := make([]topic, 0, len(allTopics))
	for _, entry := range allTopics {
		result = append(result, topic{
			Name: entry.Title(),
			URL:  "/" + entry.Prefix() + "z/",
		})
	}
	return result
}
// normalize makes a string safe for use in URLs and template names by
// replacing every "/" with "-".
func normalize(input string) string {
	// strings.ReplaceAll is the idiomatic replacement for Replace(..., -1).
	return strings.ReplaceAll(input, "/", "-")
}
// RegisterTopic registers a new Control-Z topic for the current process.
// It is safe for concurrent use; the topic takes effect for servers started
// by subsequent calls to Run.
func RegisterTopic(t fw.Topic) {
	topicMutex.Lock()
	defer topicMutex.Unlock()
	allTopics = append(allTopics, t)
}
// Run starts up the ControlZ listeners.
//
// ControlZ uses the set of standard core topics, the
// supplied custom topics, as well as any topics registered
// via the RegisterTopic function.
func Run(o *Options, customTopics []fw.Topic) (*Server, error) {
	topicMutex.Lock()
	allTopics = append(allTopics, coreTopics...)
	allTopics = append(allTopics, customTopics...)
	// Snapshot the topic list while still holding the lock: the registration
	// loop below previously read allTopics unlocked, racing with concurrent
	// RegisterTopic calls.
	topicsSnapshot := make([]fw.Topic, len(allTopics))
	copy(topicsSnapshot, allTopics)
	topicMutex.Unlock()
	// Best effort: an empty executable name still renders acceptably.
	exec, _ := os.Executable()
	instance := exec + " - " + getLocalIP()
	funcs := template.FuncMap{
		"getTopics": getTopics,
		"normalize": normalize,
	}
	baseLayout := assets.ParseTemplate(template.New("base"), "templates/layouts/base.html")
	baseLayout = baseLayout.Funcs(funcs)
	baseLayout = template.Must(baseLayout.Parse("{{ define \"instance\" }}" + instance + "{{ end }}"))
	_ = augmentLayout(baseLayout, "templates/modules/header.html")
	_ = augmentLayout(baseLayout, "templates/modules/sidebar.html")
	_ = augmentLayout(baseLayout, "templates/modules/last-refresh.html")
	mainLayout := augmentLayout(template.Must(baseLayout.Clone()), "templates/layouts/main.html")
	router := mux.NewRouter()
	for _, t := range topicsSnapshot {
		registerTopic(router, mainLayout, t)
	}
	registerHome(router, mainLayout)
	addr := o.Address
	if addr == "*" {
		addr = ""
	}
	// Canonicalize the address and resolve a dynamic port if necessary
	listener, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr, o.Port))
	if err != nil {
		log.Errorf("Unable to start ControlZ: %v", err)
		return nil, err
	}
	s := &Server{
		listener: listener,
		httpServer: http.Server{
			Addr:           listener.Addr().(*net.TCPAddr).String(),
			ReadTimeout:    10 * time.Second,
			WriteTimeout:   10 * time.Second,
			MaxHeaderBytes: 1 << 20,
			Handler:        router,
		},
	}
	s.shutdown.Add(1)
	go s.listen()
	return s, nil
}
// listen serves HTTP on the server's listener until the listener is closed,
// then releases the shutdown WaitGroup so Close can return.
func (s *Server) listen() {
	log.Infof("ControlZ available at %s", s.httpServer.Addr)
	if listeningTestProbe != nil {
		// Tests hook this to learn when the server is up.
		go listeningTestProbe()
	}
	err := s.httpServer.Serve(s.listener)
	log.Infof("ControlZ terminated: %v", err)
	s.shutdown.Done()
}
// Close terminates ControlZ.
//
// Close is not normally used by programs that expose ControlZ, it is primarily intended to be
// used by tests.
func (s *Server) Close() {
	log.Info("Closing ControlZ")
	if s.listener == nil {
		return
	}
	if err := s.listener.Close(); err != nil {
		log.Warnf("Error closing ControlZ: %v", err)
	}
	// Wait for the serving goroutine to observe the closed listener and exit.
	s.shutdown.Wait()
}
// Address returns the address the ControlZ HTTP server is serving on.
func (s *Server) Address() string {
	return s.httpServer.Addr
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fw
import (
"html/template"
"github.com/gorilla/mux"
)
// Topic is used to describe a single major ControlZ functional area.
type Topic interface {
	// Title returns the title for the area, which will be used in the sidenav and window title.
	Title() string
	// Prefix is the name used to reference this functionality in URLs.
	Prefix() string
	// Activate triggers a topic to register itself to receive traffic.
	Activate(TopicContext)
}

// TopicContext provides support objects needed to register a topic.
type TopicContext interface {
	// HTMLRouter is used to control HTML traffic delivered to this topic.
	HTMLRouter() *mux.Router
	// JSONRouter is used to control JSON traffic delivered to this topic.
	JSONRouter() *mux.Router
	// Layout is the template used as the primary layout for the topic's HTML content.
	Layout() *template.Template
}
// context is the concrete TopicContext handed to topics on activation.
type context struct {
	htmlRouter *mux.Router
	jsonRouter *mux.Router
	layout     *template.Template
}
// NewContext creates a new TopicContext.
func NewContext(htmlRouter *mux.Router, jsonRouter *mux.Router, layout *template.Template) TopicContext {
	ctx := context{layout: layout}
	ctx.htmlRouter = htmlRouter
	ctx.jsonRouter = jsonRouter
	return ctx
}
// HTMLRouter returns the router for this topic's HTML endpoints.
func (c context) HTMLRouter() *mux.Router {
	return c.htmlRouter
}

// JSONRouter returns the router for this topic's JSON endpoints.
func (c context) JSONRouter() *mux.Router {
	return c.jsonRouter
}

// Layout returns the layout template used for this topic's HTML content.
func (c context) Layout() *template.Template {
	return c.layout
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fw
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"net/http"
)
// RenderError outputs an error message
func RenderError(w http.ResponseWriter, statusCode int, err error) {
w.WriteHeader(statusCode)
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
_, _ = fmt.Fprintf(w, "%v", err)
}
// RenderHTML executes the given template, sending the output to the supplied response writer
func RenderHTML(w http.ResponseWriter, t *template.Template, data any) {
b := &bytes.Buffer{}
if err := t.Execute(b, data); err != nil {
RenderError(w, http.StatusInternalServerError, err)
return
}
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "text/html; charset=utf-8")
_, _ = b.WriteTo(w)
}
// RenderJSON outputs the given data as JSON
func RenderJSON(w http.ResponseWriter, statusCode int, data any) {
w.WriteHeader(statusCode)
w.Header().Set("Content-Type", "application/json; charset=utf-8")
if err := json.NewEncoder(w).Encode(data); err != nil {
RenderError(w, http.StatusInternalServerError, err)
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ctrlz
import (
"html/template"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/gorilla/mux"
"istio.io/istio/pkg/ctrlz/assets"
"istio.io/istio/pkg/ctrlz/fw"
)
// mimeTypes maps static-asset file extensions to the Content-Type served.
var mimeTypes = map[string]string{
	".css": "text/css; charset=utf-8",
	".svg": "image/svg+xml; charset=utf-8",
	".ico": "image/x-icon",
	".png": "image/png",
	".js":  "application/javascript",
}
// homeInfo carries the process stats rendered on the ControlZ home page.
type homeInfo struct {
	ProcessName string
	HeapSize    uint64 // bytes of allocated heap objects (runtime MemStats.HeapAlloc)
	NumGC       uint32 // completed GC cycles
	CurrentTime int64  // Unix time in nanoseconds
	Hostname    string
	IP          string
}
// getHomeInfo snapshots the process stats displayed on the home page.
func getHomeInfo() *homeInfo {
	ms := runtime.MemStats{}
	runtime.ReadMemStats(&ms)
	// Best effort; an empty hostname is acceptable in the UI.
	name, _ := os.Hostname()
	return &homeInfo{
		ProcessName: os.Args[0],
		HeapSize:    ms.HeapAlloc,
		NumGC:       ms.NumGC,
		CurrentTime: time.Now().UnixNano(),
		Hostname:    name,
		IP:          getLocalIP(),
	}
}
// registerHome wires up the home page, the JSON home endpoint, static assets,
// the 404 page, and the test-only exit endpoint on the given router.
func registerHome(router *mux.Router, layout *template.Template) {
	homeTmpl := assets.ParseTemplate(template.Must(layout.Clone()), "templates/home.html")
	errorTmpl := assets.ParseTemplate(template.Must(layout.Clone()), "templates/404.html")
	// Catch-all GET handler: dispatch based on the request path.
	_ = router.NewRoute().PathPrefix("/").Methods("GET").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if req.URL.Path == "/" {
			// home page
			fw.RenderHTML(w, homeTmpl, getHomeInfo())
		} else if req.URL.Path == "/homej" || req.URL.Path == "/homej/" {
			fw.RenderJSON(w, http.StatusOK, getHomeInfo())
		} else if a, err := assets.FS.ReadFile("static" + req.URL.Path); err == nil {
			// static asset
			ext := strings.ToLower(filepath.Ext(req.URL.Path))
			if mime, ok := mimeTypes[ext]; ok {
				w.Header().Set("Content-Type", mime)
			}
			_, _ = w.Write(a)
		} else {
			// 'not found' page
			w.WriteHeader(http.StatusNotFound)
			fw.RenderHTML(w, errorTmpl, nil)
		}
	})
	// Test-only endpoint: acknowledge, then terminate the process shortly after
	// so the response has time to flush.
	_ = router.NewRoute().Methods("PUT").Path("/homej/exit").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		w.WriteHeader(http.StatusAccepted)
		time.AfterFunc(1*time.Second, func() {
			os.Exit(0)
		})
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ctrlz
import (
"github.com/spf13/cobra"
)
// Options defines the set of options supported by Istio's ControlZ component introspection package.
type Options struct {
	// The IP port to use for ctrlz.
	Port uint16
	// The IP address to listen on for ctrlz. "*" means all addresses.
	Address string
}
// DefaultOptions returns a new set of options, initialized to the defaults
// (port 9876 on localhost).
func DefaultOptions() *Options {
	o := &Options{}
	o.Port = 9876
	o.Address = "localhost"
	return o
}
// AttachCobraFlags attaches a set of Cobra flags to the given Cobra command.
//
// Cobra is the command-line processor that Istio uses. This command attaches
// the necessary set of flags to expose a CLI to let the user control all
// introspection options.
func (o *Options) AttachCobraFlags(cmd *cobra.Command) {
	flags := cmd.PersistentFlags()
	flags.Uint16Var(&o.Port, "ctrlz_port", o.Port,
		"The IP port to use for the ControlZ introspection facility")
	flags.StringVar(&o.Address, "ctrlz_address", o.Address,
		"The IP Address to listen on for the ControlZ introspection facility. Use '*' to indicate all addresses.")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"net/http"
"os"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// argsTopic is a stateless Topic that displays the process's command line.
type argsTopic struct{}

// ArgsTopic returns a ControlZ topic that allows visualization of process command-line arguments.
func ArgsTopic() fw.Topic {
	return argsTopic{}
}

// Title is the implementation of Topic.Title.
func (argsTopic) Title() string {
	return "Command-Line Arguments"
}

// Prefix is the implementation of Topic.Prefix.
func (argsTopic) Prefix() string {
	return "arg"
}
// Activate registers the HTML handler that renders os.Args via the args template.
func (argsTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/args.html")
	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fw.RenderHTML(w, tmpl, os.Args)
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assets
import (
"embed"
"html/template"
)
// FS embeds the topic templates served by the built-in ControlZ topics.
//
//go:embed templates/*
var FS embed.FS
// ParseTemplate reads the named file from the embedded filesystem and parses
// it into l, panicking on any failure (all inputs are compiled-in assets).
func ParseTemplate(l *template.Template, name string) *template.Template {
	data, err := FS.ReadFile(name)
	if err != nil {
		panic(err)
	}
	parsed, err := l.Parse(string(data))
	if err != nil {
		panic(err)
	}
	return parsed
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"fmt"
"html/template"
"net/http"
"sort"
"strings"
"sigs.k8s.io/yaml"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// ReadableCollection is a read-only collection of entries to be exposed via CtrlZ.
type ReadableCollection interface {
	// Name identifies the collection in URLs and listings.
	Name() string
	// Keys returns the identifiers of all entries in the collection.
	Keys() (keys []string, err error)
	// Get returns the entry for the given identifier.
	Get(id string) (any, error)
}
// collectionTopic is a fw.Topic implementation that exposes a set of collections through CtrlZ.
type collectionTopic struct {
	title       string
	prefix      string
	collections []ReadableCollection
	// Templates are parsed lazily in Activate from the shared layout.
	mainTmpl *template.Template
	listTmpl *template.Template
	itemTmpl *template.Template
}

var _ fw.Topic = &collectionTopic{}
// Title is implementation of Topic.Title.
func (c *collectionTopic) Title() string {
	return c.title
}

// Prefix is implementation of Topic.Prefix.
func (c *collectionTopic) Prefix() string {
	return c.prefix
}
// Activate is implementation of Topic.Activate. It parses the three views
// (overview, per-collection listing, single item) and installs a catch-all
// GET handler that dispatches based on path depth:
//
//	/<prefix>z/                      -> main overview
//	/<prefix>z/<collection>          -> key listing
//	/<prefix>z/<collection>/<key>    -> single item
func (c *collectionTopic) Activate(context fw.TopicContext) {
	l := template.Must(context.Layout().Clone())
	c.mainTmpl = assets.ParseTemplate(l, "templates/collection/main.html")
	l = template.Must(context.Layout().Clone())
	c.listTmpl = assets.ParseTemplate(l, "templates/collection/list.html")
	l = template.Must(context.Layout().Clone())
	c.itemTmpl = assets.ParseTemplate(l, "templates/collection/item.html")
	_ = context.HTMLRouter().
		StrictSlash(true).
		NewRoute().
		PathPrefix("/").
		Methods("GET").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// Limit to 4 pieces so item keys containing "/" stay intact.
			parts := strings.SplitN(req.URL.Path, "/", 4)
			parts = parts[2:] // Skip the empty and title parts.
			switch len(parts) {
			case 1:
				if parts[0] == "" {
					c.handleMain(w, req)
				} else {
					c.handleCollection(w, req, parts[0])
				}
			case 2:
				c.handleItem(w, req, parts[0], parts[1])
			default:
				c.handleError(w, req, fmt.Sprintf("InvalidUrl %s", req.URL.Path))
			}
		})
}
// mainContext is passed to the template processor and carries information that is used by the main template.
type mainContext struct {
	Title       string
	Collections []string // sorted collection names
	Error       string   // non-empty when rendering a failure
}
// handleMain renders the overview page listing all collections alphabetically.
func (c *collectionTopic) handleMain(w http.ResponseWriter, _ *http.Request) {
	names := make([]string, 0, len(c.collections))
	for _, col := range c.collections {
		names = append(names, col.Name())
	}
	sort.Strings(names)
	fw.RenderHTML(w, c.mainTmpl, mainContext{
		Title:       c.title,
		Collections: names,
	})
}
// listContext is passed to the template processor and carries information that is used by the list template.
type listContext struct {
	Collection string
	Keys       []string
	Error      string // non-empty when the collection could not be listed
}
// handleCollection renders the key listing for a single named collection.
func (c *collectionTopic) handleCollection(w http.ResponseWriter, _ *http.Request, collection string) {
	var ctx listContext
	keys, err := c.listCollection(collection)
	if err != nil {
		ctx.Error = err.Error()
	} else {
		ctx.Collection = collection
		ctx.Keys = keys
	}
	fw.RenderHTML(w, c.listTmpl, ctx)
}
// itemContext is passed to the template processor and carries information that is used by the item template.
type itemContext struct {
	Collection string
	Key        string
	Value      any    // the item, rendered as a string (YAML for non-strings)
	Error      string // non-empty when the item could not be fetched or marshaled
}
// handleItem renders a single collection entry. Non-string values are
// converted to YAML for display; strings are shown as-is.
func (c *collectionTopic) handleItem(w http.ResponseWriter, _ *http.Request, collection, key string) {
	v, err := c.getItem(collection, key)
	context := itemContext{}
	if err == nil {
		switch v.(type) {
		case string:
			// Already displayable; no conversion needed.
		default:
			var b []byte
			if b, err = yaml.Marshal(v); err != nil {
				// Marshaling failed; report the error but still fill in
				// collection/key below for context.
				context.Error = err.Error()
				break
			}
			v = string(b)
		}
		context.Collection = collection
		context.Key = key
		context.Value = v
	} else {
		context.Error = err.Error()
	}
	fw.RenderHTML(w, c.itemTmpl, context)
}
// handleError renders the main template with only an error message populated.
func (c *collectionTopic) handleError(w http.ResponseWriter, _ *http.Request, errorText string) {
	fw.RenderHTML(w, c.mainTmpl, mainContext{Error: errorText})
}
// listCollection returns the keys of the named collection, or an error when
// no collection with that name is registered.
func (c *collectionTopic) listCollection(name string) ([]string, error) {
	for i := range c.collections {
		if c.collections[i].Name() == name {
			return c.collections[i].Keys()
		}
	}
	return nil, fmt.Errorf("collection not found: %q", name)
}
// getItem fetches a single entry from the named collection, or an error when
// no collection with that name is registered.
func (c *collectionTopic) getItem(collection string, id string) (any, error) {
	for i := range c.collections {
		if c.collections[i].Name() == collection {
			return c.collections[i].Get(id)
		}
	}
	return nil, fmt.Errorf("collection not found: %q", collection)
}
// NewCollectionTopic creates a new custom topic that exposes the provided collections.
// The title is shown in the UI; the prefix determines the topic's URL space.
func NewCollectionTopic(title string, prefix string, collections ...ReadableCollection) fw.Topic {
	return &collectionTopic{
		title:       title,
		prefix:      prefix,
		collections: collections,
	}
}
// NewStaticCollection creates a static collection from the given set of items.
// The map is retained by reference; callers should not mutate it afterward.
func NewStaticCollection(name string, items map[string]any) ReadableCollection {
	return &staticCollection{
		name:  name,
		items: items,
	}
}
// staticCollection is a ReadableCollection implementation that operates on static data that is supplied
// during construction.
type staticCollection struct {
	name  string
	items map[string]any
}

var _ ReadableCollection = &staticCollection{}
// Name is implementation of ReadableCollection.Name.
func (r *staticCollection) Name() string {
	return r.name
}
// Keys is implementation of ReadableCollection.Keys. It returns the item
// identifiers in sorted order and never fails.
func (r *staticCollection) Keys() ([]string, error) {
	result := make([]string, 0, len(r.items))
	for key := range r.items {
		result = append(result, key)
	}
	sort.Strings(result)
	return result, nil
}
// Get is implementation of ReadableCollection.Get. A missing id yields the
// zero value (nil) with no error.
func (r *staticCollection) Get(id string) (any, error) {
	return r.items[id], nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"net/http"
"os"
"sort"
"strings"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// envTopic is a stateless Topic that displays the process environment.
type envTopic struct{}

// EnvTopic returns a ControlZ topic that allows visualization of process environment variables.
func EnvTopic() fw.Topic {
	return envTopic{}
}

// Title is the implementation of Topic.Title.
func (envTopic) Title() string {
	return "Environment Variables"
}

// Prefix is the implementation of Topic.Prefix.
func (envTopic) Prefix() string {
	return "env"
}
type envVar struct {
Name string `json:"name"`
Value string `json:"value"`
}
func getVars() []envVar {
env := os.Environ()
sort.Strings(env)
result := []envVar{}
for _, v := range env {
eq := strings.Index(v, "=")
name := v[:eq] //nolint
value := v[eq+1:]
result = append(result, envVar{Name: name, Value: value})
}
return result
}
// Activate registers the HTML and JSON handlers that render the environment.
func (envTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/env.html")
	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fw.RenderHTML(w, tmpl, getVars())
	})
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("GET").Path("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		fw.RenderJSON(w, http.StatusOK, getVars())
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package topics defines several canonical ControlZ topics.
package topics
import (
"net/http"
"runtime"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// memTopic is a stateless Topic that displays runtime memory statistics.
type memTopic struct{}

// MemTopic returns a ControlZ topic that allows visualization of process memory usage.
func MemTopic() fw.Topic {
	return memTopic{}
}

// Title is the implementation of Topic.Title.
func (memTopic) Title() string {
	return "Memory Usage"
}

// Prefix is the implementation of Topic.Prefix.
func (memTopic) Prefix() string {
	return "mem"
}
// Activate registers the HTML/JSON views of runtime.MemStats plus a PUT
// endpoint that forces a garbage collection.
func (memTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/mem.html")
	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ms := &runtime.MemStats{}
		runtime.ReadMemStats(ms)
		fw.RenderHTML(w, tmpl, ms)
	})
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("GET").Path("/").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		ms := &runtime.MemStats{}
		runtime.ReadMemStats(ms)
		fw.RenderJSON(w, http.StatusOK, ms)
	})
	// Forces a GC cycle; responds before collection necessarily completes.
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("PUT").Path("/forcecollection").HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		runtime.GC()
		w.WriteHeader(http.StatusAccepted)
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"net/http"
"os"
"runtime"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// procTopic serves the ControlZ "proc" topic; it is stateless, so an empty
// struct suffices.
type procTopic struct{}
// ProcTopic returns a ControlZ topic that allows visualization of process state.
func ProcTopic() fw.Topic {
	return procTopic{}
}
// Title returns the human-readable name shown in the ControlZ UI.
func (procTopic) Title() string {
	return "Process Info"
}
// Prefix returns the URL path segment under which this topic is served.
func (procTopic) Prefix() string {
	return "proc"
}
// procInfo is the JSON/HTML-serializable snapshot of process state served by
// the "proc" ControlZ topic.
type procInfo struct {
	Egid       int    `json:"egid"`   // effective group ID
	Euid       int    `json:"euid"`   // effective user ID
	GID        int    `json:"gid"`    // real group ID
	Groups     []int  `json:"groups"` // supplementary group IDs
	Pid        int    `json:"pid"`
	Ppid       int    `json:"ppid"`
	UID        int    `json:"uid"` // real user ID
	Wd         string `json:"wd"`  // current working directory
	Hostname   string `json:"hostname"`
	TempDir    string `json:"tempdir"`
	Threads    int    `json:"threads"`    // OS thread count from the thread-creation profile
	Goroutines int    `json:"goroutines"` // current number of goroutines
}
// getProcInfo collects a point-in-time snapshot of process state.
// Errors from the os/runtime helpers are deliberately ignored; on failure the
// corresponding fields are simply left at their zero values.
func getProcInfo() *procInfo {
	pi := procInfo{
		Egid:       os.Getegid(),
		Euid:       os.Geteuid(),
		GID:        os.Getgid(),
		Pid:        os.Getpid(),
		Ppid:       os.Getppid(),
		UID:        os.Getuid(),
		TempDir:    os.TempDir(),
		Goroutines: runtime.NumGoroutine(),
	}
	pi.Groups, _ = os.Getgroups()
	// Bug fix: Wd previously called os.Hostname(), reporting the hostname in
	// the working-directory field. Use os.Getwd() as the field name implies.
	pi.Wd, _ = os.Getwd()
	pi.Hostname, _ = os.Hostname()
	pi.Threads, _ = runtime.ThreadCreateProfile(nil)
	return &pi
}
// Activate registers the process-info topic's HTML and JSON routes.
func (procTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/proc.html")

	htmlHandler := func(w http.ResponseWriter, req *http.Request) {
		fw.RenderHTML(w, tmpl, getProcInfo())
	}
	jsonHandler := func(w http.ResponseWriter, req *http.Request) {
		fw.RenderJSON(w, http.StatusOK, getProcInfo())
	}

	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(htmlHandler)
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("GET").Path("/").HandlerFunc(jsonHandler)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"github.com/gorilla/mux"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
"istio.io/istio/pkg/log"
)
// scopeTopic serves the ControlZ "scope" topic, which exposes and mutates the
// process' logging scopes.
type scopeTopic struct{}
// scopeInfo is the JSON-serializable view of a single logging scope.
type scopeInfo struct {
	Name            string `json:"name"`
	Description     string `json:"description"`
	OutputLevel     string `json:"output_level"`      // one of "debug", "info", "warn", "error", "none"
	StackTraceLevel string `json:"stack_trace_level"` // same value set as OutputLevel
	LogCallers      bool   `json:"log_callers"`
}
// levelToString maps log levels to the wire strings used in scopeInfo.
var levelToString = map[log.Level]string{
	log.DebugLevel: "debug",
	log.InfoLevel:  "info",
	log.WarnLevel:  "warn",
	log.ErrorLevel: "error",
	log.NoneLevel:  "none",
}
// stringToLevel is the inverse of levelToString, used to parse client updates.
var stringToLevel = map[string]log.Level{
	"debug": log.DebugLevel,
	"info":  log.InfoLevel,
	"warn":  log.WarnLevel,
	"error": log.ErrorLevel,
	"none":  log.NoneLevel,
}
// ScopeTopic returns a ControlZ topic that allows visualization of process logging scopes.
func ScopeTopic() fw.Topic {
	return scopeTopic{}
}
// Title returns the human-readable name shown in the ControlZ UI.
func (scopeTopic) Title() string {
	return "Logging Scopes"
}
// Prefix returns the URL path segment under which this topic is served.
func (scopeTopic) Prefix() string {
	return "scope"
}
// getScopeInfo converts a logging scope into its serializable description.
func getScopeInfo(s *log.Scope) *scopeInfo {
	info := &scopeInfo{}
	info.Name = s.Name()
	info.Description = s.Description()
	info.OutputLevel = levelToString[s.GetOutputLevel()]
	info.StackTraceLevel = levelToString[s.GetStackTraceLevel()]
	info.LogCallers = s.GetLogCallers()
	return info
}
// Activate registers the scope topic's HTML page and its JSON read/update routes.
func (scopeTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/scopes.html")

	htmlHandler := func(w http.ResponseWriter, req *http.Request) {
		scopes := log.Scopes()
		infos := make([]scopeInfo, 0, len(scopes))
		for _, sc := range scopes {
			infos = append(infos, *getScopeInfo(sc))
		}
		sort.Slice(infos, func(a, b int) bool { return infos[a].Name < infos[b].Name })
		fw.RenderHTML(w, tmpl, infos)
	}
	_ = context.HTMLRouter().NewRoute().HandlerFunc(htmlHandler)

	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("GET").Path("/").HandlerFunc(getAllScopes)
	_ = context.JSONRouter().NewRoute().Methods("GET").Path("/{scope}").HandlerFunc(getScope)
	_ = context.JSONRouter().NewRoute().Methods("PUT").Path("/{scope}").HandlerFunc(putScope)
}
// getAllScopes renders every logging scope as JSON, sorted by scope name.
func getAllScopes(w http.ResponseWriter, _ *http.Request) {
	scopes := log.Scopes()
	infos := make([]scopeInfo, 0, len(scopes))
	for _, sc := range scopes {
		infos = append(infos, *getScopeInfo(sc))
	}
	sort.Slice(infos, func(a, b int) bool { return infos[a].Name < infos[b].Name })
	fw.RenderJSON(w, http.StatusOK, infos)
}
// getScope renders a single scope, looked up by the {scope} path variable.
// An unknown scope name yields a 400 with an explanatory error.
func getScope(w http.ResponseWriter, req *http.Request) {
	name := mux.Vars(req)["scope"]
	s := log.FindScope(name)
	if s == nil {
		fw.RenderError(w, http.StatusBadRequest, fmt.Errorf("unknown scope name: %s", name))
		return
	}
	fw.RenderJSON(w, http.StatusOK, getScopeInfo(s))
}
// putScope updates a scope's output level, stack-trace level, and caller
// logging from the JSON request body. Unrecognized level strings are ignored,
// leaving the corresponding setting untouched. An unknown scope name or an
// undecodable body yields a 400.
func putScope(w http.ResponseWriter, req *http.Request) {
	name := mux.Vars(req)["scope"]

	var body scopeInfo
	if err := json.NewDecoder(req.Body).Decode(&body); err != nil {
		fw.RenderError(w, http.StatusBadRequest, fmt.Errorf("unable to decode request: %v", err))
		return
	}

	s := log.FindScope(name)
	if s == nil {
		fw.RenderError(w, http.StatusBadRequest, fmt.Errorf("unknown scope name: %s", name))
		return
	}

	if level, ok := stringToLevel[body.OutputLevel]; ok {
		s.SetOutputLevel(level)
	}
	if level, ok := stringToLevel[body.StackTraceLevel]; ok {
		s.SetStackTraceLevel(level)
	}
	s.SetLogCallers(body.LogCallers)
	w.WriteHeader(http.StatusAccepted)
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"fmt"
"net/http"
"syscall"
"istio.io/istio/pkg/appsignals"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
)
// signalsTopic serves the ControlZ "signal" topic, used to deliver command
// signals to the running process.
type signalsTopic struct{}
// SignalsTopic returns a ControlZ topic that sends command signals to the process
func SignalsTopic() fw.Topic {
	return signalsTopic{}
}
// Title returns the human-readable name shown in the ControlZ UI.
func (signalsTopic) Title() string {
	return "Signals"
}
// Prefix returns the URL path segment under which this topic is served.
func (signalsTopic) Prefix() string {
	return "signal"
}
// Activate registers the signals topic's routes: an HTML page and a JSON
// endpoint that broadcasts SIGUSR1 via appsignals (POSIX-only signal).
func (signalsTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/signals.html")

	pageHandler := func(w http.ResponseWriter, req *http.Request) {
		fw.RenderHTML(w, tmpl, nil)
	}
	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(pageHandler)

	signalHandler := func(w http.ResponseWriter, req *http.Request) {
		appsignals.Notify(fmt.Sprintf("Remote: %v", req.RemoteAddr), syscall.SIGUSR1)
		w.WriteHeader(http.StatusAccepted)
	}
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("PUT", "POST").Path("/SIGUSR1").HandlerFunc(signalHandler)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package topics
import (
"net/http"
"istio.io/istio/pkg/ctrlz/fw"
"istio.io/istio/pkg/ctrlz/topics/assets"
"istio.io/istio/pkg/version"
)
// versionTopic serves the ControlZ "version" topic; it is stateless, so an
// empty struct suffices.
type versionTopic struct{}
// VersionTopic returns a ControlZ topic that allows visualization of versioning info.
func VersionTopic() fw.Topic {
	return versionTopic{}
}
// Title returns the human-readable name shown in the ControlZ UI.
func (versionTopic) Title() string {
	return "Version Info"
}
// Prefix returns the URL path segment under which this topic is served.
func (versionTopic) Prefix() string {
	return "version"
}
// Activate registers the version topic's HTML and JSON routes, both of which
// render the process' build/version information.
func (versionTopic) Activate(context fw.TopicContext) {
	tmpl := assets.ParseTemplate(context.Layout(), "templates/version.html")

	htmlHandler := func(w http.ResponseWriter, req *http.Request) {
		fw.RenderHTML(w, tmpl, &version.Info)
	}
	jsonHandler := func(w http.ResponseWriter, req *http.Request) {
		fw.RenderJSON(w, http.StatusOK, &version.Info)
	}

	_ = context.HTMLRouter().StrictSlash(true).NewRoute().Path("/").HandlerFunc(htmlHandler)
	_ = context.JSONRouter().StrictSlash(true).NewRoute().Methods("GET").Path("/").HandlerFunc(jsonHandler)
}
// Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc (unknown)
// source: dns/proto/nds.proto
package istio_networking_nds_v1
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// Table of hostnames and their IPs to be used for DNS resolution at the agent
// Sent by istiod to istio agents via xds
type NameTable struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Map of hostname to resolution attributes.
	Table map[string]*NameTable_NameInfo `protobuf:"bytes,1,rep,name=table,proto3" json:"table,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *NameTable) Reset() {
	*x = NameTable{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dns_proto_nds_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
func (x *NameTable) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NameTable) ProtoMessage() {}
func (x *NameTable) ProtoReflect() protoreflect.Message {
	mi := &file_dns_proto_nds_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NameTable.ProtoReflect.Descriptor instead.
func (*NameTable) Descriptor() ([]byte, []int) {
	return file_dns_proto_nds_proto_rawDescGZIP(), []int{0}
}
func (x *NameTable) GetTable() map[string]*NameTable_NameInfo {
	if x != nil {
		return x.Table
	}
	return nil
}
// NameTable_NameInfo holds the DNS resolution attributes for a single host.
type NameTable_NameInfo struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// List of IPs for the host.
	Ips []string `protobuf:"bytes,1,rep,name=ips,proto3" json:"ips,omitempty"`
	// The name of the service registry containing the service (e.g. 'Kubernetes').
	Registry string `protobuf:"bytes,2,opt,name=registry,proto3" json:"registry,omitempty"`
	// The k8s service name. Only applies when registry=`Kubernetes`
	Shortname string `protobuf:"bytes,3,opt,name=shortname,proto3" json:"shortname,omitempty"`
	// The k8s namespace for the service. Only applies when registry=`Kubernetes`
	Namespace string `protobuf:"bytes,4,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// Deprecated. Was added for experimentation only.
	//
	// Deprecated: Marked as deprecated in dns/proto/nds.proto.
	AltHosts []string `protobuf:"bytes,5,rep,name=alt_hosts,json=altHosts,proto3" json:"alt_hosts,omitempty"`
}
func (x *NameTable_NameInfo) Reset() {
	*x = NameTable_NameInfo{}
	if protoimpl.UnsafeEnabled {
		mi := &file_dns_proto_nds_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
func (x *NameTable_NameInfo) String() string {
	return protoimpl.X.MessageStringOf(x)
}
func (*NameTable_NameInfo) ProtoMessage() {}
func (x *NameTable_NameInfo) ProtoReflect() protoreflect.Message {
	mi := &file_dns_proto_nds_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Deprecated: Use NameTable_NameInfo.ProtoReflect.Descriptor instead.
func (*NameTable_NameInfo) Descriptor() ([]byte, []int) {
	return file_dns_proto_nds_proto_rawDescGZIP(), []int{0, 0}
}
func (x *NameTable_NameInfo) GetIps() []string {
	if x != nil {
		return x.Ips
	}
	return nil
}
func (x *NameTable_NameInfo) GetRegistry() string {
	if x != nil {
		return x.Registry
	}
	return ""
}
func (x *NameTable_NameInfo) GetShortname() string {
	if x != nil {
		return x.Shortname
	}
	return ""
}
func (x *NameTable_NameInfo) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}
// Deprecated: Marked as deprecated in dns/proto/nds.proto.
func (x *NameTable_NameInfo) GetAltHosts() []string {
	if x != nil {
		return x.AltHosts
	}
	return nil
}
var File_dns_proto_nds_proto protoreflect.FileDescriptor
var file_dns_proto_nds_proto_rawDesc = []byte{
0x0a, 0x13, 0x64, 0x6e, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6e, 0x64, 0x73, 0x2e,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x6e, 0x65, 0x74,
0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x6e, 0x64, 0x73, 0x2e, 0x76, 0x31, 0x22, 0xcf,
0x02, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x43, 0x0a, 0x05,
0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x69, 0x73,
0x74, 0x69, 0x6f, 0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x6e,
0x64, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e,
0x54, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x74, 0x61, 0x62, 0x6c,
0x65, 0x1a, 0x95, 0x01, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10,
0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x69, 0x70, 0x73,
0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09,
0x73, 0x68, 0x6f, 0x72, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x09, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61,
0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e,
0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x09, 0x61, 0x6c, 0x74, 0x5f,
0x68, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52,
0x08, 0x61, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x1a, 0x65, 0x0a, 0x0a, 0x54, 0x61, 0x62,
0x6c, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x41, 0x0a, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f,
0x2e, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x2e, 0x6e, 0x64, 0x73, 0x2e,
0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x4e, 0x61, 0x6d,
0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
0x42, 0x36, 0x5a, 0x34, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x69, 0x6f, 0x2f, 0x69, 0x73, 0x74,
0x69, 0x6f, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x64, 0x6e, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x2f, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x69, 0x6e,
0x67, 0x5f, 0x6e, 0x64, 0x73, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
	file_dns_proto_nds_proto_rawDescOnce sync.Once
	file_dns_proto_nds_proto_rawDescData = file_dns_proto_nds_proto_rawDesc
)
// file_dns_proto_nds_proto_rawDescGZIP gzip-compresses the raw file descriptor
// exactly once and returns the compressed bytes (generated accessor).
func file_dns_proto_nds_proto_rawDescGZIP() []byte {
	file_dns_proto_nds_proto_rawDescOnce.Do(func() {
		file_dns_proto_nds_proto_rawDescData = protoimpl.X.CompressGZIP(file_dns_proto_nds_proto_rawDescData)
	})
	return file_dns_proto_nds_proto_rawDescData
}
var file_dns_proto_nds_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_dns_proto_nds_proto_goTypes = []interface{}{
	(*NameTable)(nil),          // 0: istio.networking.nds.v1.NameTable
	(*NameTable_NameInfo)(nil), // 1: istio.networking.nds.v1.NameTable.NameInfo
	nil,                        // 2: istio.networking.nds.v1.NameTable.TableEntry
}
var file_dns_proto_nds_proto_depIdxs = []int32{
	2, // 0: istio.networking.nds.v1.NameTable.table:type_name -> istio.networking.nds.v1.NameTable.TableEntry
	1, // 1: istio.networking.nds.v1.NameTable.TableEntry.value:type_name -> istio.networking.nds.v1.NameTable.NameInfo
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
func init() { file_dns_proto_nds_proto_init() }
// file_dns_proto_nds_proto_init registers this file's generated types with the
// protobuf runtime. It is idempotent (generated code).
func file_dns_proto_nds_proto_init() {
	if File_dns_proto_nds_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_dns_proto_nds_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*NameTable); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_dns_proto_nds_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*NameTable_NameInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_dns_proto_nds_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   3,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_dns_proto_nds_proto_goTypes,
		DependencyIndexes: file_dns_proto_nds_proto_depIdxs,
		MessageInfos:      file_dns_proto_nds_proto_msgTypes,
	}.Build()
	File_dns_proto_nds_proto = out.File
	file_dns_proto_nds_proto_rawDesc = nil
	file_dns_proto_nds_proto_goTypes = nil
	file_dns_proto_nds_proto_depIdxs = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"strings"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/provider"
"istio.io/istio/pkg/config/constants"
dnsProto "istio.io/istio/pkg/dns/proto"
netutil "istio.io/istio/pkg/util/net"
)
// Config for building the name table.
type Config struct {
	// Node is the proxy the name table is built for.
	Node *model.Proxy
	// Push is the push context used to look up endpoints for headless services.
	Push *model.PushContext
	// MulticlusterHeadlessEnabled if true, the DNS name table for a headless service will resolve to
	// same-network endpoints in any cluster.
	MulticlusterHeadlessEnabled bool
}
// BuildNameTable produces a table of hostnames and their associated IPs that can then
// be used by the agent to resolve DNS. This logic is always active. However, local DNS resolution
// will only be effective if DNS capture is enabled in the proxy.
//
// Returns nil for non-sidecar proxies. For each service in the node's sidecar
// scope, the table gets: the service VIP when one is assigned; otherwise, for
// passthrough (headless) services, the endpoint IPs — plus per-pod entries
// ("<hostname>.<subdomain>...") when endpoints carry a subdomain.
func BuildNameTable(cfg Config) *dnsProto.NameTable {
	if cfg.Node.Type != model.SidecarProxy {
		// DNS resolution is only for sidecars
		return nil
	}
	out := &dnsProto.NameTable{
		Table: make(map[string]*dnsProto.NameTable_NameInfo),
	}
	for _, svc := range cfg.Node.SidecarScope.Services() {
		svcAddress := svc.GetAddressForProxy(cfg.Node)
		var addressList []string
		hostName := svc.Hostname
		if svcAddress != constants.UnspecifiedIP {
			// Filter out things we cannot parse as IP. Generally this means CIDRs, as anything else
			// should be caught in validation.
			if !netutil.IsValidIPAddress(svcAddress) {
				continue
			}
			addressList = append(addressList, svcAddress)
		} else {
			// The IP will be unspecified here if its headless service or if the auto
			// IP allocation logic for service entry was unable to allocate an IP.
			if svc.Resolution == model.Passthrough && len(svc.Ports) > 0 {
				for _, instance := range cfg.Push.ServiceEndpointsByPort(svc, svc.Ports[0].Port, nil) {
					// empty addresses are possible here
					if !netutil.IsValidIPAddress(instance.Address) {
						continue
					}
					// TODO(stevenctl): headless across-networks https://github.com/istio/istio/issues/38327
					sameNetwork := cfg.Node.InNetwork(instance.Network)
					sameCluster := cfg.Node.InCluster(instance.Locality.ClusterID)
					// For all k8s headless services, populate the dns table with the endpoint IPs as k8s does.
					// And for each individual pod, populate the dns table with the endpoint IP with a manufactured host name.
					if instance.SubDomain != "" && sameNetwork {
						// Follow k8s pods dns naming convention of "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>"
						// i.e. "mysql-0.mysql.default.svc.cluster.local".
						parts := strings.SplitN(hostName.String(), ".", 2)
						if len(parts) != 2 {
							continue
						}
						address := []string{instance.Address}
						shortName := instance.HostName + "." + instance.SubDomain
						host := shortName + "." + parts[1] // Add cluster domain.
						nameInfo := &dnsProto.NameTable_NameInfo{
							Ips:       address,
							Registry:  string(svc.Attributes.ServiceRegistry),
							Namespace: svc.Attributes.Namespace,
							Shortname: shortName,
						}
						if _, f := out.Table[host]; !f || sameCluster {
							// We may have the same pod in two clusters (ie mysql-0 deployed in both places).
							// We can only return a single IP for these queries. We should prefer the local cluster,
							// so if the entry already exists only overwrite it if the instance is in our own cluster.
							out.Table[host] = nameInfo
						}
					}
					skipForMulticluster := !cfg.MulticlusterHeadlessEnabled && !sameCluster
					if skipForMulticluster || !sameNetwork {
						// We take only cluster-local endpoints. While this seems contradictory to
						// our logic other parts of the code, where cross-cluster is the default.
						// However, this only impacts the DNS response. If we were to send all
						// endpoints, cross network routing would break, as we do passthrough LB and
						// don't go through the network gateway. While we could, hypothetically, send
						// "network-local" endpoints, this would still make enabling DNS give vastly
						// different load balancing than without, so its probably best to filter.
						// This ends up matching the behavior of Kubernetes DNS.
						continue
					}
					// TODO: should we skip the node's own IP like we do in listener?
					addressList = append(addressList, instance.Address)
				}
			}
			if len(addressList) == 0 {
				// could not reliably determine the addresses of endpoints of headless service
				// or this is not a k8s service
				continue
			}
		}
		if ni, f := out.Table[hostName.String()]; !f {
			nameInfo := &dnsProto.NameTable_NameInfo{
				Ips:      addressList,
				Registry: string(svc.Attributes.ServiceRegistry),
			}
			if svc.Attributes.ServiceRegistry == provider.Kubernetes &&
				!strings.HasSuffix(hostName.String(), "."+constants.DefaultClusterSetLocalDomain) {
				// The agent will take care of resolving a, a.ns, a.ns.svc, etc.
				// No need to provide a DNS entry for each variant.
				//
				// NOTE: This is not done for Kubernetes Multi-Cluster Services (MCS) hosts, in order
				// to avoid conflicting with the entries for the regular (cluster.local) service.
				nameInfo.Namespace = svc.Attributes.Namespace
				nameInfo.Shortname = svc.Attributes.Name
			}
			out.Table[hostName.String()] = nameInfo
		} else if provider.ID(ni.Registry) != provider.Kubernetes {
			// 2 possible cases:
			// 1. If the SE has multiple addresses(vips) specified, merge the ips
			// 2. If the previous SE is a decorator of the k8s service, give precedence to the k8s service
			if svc.Attributes.ServiceRegistry == provider.Kubernetes {
				ni.Ips = addressList
				ni.Registry = string(provider.Kubernetes)
				if !strings.HasSuffix(hostName.String(), "."+constants.DefaultClusterSetLocalDomain) {
					ni.Namespace = svc.Attributes.Namespace
					ni.Shortname = svc.Attributes.Name
				}
			} else {
				ni.Ips = append(ni.Ips, addressList...)
			}
		}
	}
	return out
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package env makes it possible to track use of environment variables within a procress
// in order to generate documentation for these uses.
package env
import (
"encoding/json"
"fmt"
"os"
"sort"
"strconv"
"sync"
"time"
"istio.io/istio/pkg/log"
)
// VarType describes the type of a variable's value.
type VarType byte
const (
	// STRING means the variable holds a free-form string.
	STRING VarType = iota
	// BOOL means the variable holds a boolean value.
	BOOL
	// INT means the variable holds a signed integer.
	INT
	// FLOAT means the variable holds a floating point value.
	FLOAT
	// DURATION means the variable holds a time duration.
	DURATION
	// OTHER means the variable holds a dynamic unknown type.
	OTHER
)
// Var describes a single environment variable.
type Var struct {
	// The name of the environment variable.
	Name string
	// The optional default value of the environment variable.
	DefaultValue string
	// Description of the environment variable's purpose.
	Description string
	// Hide the existence of this variable when outputting usage information.
	Hidden bool
	// Mark this variable as deprecated when generating usage information.
	Deprecated bool
	// The type of the variable's value.
	Type VarType
	// The underlying Go type of the variable; only set by the generic
	// Register path for non-primitive types (via %T).
	GoType string
}
// StringVar represents a single string environment variable.
type StringVar struct {
	Var
}
// BoolVar represents a single boolean environment variable.
type BoolVar struct {
	Var
}
// IntVar represents a single integer environment variable.
type IntVar struct {
	Var
}
// FloatVar represents a single floating-point environment variable.
type FloatVar struct {
	Var
}
// DurationVar represents a single duration environment variable.
type DurationVar struct {
	Var
}
var (
	// allVars is the process-wide registry of declared environment variables, keyed by name.
	allVars = make(map[string]Var)
	// mutex guards access to allVars.
	mutex sync.Mutex
)
// VarDescriptions returns a description of this process' environment variables, sorted by name.
func VarDescriptions() []Var {
	// Take a snapshot under the lock, then sort outside of it.
	mutex.Lock()
	snapshot := make([]Var, 0, len(allVars))
	for _, v := range allVars {
		snapshot = append(snapshot, v)
	}
	mutex.Unlock()

	sort.Slice(snapshot, func(a, b int) bool { return snapshot[a].Name < snapshot[b].Name })
	return snapshot
}
// Parseable enumerates the value types usable with GenericVar; any comparable
// type is accepted, with non-primitive types round-tripped through JSON.
type Parseable interface {
	comparable
}
// GenericVar is a strongly typed environment variable.
type GenericVar[T Parseable] struct {
	Var
	// delegate handles type-specific Get/Lookup for the primitive
	// specializations (string, bool, int, float64, time.Duration); it is nil
	// for JSON-backed types. NOTE(review): specializedVar is declared
	// elsewhere in this package.
	delegate specializedVar[T]
}
// Register registers a new environment variable of any Parseable type.
// Primitive types are routed to the matching specialized Register*Var helper;
// all other types are stored with a JSON-encoded default value.
func Register[T Parseable](name string, defaultValue T, description string) GenericVar[T] {
	// Specialized cases
	// In the future, once only Register() remains, we can likely drop most of these.
	// however, time.Duration is needed still as it doesn't implement json
	switch d := any(defaultValue).(type) {
	case time.Duration:
		v := RegisterDurationVar(name, d, description)
		return GenericVar[T]{v.Var, any(v).(specializedVar[T])}
	case string:
		v := RegisterStringVar(name, d, description)
		return GenericVar[T]{v.Var, any(v).(specializedVar[T])}
	case float64:
		v := RegisterFloatVar(name, d, description)
		return GenericVar[T]{v.Var, any(v).(specializedVar[T])}
	case int:
		v := RegisterIntVar(name, d, description)
		return GenericVar[T]{v.Var, any(v).(specializedVar[T])}
	case bool:
		v := RegisterBoolVar(name, d, description)
		return GenericVar[T]{v.Var, any(v).(specializedVar[T])}
	}
	// Marshal error deliberately ignored: the JSON encoding is only used as a
	// display string for the default value.
	b, _ := json.Marshal(defaultValue)
	v := Var{Name: name, DefaultValue: string(b), Description: description, Type: STRING, GoType: fmt.Sprintf("%T", defaultValue)}
	RegisterVar(v)
	return GenericVar[T]{getVar(name), nil}
}
// RegisterStringVar registers a new string environment variable.
// The returned wrapper carries the metadata actually stored in the registry
// (via getVar), which may be an earlier registration's — see RegisterVar.
func RegisterStringVar(name string, defaultValue string, description string) StringVar {
	v := Var{Name: name, DefaultValue: defaultValue, Description: description, Type: STRING}
	RegisterVar(v)
	return StringVar{getVar(name)}
}
// RegisterBoolVar registers a new boolean environment variable.
// The returned wrapper carries the metadata actually stored in the registry.
func RegisterBoolVar(name string, defaultValue bool, description string) BoolVar {
	v := Var{Name: name, DefaultValue: strconv.FormatBool(defaultValue), Description: description, Type: BOOL}
	RegisterVar(v)
	return BoolVar{getVar(name)}
}
// RegisterIntVar registers a new integer environment variable.
// The returned wrapper carries the metadata actually stored in the registry.
func RegisterIntVar(name string, defaultValue int, description string) IntVar {
	v := Var{Name: name, DefaultValue: strconv.FormatInt(int64(defaultValue), 10), Description: description, Type: INT}
	RegisterVar(v)
	return IntVar{getVar(name)}
}
// RegisterFloatVar registers a new floating-point environment variable.
// The returned wrapper carries the metadata actually stored in the registry.
func RegisterFloatVar(name string, defaultValue float64, description string) FloatVar {
	v := Var{Name: name, DefaultValue: strconv.FormatFloat(defaultValue, 'G', -1, 64), Description: description, Type: FLOAT}
	RegisterVar(v)
	// Consistency fix: return the registered metadata via getVar(name), as
	// every other Register*Var does. Previously this returned the local v,
	// which could diverge from the stored Var on duplicate registration.
	return FloatVar{getVar(name)}
}
// RegisterDurationVar registers a new duration environment variable.
// The returned wrapper carries the metadata actually stored in the registry.
func RegisterDurationVar(name string, defaultValue time.Duration, description string) DurationVar {
	v := Var{Name: name, DefaultValue: defaultValue.String(), Description: description, Type: DURATION}
	RegisterVar(v)
	return DurationVar{getVar(name)}
}
// RegisterVar registers a generic environment variable. When the same name is
// registered more than once, the last registration with a non-empty
// description wins, and a warning is logged if the metadata differs.
func RegisterVar(v Var) {
	mutex.Lock()
	defer mutex.Unlock()

	old, exists := allVars[v.Name]
	if !exists {
		allVars[v.Name] = v
		return
	}
	if v.Description != "" {
		allVars[v.Name] = v // last one with a description wins if the same variable name is registered multiple times
	}
	if old.Description != v.Description || old.DefaultValue != v.DefaultValue || old.Type != v.Type || old.Deprecated != v.Deprecated || old.Hidden != v.Hidden {
		log.Warnf("The environment variable %s was registered multiple times using different metadata: %v, %v", v.Name, old, v)
	}
}
// getVar returns the currently registered metadata for the named variable
// (the zero Var if it was never registered).
func getVar(name string) Var {
	mutex.Lock()
	defer mutex.Unlock()
	return allVars[name]
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v StringVar) Get() string {
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the
// variable is present in the environment the
// value (which may be empty) is returned and the boolean is true.
// Otherwise the returned value will be the default and the boolean will
// be false.
func (v StringVar) Lookup() (string, bool) {
	if value, ok := os.LookupEnv(v.Name); ok {
		return value, true
	}
	return v.DefaultValue, false
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v BoolVar) Get() bool {
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the variable is
// present in the environment the value (which may be empty) is returned and
// the boolean is true. Otherwise the returned value will be the default and
// the boolean will be false. Unparsable values fall back to the default with
// a warning.
func (v BoolVar) Lookup() (bool, bool) {
	raw, present := os.LookupEnv(v.Name)
	if !present {
		raw = v.DefaultValue
	}
	parsed, err := strconv.ParseBool(raw)
	if err == nil {
		return parsed, present
	}
	log.Warnf("Invalid environment variable value `%s`, expecting true/false, defaulting to %v", raw, v.DefaultValue)
	parsed, _ = strconv.ParseBool(v.DefaultValue)
	return parsed, present
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v IntVar) Get() int {
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the variable is
// present in the environment the value (which may be empty) is returned and
// the boolean is true. Otherwise the returned value will be the default and
// the boolean will be false. Unparsable values fall back to the default with
// a warning.
func (v IntVar) Lookup() (int, bool) {
	raw, present := os.LookupEnv(v.Name)
	if !present {
		raw = v.DefaultValue
	}
	parsed, err := strconv.Atoi(raw)
	if err == nil {
		return parsed, present
	}
	log.Warnf("Invalid environment variable value `%s`, expecting an integer, defaulting to %v", raw, v.DefaultValue)
	parsed, _ = strconv.Atoi(v.DefaultValue)
	return parsed, present
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v FloatVar) Get() float64 {
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the variable is
// present in the environment the value (which may be empty) is returned and
// the boolean is true. Otherwise the returned value will be the default and
// the boolean will be false. Unparsable values fall back to the default with
// a warning.
func (v FloatVar) Lookup() (float64, bool) {
	raw, present := os.LookupEnv(v.Name)
	if !present {
		raw = v.DefaultValue
	}
	parsed, err := strconv.ParseFloat(raw, 64)
	if err == nil {
		return parsed, present
	}
	log.Warnf("Invalid environment variable value `%s`, expecting a floating-point value, defaulting to %v", raw, v.DefaultValue)
	parsed, _ = strconv.ParseFloat(v.DefaultValue, 64)
	return parsed, present
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v DurationVar) Get() time.Duration {
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the variable is
// present in the environment the value (which may be empty) is returned and
// the boolean is true. Otherwise the returned value will be the default and
// the boolean will be false. Unparsable values fall back to the default with
// a warning.
func (v DurationVar) Lookup() (time.Duration, bool) {
	raw, present := os.LookupEnv(v.Name)
	if !present {
		raw = v.DefaultValue
	}
	parsed, err := time.ParseDuration(raw)
	if err == nil {
		return parsed, present
	}
	log.Warnf("Invalid environment variable value `%s`, expecting a duration, defaulting to %v", raw, v.DefaultValue)
	parsed, _ = time.ParseDuration(v.DefaultValue)
	return parsed, present
}
// Get retrieves the value of the environment variable.
// It returns the value, which will be the default if the variable is not present.
// To distinguish between an empty value and an unset value, use Lookup.
func (v GenericVar[T]) Get() T {
	if d := v.delegate; d != nil {
		return d.Get()
	}
	value, _ := v.Lookup()
	return value
}
// Lookup retrieves the value of the environment variable. If the variable is
// present in the environment the value (which may be empty) is returned and
// the boolean is true. Otherwise the returned value will be the default and
// the boolean will be false. Values are decoded from JSON; unparsable values
// fall back to the default with a warning.
func (v GenericVar[T]) Lookup() (T, bool) {
	if d := v.delegate; d != nil {
		return d.Lookup()
	}
	raw, present := os.LookupEnv(v.Name)
	if !present {
		raw = v.DefaultValue
	}
	decoded := new(T)
	if err := json.Unmarshal([]byte(raw), decoded); err != nil {
		log.Warnf("Invalid environment variable value `%s` defaulting to %v: %v", raw, v.DefaultValue, err)
		// Best effort: the default is assumed to be valid JSON for T.
		_ = json.Unmarshal([]byte(v.DefaultValue), decoded)
	}
	return *decoded, present
}
// IsSet reports whether the variable is present in the environment.
func (v GenericVar[T]) IsSet() bool {
	_, present := v.Lookup()
	return present
}
// GetName returns the environment variable's name (see VariableInfo).
func (v GenericVar[T]) GetName() string {
	return v.Var.Name
}
// specializedVar represents a var that can Get/Lookup
type specializedVar[T any] interface {
	// Lookup returns the value and whether the variable was set in the environment.
	Lookup() (T, bool)
	// Get returns the value, falling back to the default when unset.
	Get() T
}

// VariableInfo provides generic information about a variable. All Variables implement this interface.
// This is largely to workaround lack of covariance in Go.
type VariableInfo interface {
	GetName() string
	IsSet() bool
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package file
import (
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"

	// NOTE(review): this package was a fuzzing-instrumentation wrapper
	// (github.com/AdamKorcz/bugdetectors); it is no longer called directly and
	// is retained as a blank import only so the dependency list is unchanged.
	_ "github.com/AdamKorcz/bugdetectors/io"
)
// AtomicCopy copies file by reading the file then writing atomically into the target directory
func AtomicCopy(srcFilepath, targetDir, targetFilename string) error {
in, err := os.Open(srcFilepath)
if err != nil {
return err
}
defer in.Close()
perm, err := in.Stat()
if err != nil {
return err
}
input, err := io2.ReadAll(in, "/src/istio/pkg/file/file.go:39:16 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
if err != nil {
return err
}
return AtomicWrite(filepath.Join(targetDir, targetFilename), input, perm.Mode())
}
func Copy(srcFilepath, targetDir, targetFilename string) error {
in, err := os.Open(srcFilepath)
if err != nil {
return err
}
defer in.Close()
perm, err := in.Stat()
if err != nil {
return err
}
out, err := os.OpenFile(filepath.Join(targetDir, targetFilename), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm.Mode())
if err != nil {
return err
}
defer out.Close()
if _, err := io.Copy(out, in); err != nil {
return err
}
return nil
}
// Write atomically by writing to a temporary file in the same directory then renaming
//
// The temp file is created in the destination's directory so the final
// os.Rename stays on one filesystem (rename is only atomic within a volume).
// On any failure the temp file is removed; the removal error, if any, is
// chained onto the original error.
func AtomicWrite(path string, data []byte, mode os.FileMode) (err error) {
	tmpFile, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)+".tmp.")
	if err != nil {
		return
	}
	defer func() {
		// After a successful rename the temp name no longer exists, so Exists
		// is false and nothing is removed; this cleanup only fires on failure.
		if Exists(tmpFile.Name()) {
			if rmErr := os.Remove(tmpFile.Name()); rmErr != nil {
				if err != nil {
					err = fmt.Errorf("%s: %w", rmErr.Error(), err)
				} else {
					err = rmErr
				}
			}
		}
	}()
	// Apply the requested mode before writing any data, so the content is
	// never readable under CreateTemp's default permissions.
	if err = os.Chmod(tmpFile.Name(), mode); err != nil {
		return
	}
	_, err = tmpFile.Write(data)
	if err != nil {
		// Close even on write failure; chain the close error if it also fails.
		if closeErr := tmpFile.Close(); closeErr != nil {
			err = fmt.Errorf("%s: %w", closeErr.Error(), err)
		}
		return
	}
	if err = tmpFile.Close(); err != nil {
		return
	}
	err = os.Rename(tmpFile.Name(), path)
	return
}
func Exists(name string) bool {
// We must explicitly check if the error is due to the file not existing (as opposed to a
// permissions error).
_, err := os.Stat(name)
return !errors.Is(err, fs.ErrNotExist)
}
const (
	// PrivateFileMode grants owner to read/write a file (no group/other access).
	PrivateFileMode = 0o600
)
// DirEquals check if two directories are referring to the same directory
// by comparing their absolute forms; neither path needs to exist.
func DirEquals(a, b string) (bool, error) {
	absA, err := filepath.Abs(a)
	if err != nil {
		return false, err
	}
	absB, err := filepath.Abs(b)
	if err != nil {
		return false, err
	}
	return absA == absB, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filewatcher
import (
"errors"
"fmt"
"sync"
"github.com/fsnotify/fsnotify"
)
// NewFileWatcherFunc returns a function which creates a new file
// watcher. This may be used to provide test hooks for using the
// FakeWatcher implementation below.
type NewFileWatcherFunc func() FileWatcher

// FakeWatcher provides a fake file watcher implementation for unit
// tests. Production code should use the `NewWatcher()`.
type FakeWatcher struct {
	sync.Mutex
	// events and errors hold one buffered channel per watched path; the two
	// maps are always updated together (see Add/Remove).
	events map[string]chan fsnotify.Event
	errors map[string]chan error
	// changedFunc, when non-nil, is called after a path is added (added=true)
	// or removed (added=false).
	changedFunc func(path string, added bool)
}
// InjectEvent injects an event into the fake file watcher.
// Unknown paths are silently ignored; the send happens outside the lock.
func (w *FakeWatcher) InjectEvent(path string, event fsnotify.Event) {
	w.Lock()
	target, found := w.events[path]
	w.Unlock()
	if !found {
		return
	}
	target <- event
}
// InjectError injects an error into the fake file watcher.
// Unknown paths are silently ignored; the send happens outside the lock.
func (w *FakeWatcher) InjectError(path string, err error) {
	w.Lock()
	target, found := w.errors[path]
	w.Unlock()
	if !found {
		return
	}
	target <- err
}
// NewFakeWatcher returns a function which creates a new fake watcher for unit
// testing. This allows observe callers to inject events and errors per-watched
// path. changedFunc() provides a callback notification when a new watch is added
// or removed. Production code should use `NewWatcher()`.
func NewFakeWatcher(changedFunc func(path string, added bool)) (NewFileWatcherFunc, *FakeWatcher) {
	watcher := &FakeWatcher{
		events:      map[string]chan fsnotify.Event{},
		errors:      map[string]chan error{},
		changedFunc: changedFunc,
	}
	factory := func() FileWatcher { return watcher }
	return factory, watcher
}
// Add is a fake implementation of the FileWatcher interface. Adding the same
// path twice returns an error. The changedFunc callback (if any) is invoked
// outside the lock.
func (w *FakeWatcher) Add(path string) error {
	w.Lock()
	// w.events and w.errors are always updated togeather. We only check
	// the first to determine existence.
	_, exists := w.events[path]
	if exists {
		w.Unlock()
		return fmt.Errorf("path %v already exists", path)
	}
	w.events[path] = make(chan fsnotify.Event, 1000)
	w.errors[path] = make(chan error, 1000)
	w.Unlock()
	if w.changedFunc != nil {
		w.changedFunc(path, true)
	}
	return nil
}
// Remove is a fake implementation of the FileWatcher interface. Removing a
// path that is not watched returns an error.
func (w *FakeWatcher) Remove(path string) error {
	w.Lock()
	defer w.Unlock()
	_, present := w.events[path]
	if !present {
		return errors.New("path doesn't exist")
	}
	delete(w.events, path)
	delete(w.errors, path)
	if w.changedFunc != nil {
		w.changedFunc(path, false)
	}
	return nil
}
// Close is a fake implementation of the FileWatcher interface. It closes and
// forgets every per-path channel. Always returns nil.
func (w *FakeWatcher) Close() error {
	w.Lock()
	defer w.Unlock()
	// Deleting during range is safe in Go.
	for p, ch := range w.events {
		close(ch)
		delete(w.events, p)
	}
	for p, ch := range w.errors {
		close(ch)
		delete(w.errors, p)
	}
	return nil
}
// Events is a fake implementation of the FileWatcher interface.
// Returns nil for paths that were never added.
func (w *FakeWatcher) Events(path string) chan fsnotify.Event {
	w.Lock()
	ch := w.events[path]
	w.Unlock()
	return ch
}
// Errors is a fake implementation of the FileWatcher interface.
// Returns nil for paths that were never added.
func (w *FakeWatcher) Errors(path string) chan error {
	w.Lock()
	ch := w.errors[path]
	w.Unlock()
	return ch
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filewatcher
import (
"errors"
"fmt"
"path/filepath"
"sync"
"github.com/fsnotify/fsnotify"
)
// FileWatcher is an interface that watches a set of files,
// delivering events to related channel.
type FileWatcher interface {
	// Add starts watching a path. Both implementations in this package return
	// an error (they do not panic) when the path is already being watched.
	Add(path string) error
	// Remove stops watching a path. Both implementations return an error (they
	// do not panic) when the path is not currently being watched.
	Remove(path string) error
	// Close releases all resources; Add/Remove error afterward.
	Close() error
	// Events returns the event channel for path, or nil if path is not watched.
	Events(path string) chan fsnotify.Event
	// Errors returns the error channel for path, or nil if path is not watched.
	Errors(path string) chan error
}
// fileWatcher implements FileWatcher with one fsnotify watcher ("worker") per
// parent directory of the watched files.
type fileWatcher struct {
	// mu guards workers; a nil workers map marks the watcher as closed.
	mu sync.RWMutex
	// The watcher maintain a map of workers,
	// keyed by watched dir (parent dir of watched files).
	workers map[string]*workerState
	// funcs are replaceable fsnotify hooks (see patchTable).
	funcs *patchTable
}

// workerState pairs a directory worker with a count of the files it watches;
// the worker is terminated when the count drops to zero (see Remove).
type workerState struct {
	worker *worker
	count  int
}

// functions that can be replaced in a test setting
type patchTable struct {
	// newWatcher creates the underlying fsnotify watcher.
	newWatcher func() (*fsnotify.Watcher, error)
	// addWatcherPath registers a directory with the fsnotify watcher.
	addWatcherPath func(*fsnotify.Watcher, string) error
}
// NewWatcher return with a FileWatcher instance that implemented with fsnotify.
func NewWatcher() FileWatcher {
	// replaceable functions for tests
	hooks := &patchTable{
		newWatcher: fsnotify.NewWatcher,
		addWatcherPath: func(watcher *fsnotify.Watcher, path string) error {
			return watcher.Add(path)
		},
	}
	return &fileWatcher{
		workers: make(map[string]*workerState),
		funcs:   hooks,
	}
}
// Close releases all resources associated with the watcher
func (fw *fileWatcher) Close() error {
	fw.mu.Lock()
	defer fw.mu.Unlock()
	for _, state := range fw.workers {
		state.worker.terminate()
	}
	// A nil workers map marks the watcher closed; Add/Remove will now error.
	fw.workers = nil
	return nil
}
// Add a path to watch
func (fw *fileWatcher) Add(path string) error {
	fw.mu.Lock()
	defer fw.mu.Unlock()
	state, cleanedPath, _, err := fw.getWorker(path)
	if err != nil {
		return err
	}
	err = state.worker.addPath(cleanedPath)
	if err == nil {
		state.count++
	}
	return err
}
// Stop watching a path
func (fw *fileWatcher) Remove(path string) error {
	fw.mu.Lock()
	defer fw.mu.Unlock()
	state, cleanedPath, parentPath, err := fw.getWorker(path)
	if err != nil {
		return err
	}
	err = state.worker.removePath(cleanedPath)
	if err != nil {
		return err
	}
	state.count--
	// Last file under this directory: shut the worker down.
	if state.count == 0 {
		state.worker.terminate()
		delete(fw.workers, parentPath)
	}
	return nil
}
// Events returns an event notification channel for a path
// (nil when the path is not being watched or the watcher is closed).
func (fw *fileWatcher) Events(path string) chan fsnotify.Event {
	fw.mu.RLock()
	defer fw.mu.RUnlock()
	state, cleanedPath, err := fw.findWorker(path)
	if err != nil {
		return nil
	}
	return state.worker.eventChannel(cleanedPath)
}
// Errors returns an error notification channel for a path
// (nil when the path is not being watched or the watcher is closed).
func (fw *fileWatcher) Errors(path string) chan error {
	fw.mu.RLock()
	defer fw.mu.RUnlock()
	state, cleanedPath, err := fw.findWorker(path)
	if err != nil {
		return nil
	}
	return state.worker.errorChannel(cleanedPath)
}
// getWorker returns (creating on demand) the worker for path's parent
// directory, along with the cleaned path and the parent directory key.
// Callers must hold fw.mu for writing.
func (fw *fileWatcher) getWorker(path string) (*workerState, string, string, error) {
	if fw.workers == nil {
		return nil, "", "", errors.New("using a closed watcher")
	}
	cleanedPath := filepath.Clean(path)
	parentPath, _ := filepath.Split(cleanedPath)
	if state, ok := fw.workers[parentPath]; ok {
		return state, cleanedPath, parentPath, nil
	}
	wk, err := newWorker(parentPath, fw.funcs)
	if err != nil {
		return nil, "", "", err
	}
	state := &workerState{worker: wk}
	fw.workers[parentPath] = state
	return state, cleanedPath, parentPath, nil
}
// findWorker returns the existing worker for path's parent directory and the
// cleaned path; unlike getWorker it never creates one. Callers must hold
// fw.mu (read or write).
func (fw *fileWatcher) findWorker(path string) (*workerState, string, error) {
	if fw.workers == nil {
		return nil, "", errors.New("using a closed watcher")
	}
	cleanedPath := filepath.Clean(path)
	parentPath, _ := filepath.Split(cleanedPath)
	state, ok := fw.workers[parentPath]
	if !ok {
		return nil, "", fmt.Errorf("no path registered for %s", path)
	}
	return state, cleanedPath, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filewatcher
import (
"bufio"
"bytes"
"crypto/md5"
"fmt"
"io"
"os"
"sync"
"github.com/fsnotify/fsnotify"
)
// worker fans fsnotify events for one parent directory out to per-file
// channels, filtering by an MD5 digest so only real content changes are
// forwarded.
type worker struct {
	// mu guards watchedFiles.
	mu sync.RWMutex
	// watcher is an fsnotify watcher that watches the parent
	// dir of watchedFiles.
	dirWatcher *fsnotify.Watcher
	// The worker maintains a map of channels keyed by watched file path.
	// The worker watches parent path of given path,
	// and filters out events of given path, then redirect
	// to the result channel.
	// Note that for symlink files, the content in received events
	// do not have to be related to the file itself.
	watchedFiles map[string]*fileTracker
	// tracker lifecycle
	retireTrackerCh chan *fileTracker
	// tells the worker to exit
	terminateCh chan bool
}

// fileTracker carries the per-file channels and change-detection state.
// A tracker with nil channels has been retired (see retireTracker).
type fileTracker struct {
	events chan fsnotify.Event
	errors chan error
	// md5 sum to indicate if a file has been updated.
	md5Sum []byte
}
// newWorker creates a worker watching the given directory via the hooks in
// funcs and starts its event-loop goroutine. The goroutine exits when
// terminate() is called.
func newWorker(path string, funcs *patchTable) (*worker, error) {
	watcher, err := funcs.newWatcher()
	if err != nil {
		return nil, err
	}
	if err := funcs.addWatcherPath(watcher, path); err != nil {
		// Don't leak the watcher on registration failure.
		_ = watcher.Close()
		return nil, err
	}
	wk := &worker{
		dirWatcher:      watcher,
		watchedFiles:    map[string]*fileTracker{},
		retireTrackerCh: make(chan *fileTracker),
		terminateCh:     make(chan bool),
	}
	go wk.listen()
	return wk, nil
}
// listen runs the worker's event loop and, once it exits (via terminate()),
// tears everything down: the fsnotify watcher is closed and every remaining
// tracker's channels are closed.
func (wk *worker) listen() {
	wk.loop()
	_ = wk.dirWatcher.Close()
	// drain any retiring trackers that may be pending
	wk.drainRetiringTrackers()
	// clean up the rest
	// NOTE(review): watchedFiles is read here without wk.mu; this assumes no
	// addPath/removePath call races with termination — confirm with callers.
	for _, ft := range wk.watchedFiles {
		retireTracker(ft)
	}
}
// loop is the worker's main select loop. For each directory event it re-hashes
// every watched file and forwards the event only to trackers whose content
// (MD5) actually changed; directory errors are forwarded to all live trackers.
// While blocked on a forward, it still services tracker retirement and
// termination so removePath/terminate cannot deadlock against a slow consumer.
func (wk *worker) loop() {
	for {
		select {
		case event := <-wk.dirWatcher.Events:
			// work on a copy of the watchedFiles map, so that we don't interfere
			// with the caller's use of the map
			for path, ft := range wk.getTrackers() {
				if ft.events == nil {
					// tracker has been retired, skip it
					continue
				}
				sum := getMd5Sum(path)
				if !bytes.Equal(sum, ft.md5Sum) {
					ft.md5Sum = sum
					select {
					case ft.events <- event:
						// nothing to do
					// The inner `ft` deliberately shadows the loop's `ft`:
					// this retires the handed-over tracker, not the current
					// one. The event for the current file is dropped in
					// this case.
					case ft := <-wk.retireTrackerCh:
						retireTracker(ft)
					case <-wk.terminateCh:
						return
					}
				}
			}
		case err := <-wk.dirWatcher.Errors:
			for _, ft := range wk.getTrackers() {
				if ft.errors == nil {
					// tracker has been retired, skip it
					continue
				}
				select {
				case ft.errors <- err:
					// nothing to do
				// Shadowed `ft` again: retire the received tracker, not the
				// one currently being notified.
				case ft := <-wk.retireTrackerCh:
					retireTracker(ft)
				case <-wk.terminateCh:
					return
				}
			}
		case ft := <-wk.retireTrackerCh:
			retireTracker(ft)
		case <-wk.terminateCh:
			return
		}
	}
}
// used only by the worker goroutine
//
// drainRetiringTrackers retires, without blocking, any trackers that were
// queued for retirement but not yet processed when the loop terminated.
func (wk *worker) drainRetiringTrackers() {
	for {
		select {
		case pending := <-wk.retireTrackerCh:
			retireTracker(pending)
		default:
			// Channel empty: nothing left to clean up.
			return
		}
	}
}
// make a local copy of the set of trackers to avoid contention with callers
// used only by the worker goroutine
func (wk *worker) getTrackers() map[string]*fileTracker {
	wk.mu.RLock()
	defer wk.mu.RUnlock()
	snapshot := make(map[string]*fileTracker, len(wk.watchedFiles))
	for path, tracker := range wk.watchedFiles {
		snapshot[path] = tracker
	}
	return snapshot
}
// used only by the worker goroutine
//
// retireTracker closes both channels and nils them out; the nil channels mark
// the tracker as retired so the worker loop skips it.
func retireTracker(ft *fileTracker) {
	close(ft.events)
	close(ft.errors)
	ft.events, ft.errors = nil, nil
}
// terminate tells the worker goroutine to exit. The send blocks until the
// loop receives it, so the loop is guaranteed to be shutting down on return.
func (wk *worker) terminate() {
	wk.terminateCh <- true
}
// addPath starts tracking a file under the worker's directory, recording its
// current MD5 as the change-detection baseline. Adding an already-watched
// path returns an error.
func (wk *worker) addPath(path string) error {
	wk.mu.Lock()
	defer wk.mu.Unlock()
	if _, exists := wk.watchedFiles[path]; exists {
		return fmt.Errorf("path %s is already being watched", path)
	}
	wk.watchedFiles[path] = &fileTracker{
		events: make(chan fsnotify.Event),
		errors: make(chan error),
		md5Sum: getMd5Sum(path),
	}
	return nil
}
// removePath stops tracking a file and hands its tracker to the worker
// goroutine for retirement.
func (wk *worker) removePath(path string) error {
	wk.mu.Lock()
	tracker := wk.watchedFiles[path]
	if tracker == nil {
		wk.mu.Unlock()
		return fmt.Errorf("path %s not found", path)
	}
	delete(wk.watchedFiles, path)
	// The lock must be released before the send: the worker loop may need the
	// lock (via getTrackers) before it can service retireTrackerCh.
	wk.mu.Unlock()
	wk.retireTrackerCh <- tracker
	return nil
}
// eventChannel returns the event channel for path, or nil if untracked.
func (wk *worker) eventChannel(path string) chan fsnotify.Event {
	wk.mu.RLock()
	defer wk.mu.RUnlock()
	tracker := wk.watchedFiles[path]
	if tracker == nil {
		return nil
	}
	return tracker.events
}
// errorChannel returns the error channel for path, or nil if untracked.
func (wk *worker) errorChannel(path string) chan error {
	wk.mu.RLock()
	defer wk.mu.RUnlock()
	tracker := wk.watchedFiles[path]
	if tracker == nil {
		return nil
	}
	return tracker.errors
}
// gets the MD5 of the given file, or nil if there's a problem
// nolint: gosec
// not security sensitive code
func getMd5Sum(file string) []byte {
f, err := os.Open(file)
if err != nil {
return nil
}
defer f.Close()
r := bufio.NewReader(f)
h := md5.New()
_, _ = io.Copy(h, r)
return h.Sum(nil)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package h2c
import (
"net/http"
"net/textproto"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c" // nolint: depguard
)
// NewHandler returns an http.Handler that wraps h, intercepting any h2c
// traffic. See h2c.NewHandler for details.
// Unlike the normal handler, this handler prevents h2c Upgrades, which are not safe in Go's implementation;
// see https://github.com/golang/go/issues/56352.
// This means we allow only HTTP/1.1 or HTTP/2 prior knowledge.
func NewHandler(h http.Handler, s *http2.Server) http.Handler {
	wrapped := h2c.NewHandler(h, s)
	return denyH2cUpgrade(wrapped)
}
// denyH2cUpgrade rejects requests attempting an h2c Upgrade with
// 405 Method Not Allowed; everything else is passed through to h.
func denyH2cUpgrade(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !isH2CUpgrade(r.Header) {
			h.ServeHTTP(w, r)
			return
		}
		w.WriteHeader(http.StatusMethodNotAllowed)
		_, _ = w.Write([]byte("h2c upgrade not allowed"))
	})
}
// isH2CUpgrade reports whether the headers request an h2c upgrade:
// Upgrade contains the "h2c" token and Connection contains "HTTP2-Settings".
func isH2CUpgrade(h http.Header) bool {
	upgrade := h[textproto.CanonicalMIMEHeaderKey("Upgrade")]
	connection := h[textproto.CanonicalMIMEHeaderKey("Connection")]
	return httpguts.HeaderValuesContainsToken(upgrade, "h2c") &&
		httpguts.HeaderValuesContainsToken(connection, "HTTP2-Settings")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package http
import (
"bytes"
"fmt"
"io"
"net/http"
"time"
)
// requestTimeout is applied to requests made through DoHTTPGet, which takes no
// explicit timeout.
const requestTimeout = time.Second * 1 // Default timeout.
// DoHTTPGetWithTimeout issues a GET with the given timeout and no extra
// headers, returning the response body.
func DoHTTPGetWithTimeout(requestURL string, t time.Duration) (*bytes.Buffer, error) {
	return request(http.MethodGet, requestURL, t, nil)
}
// DoHTTPGet issues a GET with the package's default timeout (requestTimeout).
func DoHTTPGet(requestURL string) (*bytes.Buffer, error) {
	return DoHTTPGetWithTimeout(requestURL, requestTimeout)
}
// GET issues a GET with the given timeout and optional headers.
func GET(requestURL string, t time.Duration, headers map[string]string) (*bytes.Buffer, error) {
	return request(http.MethodGet, requestURL, t, headers)
}
// PUT issues a PUT with the given timeout and optional headers.
func PUT(requestURL string, t time.Duration, headers map[string]string) (*bytes.Buffer, error) {
	return request(http.MethodPut, requestURL, t, headers)
}
func request(method, requestURL string, t time.Duration, headers map[string]string) (*bytes.Buffer, error) {
httpClient := &http.Client{
Timeout: t,
}
req, err := http.NewRequest(method, requestURL, nil)
if err != nil {
return nil, err
}
for k, v := range headers {
req.Header.Set(k, v)
}
response, err := httpClient.Do(req)
if err != nil {
return nil, err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status %d", response.StatusCode)
}
var b bytes.Buffer
if _, err := io.Copy(&b, response.Body); err != nil {
return nil, err
}
return &b, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package grpcxds
import (
"encoding/json"
"fmt"
"os"
"path"
"time"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/structpb"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/file"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/protomarshal"
)
const (
	// ServerListenerNamePrefix is the fixed prefix of every inbound gRPC LDS
	// resource name (see ServerListenerNameTemplate).
	ServerListenerNamePrefix = "xds.istio.io/grpc/lds/inbound/"
	// ServerListenerNameTemplate for the name of the Listener resource to subscribe to for a gRPC
	// server. If the token `%s` is present in the string, all instances of the
	// token will be replaced with the server's listening "IP:port" (e.g.,
	// "0.0.0.0:8080", "[::]:8080").
	ServerListenerNameTemplate = ServerListenerNamePrefix + "%s"
)
// Bootstrap contains the general structure of what's expected by GRPC's XDS implementation.
// See https://github.com/grpc/grpc-go/blob/master/xds/internal/xdsclient/bootstrap/bootstrap.go
// TODO use structs from gRPC lib if created/exported
type Bootstrap struct {
	XDSServers                 []XdsServer                    `json:"xds_servers,omitempty"`
	Node                       *core.Node                     `json:"node,omitempty"`
	CertProviders              map[string]CertificateProvider `json:"certificate_providers,omitempty"`
	ServerListenerNameTemplate string                         `json:"server_listener_resource_name_template,omitempty"`
}

// ChannelCreds describes one credential mechanism for the xDS channel
// (e.g. type "insecure").
type ChannelCreds struct {
	Type   string `json:"type,omitempty"`
	Config any    `json:"config,omitempty"`
}

// XdsServer identifies one xDS server endpoint and how to reach it.
type XdsServer struct {
	ServerURI      string         `json:"server_uri,omitempty"`
	ChannelCreds   []ChannelCreds `json:"channel_creds,omitempty"`
	ServerFeatures []string       `json:"server_features,omitempty"`
}

// CertificateProvider is a named certificate-provider plugin plus its
// plugin-specific config (see UnmarshalJSON for how Config is decoded).
type CertificateProvider struct {
	PluginName string `json:"plugin_name,omitempty"`
	Config     any    `json:"config,omitempty"`
}
// UnmarshalJSON decodes a certificate_providers entry. Missing or malformed
// fields are tolerated with a warning rather than an error, so a partially
// valid bootstrap remains usable; only malformed JSON at the top level fails.
func (cp *CertificateProvider) UnmarshalJSON(data []byte) error {
	var dat map[string]*json.RawMessage
	if err := json.Unmarshal(data, &dat); err != nil {
		return err
	}
	*cp = CertificateProvider{}
	if pluginNameVal, ok := dat["plugin_name"]; ok {
		if err := json.Unmarshal(*pluginNameVal, &cp.PluginName); err != nil {
			log.Warnf("failed parsing plugin_name in certificate_provider: %v", err)
		}
	} else {
		log.Warnf("did not find plugin_name in certificate_provider")
	}
	if configVal, ok := dat["config"]; ok {
		// The previous switch on cp.PluginName had an identical body in the
		// FileWatcherCertProviderName case and the default branch; every
		// plugin currently decodes into FileWatcherCertProviderConfig, so the
		// switch was dead code and has been collapsed.
		config := FileWatcherCertProviderConfig{}
		if err := json.Unmarshal(*configVal, &config); err != nil {
			log.Warnf("failed parsing config in certificate_provider: %v", err)
		}
		cp.Config = config
	} else {
		log.Warnf("did not find config in certificate_provider")
	}
	return nil
}
// FileWatcherCertProviderName is the gRPC certificate-provider plugin that
// reloads certs from disk.
const FileWatcherCertProviderName = "file_watcher"

// FileWatcherCertProviderConfig is the config for the file_watcher plugin.
type FileWatcherCertProviderConfig struct {
	CertificateFile   string          `json:"certificate_file,omitempty"`
	PrivateKeyFile    string          `json:"private_key_file,omitempty"`
	CACertificateFile string          `json:"ca_certificate_file,omitempty"`
	// RefreshDuration is kept as raw JSON (a protobuf Duration encoding).
	RefreshDuration   json.RawMessage `json:"refresh_interval,omitempty"`
}
// FilePaths returns the certificate, key, and CA paths the provider watches.
func (c *FileWatcherCertProviderConfig) FilePaths() []string {
	paths := []string{c.CertificateFile, c.PrivateKeyFile, c.CACertificateFile}
	return paths
}
// FileWatcherProvider returns the FileWatcherCertProviderConfig if one exists in CertProviders
// (nil on a nil receiver, when no file_watcher provider is configured, or
// when its Config has an unexpected type).
func (b *Bootstrap) FileWatcherProvider() *FileWatcherCertProviderConfig {
	if b == nil || b.CertProviders == nil {
		return nil
	}
	for _, provider := range b.CertProviders {
		if provider.PluginName != FileWatcherCertProviderName {
			continue
		}
		if cfg, ok := provider.Config.(FileWatcherCertProviderConfig); ok {
			return &cfg
		}
		return nil
	}
	return nil
}
// LoadBootstrap loads a Bootstrap from the given file path.
func LoadBootstrap(file string) (*Bootstrap, error) {
	data, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	b := &Bootstrap{}
	if err := json.Unmarshal(data, b); err != nil {
		return nil, err
	}
	// The original `return b, err` returned the outer (always-nil) err;
	// return nil explicitly for clarity.
	return b, nil
}
// GenerateBootstrapOptions are the inputs to GenerateBootstrap.
type GenerateBootstrapOptions struct {
	// Node supplies the xDS node ID, locality, and metadata.
	Node *model.Node
	// XdsUdsPath, when set, takes precedence over DiscoveryAddress and is
	// dialed as a unix socket.
	XdsUdsPath string
	// DiscoveryAddress is the xDS server address used when XdsUdsPath is empty.
	DiscoveryAddress string
	// CertDir, when set, enables a file_watcher certificate provider rooted here.
	CertDir string
}
// GenerateBootstrap generates the bootstrap structure for gRPC XDS integration.
// It points gRPC at the agent's XDS server (over a unix socket when
// opts.XdsUdsPath is set) and, when opts.CertDir is set, wires up a
// file_watcher certificate provider rooted at that directory.
func GenerateBootstrap(opts GenerateBootstrapOptions) (*Bootstrap, error) {
	xdsMeta, err := extractMeta(opts.Node)
	if err != nil {
		return nil, fmt.Errorf("failed extracting xds metadata: %v", err)
	}
	// TODO direct to CP should use secure channel (most likely JWT + TLS, but possibly allow mTLS)
	serverURI := opts.DiscoveryAddress
	if opts.XdsUdsPath != "" {
		serverURI = fmt.Sprintf("unix:///%s", opts.XdsUdsPath)
	}
	bootstrap := Bootstrap{
		XDSServers: []XdsServer{{
			ServerURI: serverURI,
			// connect locally via agent
			ChannelCreds:   []ChannelCreds{{Type: "insecure"}},
			ServerFeatures: []string{"xds_v3"},
		}},
		Node: &core.Node{
			Id:       opts.Node.ID,
			Locality: opts.Node.Locality,
			Metadata: xdsMeta,
		},
		ServerListenerNameTemplate: ServerListenerNameTemplate,
	}
	if opts.CertDir != "" {
		// TODO use a more appropriate interval
		refresh, err := protomarshal.Marshal(durationpb.New(15 * time.Minute))
		if err != nil {
			return nil, err
		}
		bootstrap.CertProviders = map[string]CertificateProvider{
			"default": {
				// Use the named constant for consistency with the rest of the package.
				PluginName: FileWatcherCertProviderName,
				Config: FileWatcherCertProviderConfig{
					PrivateKeyFile:    path.Join(opts.CertDir, "key.pem"),
					CertificateFile:   path.Join(opts.CertDir, "cert-chain.pem"),
					CACertificateFile: path.Join(opts.CertDir, "root-cert.pem"),
					RefreshDuration:   refresh,
				},
			},
		}
	}
	// The previous `return &bootstrap, err` returned the OUTER err, which is
	// provably nil here (the inner `refresh, err :=` shadowed it); that was a
	// latent trap for future edits. Return nil explicitly.
	return &bootstrap, nil
}
// extractMeta converts the node's metadata into a protobuf Struct by
// round-tripping it through JSON.
func extractMeta(node *model.Node) (*structpb.Struct, error) {
	// Renamed from `bytes` to avoid shadowing the standard package name.
	raw, err := json.Marshal(node.Metadata)
	if err != nil {
		return nil, err
	}
	fields := map[string]any{}
	if err := json.Unmarshal(raw, &fields); err != nil {
		return nil, err
	}
	return structpb.NewStruct(fields)
}
// GenerateBootstrapFile generates and writes atomically as JSON to the given file path.
func GenerateBootstrapFile(opts GenerateBootstrapOptions, path string) (*Bootstrap, error) {
	bootstrap, err := GenerateBootstrap(opts)
	if err != nil {
		return nil, err
	}
	jsonData, err := json.MarshalIndent(bootstrap, "", " ")
	if err != nil {
		return nil, err
	}
	// World-readable is fine: the bootstrap carries paths, not secrets.
	if writeErr := file.AtomicWrite(path, jsonData, os.FileMode(0o644)); writeErr != nil {
		return nil, fmt.Errorf("failed writing to %s: %v", path, writeErr)
	}
	return bootstrap, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jwt
const (
	// PolicyThirdParty and PolicyFirstParty name the supported JWT policies.
	PolicyThirdParty = "third-party-jwt"
	PolicyFirstParty = "first-party-jwt"
)

// JwksFetchMode selects which component fetches the JWKs server.
type JwksFetchMode int

const (
	// Istiod is used to indicate Istiod ALWAYS fetches the JWKs server
	Istiod JwksFetchMode = iota
	// Hybrid is used to indicate Envoy fetches the JWKs server when there is a cluster entry,
	// otherwise fallback to Istiod
	Hybrid
	// Envoy is used to indicate Envoy ALWAYS fetches the JWKs server
	Envoy
)
// String converts JwksFetchMode to readable string. Unknown values render as
// "Unset".
func (mode JwksFetchMode) String() string {
	switch mode {
	case Istiod:
		return "Istiod"
	case Hybrid:
		return "Hybrid"
	case Envoy:
		return "Envoy"
	}
	return "Unset"
}
// ConvertToJwksFetchMode converts from string value mode to enum JwksFetchMode value.
// true and false are kept for backwards compatibility.
func ConvertToJwksFetchMode(mode string) JwksFetchMode {
switch mode {
case "istiod", "false":
return Istiod
case "hybrid", "true":
return Hybrid
case "envoy":
return Envoy
default:
return Istiod
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jwt
import (
"strings"
)
// HeaderJWTClaim is the special header name used in virtual service for routing based on JWT claims.
const HeaderJWTClaim = "@request.auth.claims"

// Separator identifies which claim-nesting syntax appeared in the header name.
type Separator int

const (
	// Dot syntax: @request.auth.claims.a.b
	Dot Separator = iota
	// Square syntax: @request.auth.claims[a][b]
	Square
)

// RoutingClaim is the parsed form of a JWT-claim routing header name.
type RoutingClaim struct {
	Match     bool
	Separator Separator
	Claims    []string
}

// ToRoutingClaim parses headerName into a RoutingClaim. Match is false when
// the name does not start with HeaderJWTClaim (compared case-insensitively)
// or uses neither of the two recognized claim syntaxes.
func ToRoutingClaim(headerName string) RoutingClaim {
	var rc RoutingClaim
	if !strings.HasPrefix(strings.ToLower(headerName), HeaderJWTClaim) {
		return rc
	}
	// Keep the original casing of the claim names themselves.
	rest := headerName[len(HeaderJWTClaim):]
	switch {
	case len(rest) > 1 && strings.HasPrefix(rest, "."):
		// `.`-separated claims
		rc = RoutingClaim{Match: true, Separator: Dot, Claims: strings.Split(rest[1:], ".")}
	case len(rest) > 2 && strings.HasPrefix(rest, "[") && strings.HasSuffix(rest, "]"):
		// `[]`-separated claims
		rc = RoutingClaim{Match: true, Separator: Square, Claims: strings.Split(rest[1:len(rest)-1], "][")}
	}
	return rc
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package keepalive
import (
"math"
"time"
"github.com/spf13/cobra"
"google.golang.org/grpc"
"google.golang.org/grpc/keepalive"
"istio.io/istio/pkg/env"
)
const (
	// Infinity is the maximum possible duration for keepalive values
	Infinity = time.Duration(math.MaxInt64)
)

var (
	// grpcKeepaliveInterval sets the gRPC KeepAlive Interval
	// (overridable via the GRPC_KEEPALIVE_INTERVAL env var; default 30s).
	grpcKeepaliveInterval = env.Register("GRPC_KEEPALIVE_INTERVAL", 30*time.Second, "gRPC Keepalive Interval").Get()
	// grpcKeepAliveTimeout sets the gRPC KeepAlive Timeout
	// (overridable via the GRPC_KEEPALIVE_TIMEOUT env var; default 10s).
	grpcKeepaliveTimeout = env.Register("GRPC_KEEPALIVE_TIMEOUT", 10*time.Second, "gRPC Keepalive Timeout").Get()
)
// Options defines the set of options used for grpc keepalive.
// The Time and Timeout options are used for both client and server connections,
// whereas MaxServerConnectionAge* options are applicable on the server side only
// (as implied by the options' name...)
type Options struct {
	// After a duration of this time if the server/client doesn't see any activity it pings the peer to see if the transport is still alive.
	Time time.Duration
	// After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that
	// the connection is closed.
	Timeout time.Duration
	// MaxServerConnectionAge is a duration for the maximum amount of time a
	// connection may exist before it will be closed by the server sending a GoAway.
	// A random jitter is added to spread out connection storms.
	// See https://github.com/grpc/grpc-go/blob/bd0b3b2aa2a9c87b323ee812359b0e9cda680dad/keepalive/keepalive.go#L49
	MaxServerConnectionAge time.Duration // default value is infinity (see DefaultOption)
	// MaxServerConnectionAgeGrace is an additive period after MaxServerConnectionAge
	// after which the connection will be forcibly closed by the server.
	MaxServerConnectionAgeGrace time.Duration // default value 10s (see DefaultOption)
}
// ConvertToClientOption convert Options to grpc client option used for grpc keepalive.
// Only Time and Timeout apply on the client side.
func (o *Options) ConvertToClientOption() grpc.DialOption {
	params := keepalive.ClientParameters{
		Time:    o.Time,
		Timeout: o.Timeout,
	}
	return grpc.WithKeepaliveParams(params)
}
// DefaultOption returns the default keepalive options: env-configured
// interval/timeout, no server-side connection age limit, and a 10s grace.
func DefaultOption() *Options {
	o := &Options{
		Time:    grpcKeepaliveInterval,
		Timeout: grpcKeepaliveTimeout,
	}
	o.MaxServerConnectionAge = Infinity
	o.MaxServerConnectionAgeGrace = 10 * time.Second
	return o
}
// AttachCobraFlags attaches a set of Cobra flags to the given Cobra command.
//
// Cobra is the command-line processor that Istio uses. This command attaches
// the necessary set of flags to configure the grpc keepalive options.
// Note: MaxServerConnectionAgeGrace is intentionally not exposed as a flag.
func (o *Options) AttachCobraFlags(cmd *cobra.Command) {
	flags := cmd.PersistentFlags()
	flags.DurationVar(&o.Time, "keepaliveInterval", o.Time,
		"The time interval if no activity on the connection it pings the peer to see if the transport is alive")
	flags.DurationVar(&o.Timeout, "keepaliveTimeout", o.Timeout,
		"After having pinged for keepalive check, the client/server waits for a duration of keepaliveTimeout "+
			"and if no activity is seen even after that the connection is closed.")
	flags.DurationVar(&o.MaxServerConnectionAge, "keepaliveMaxServerConnectionAge",
		o.MaxServerConnectionAge, "Maximum duration a connection will be kept open on the server before a graceful close.")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
admissionv1 "k8s.io/api/admission/v1"
kubeApiAdmissionv1beta1 "k8s.io/api/admission/v1beta1"
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
)
const (
	// APIVersion constants for the two supported admission.k8s.io schema versions.
	admissionAPIV1      = "admission.k8s.io/v1"
	admissionAPIV1beta1 = "admission.k8s.io/v1beta1"
	// Operation constants, mirroring the Operation values of the Kubernetes
	// admission API (see AdmissionRequest.Operation).
	Create  string = "CREATE"
	Update  string = "UPDATE"
	Delete  string = "DELETE"
	Connect string = "CONNECT"
)
// AdmissionReview describes an admission review request/response.
// It is a version-agnostic adapter over the Kubernetes admission.k8s.io v1 and
// v1beta1 AdmissionReview types; convert with AdmissionReviewKubeToAdapter and
// AdmissionReviewAdapterToKube.
type AdmissionReview struct {
	// TypeMeta describes an individual object in an API response or request
	// with strings representing the type of the object and its API schema version.
	// Structures that are versioned or persisted should inline TypeMeta.
	metav1.TypeMeta
	// Request describes the attributes for the admission request.
	Request *AdmissionRequest `json:"request,omitempty"`
	// Response describes the attributes for the admission response.
	Response *AdmissionResponse `json:"response,omitempty"`
}
// AdmissionRequest describes the admission.Attributes for the admission request.
// Field semantics mirror the Kubernetes AdmissionRequest types that
// AdmissionReviewKubeToAdapter/AdmissionReviewAdapterToKube convert to and from.
type AdmissionRequest struct {
	// UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
	// otherwise identical (parallel requests, requests when earlier requests did not modify etc)
	// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
	UID types.UID `json:"uid"`
	// Kind is the fully-qualified type of object being submitted (for example, v1.Pod or autoscaling.v1.Scale)
	Kind metav1.GroupVersionKind `json:"kind"`
	// Resource is the fully-qualified resource being requested (for example, v1.pods)
	Resource metav1.GroupVersionResource `json:"resource"`
	// SubResource is the subresource being requested, if any (for example, "status" or "scale")
	SubResource string `json:"subResource,omitempty"`
	// RequestKind is the fully-qualified type of the original API request (for example, v1.Pod or autoscaling.v1.Scale).
	// If this is specified and differs from the value in "kind", an equivalent match and conversion was performed.
	//
	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
	// with `kind: {group:"apps", version:"v1", kind:"Deployment"}` (matching the rule the webhook registered for),
	// and `requestKind: {group:"apps", version:"v1beta1", kind:"Deployment"}` (indicating the kind of the original API request).
	//
	RequestKind *metav1.GroupVersionKind `json:"requestKind,omitempty"`
	// RequestResource is the fully-qualified resource of the original API request (for example, v1.pods).
	// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
	//
	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
	// with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
	// and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
	//
	RequestResource *metav1.GroupVersionResource `json:"requestResource,omitempty"`
	// RequestSubResource is the name of the subresource of the original API request, if any (for example, "status" or "scale")
	// If this is specified and differs from the value in "subResource", an equivalent match and conversion was performed.
	RequestSubResource string `json:"requestSubResource,omitempty"`
	// UserInfo is information about the requesting user
	UserInfo authenticationv1.UserInfo `json:"userInfo"`
	// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
	// rely on the server to generate the name. If that is the case, this field will contain an empty string.
	Name string `json:"name,omitempty"`
	// Namespace is the namespace associated with the request (if any).
	Namespace string `json:"namespace,omitempty"`
	// Operation is the operation being performed. This may be different than the operation
	// requested. e.g. a patch can result in either a CREATE or UPDATE Operation.
	Operation string `json:"operation"`
	// Object is the object from the incoming request.
	Object runtime.RawExtension `json:"object,omitempty"`
	// OldObject is the existing object. Only populated for DELETE and UPDATE requests.
	OldObject runtime.RawExtension `json:"oldObject,omitempty"`
	// DryRun indicates that modifications will definitely not be persisted for this request.
	// Defaults to false.
	DryRun *bool `json:"dryRun,omitempty"`
	// Options is the operation option structure of the operation being performed.
	// e.g. `meta.k8s.io/v1.DeleteOptions` or `meta.k8s.io/v1.CreateOptions`. This may be
	// different than the options the caller provided. e.g. for a patch request the performed
	// Operation might be a CREATE, in which case the Options will a
	// `meta.k8s.io/v1.CreateOptions` even though the caller provided `meta.k8s.io/v1.PatchOptions`.
	Options runtime.RawExtension `json:"options,omitempty"`
}
// AdmissionResponse describes an admission response.
type AdmissionResponse struct {
	// UID is an identifier for the individual request/response.
	// This should be copied over from the corresponding AdmissionRequest.
	UID types.UID `json:"uid"`
	// Allowed indicates whether or not the admission request was permitted.
	Allowed bool `json:"allowed"`
	// Result contains extra details into why an admission request was denied.
	// This field IS NOT consulted in any way if "Allowed" is "true".
	// Note: serialized under the JSON key "status" (matching the Kubernetes wire format).
	Result *metav1.Status `json:"status,omitempty"`
	// The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
	Patch []byte `json:"patch,omitempty"`
	// The type of Patch. Currently we only allow "JSONPatch".
	PatchType *string `json:"patchType,omitempty"`
	// AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted).
	// MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with
	// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
	// the admission webhook to add additional context to the audit log for this request.
	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty"`
	// warnings is a list of warning messages to return to the requesting API client.
	// Warning messages describe a problem the client making the API request should correct or be aware of.
	// Limit warnings to 120 characters if possible.
	// Warnings over 256 characters and large numbers of warnings may be truncated.
	Warnings []string `json:"warnings,omitempty"`
}
// AdmissionReviewKubeToAdapter converts a Kubernetes AdmissionReview (either
// admission.k8s.io/v1 or v1beta1) into the version-agnostic adapter type.
// All request and response fields are carried over, so a round trip through
// AdmissionReviewAdapterToKube is lossless. Any other object type is an error.
func AdmissionReviewKubeToAdapter(object runtime.Object) (*AdmissionReview, error) {
	var typeMeta metav1.TypeMeta
	var req *AdmissionRequest
	var resp *AdmissionResponse
	switch obj := object.(type) {
	case *kubeApiAdmissionv1beta1.AdmissionReview:
		typeMeta = obj.TypeMeta
		arv1beta1Response := obj.Response
		arv1beta1Request := obj.Request
		if arv1beta1Response != nil {
			resp = &AdmissionResponse{
				UID:     arv1beta1Response.UID,
				Allowed: arv1beta1Response.Allowed,
				Result:  arv1beta1Response.Result,
				Patch:   arv1beta1Response.Patch,
				// Previously dropped; copied so the adapter round-trips losslessly.
				AuditAnnotations: arv1beta1Response.AuditAnnotations,
				Warnings:         arv1beta1Response.Warnings,
			}
			if arv1beta1Response.PatchType != nil {
				patchType := string(*arv1beta1Response.PatchType)
				resp.PatchType = &patchType
			}
		}
		if arv1beta1Request != nil {
			req = &AdmissionRequest{
				UID:      arv1beta1Request.UID,
				Kind:     arv1beta1Request.Kind,
				Resource: arv1beta1Request.Resource,
				// SubResource/Request*/Options were previously dropped here even
				// though AdmissionReviewAdapterToKube maps them; copy them all.
				SubResource:        arv1beta1Request.SubResource,
				RequestKind:        arv1beta1Request.RequestKind,
				RequestResource:    arv1beta1Request.RequestResource,
				RequestSubResource: arv1beta1Request.RequestSubResource,
				UserInfo:           arv1beta1Request.UserInfo,
				Name:               arv1beta1Request.Name,
				Namespace:          arv1beta1Request.Namespace,
				Operation:          string(arv1beta1Request.Operation),
				Object:             arv1beta1Request.Object,
				OldObject:          arv1beta1Request.OldObject,
				DryRun:             arv1beta1Request.DryRun,
				Options:            arv1beta1Request.Options,
			}
		}
	case *admissionv1.AdmissionReview:
		typeMeta = obj.TypeMeta
		arv1Response := obj.Response
		arv1Request := obj.Request
		if arv1Response != nil {
			resp = &AdmissionResponse{
				UID:     arv1Response.UID,
				Allowed: arv1Response.Allowed,
				Result:  arv1Response.Result,
				Patch:   arv1Response.Patch,
				// Previously dropped; copied so the adapter round-trips losslessly.
				AuditAnnotations: arv1Response.AuditAnnotations,
				Warnings:         arv1Response.Warnings,
			}
			if arv1Response.PatchType != nil {
				patchType := string(*arv1Response.PatchType)
				resp.PatchType = &patchType
			}
		}
		if arv1Request != nil {
			req = &AdmissionRequest{
				UID:      arv1Request.UID,
				Kind:     arv1Request.Kind,
				Resource: arv1Request.Resource,
				// See the v1beta1 branch: copy the full field set.
				SubResource:        arv1Request.SubResource,
				RequestKind:        arv1Request.RequestKind,
				RequestResource:    arv1Request.RequestResource,
				RequestSubResource: arv1Request.RequestSubResource,
				UserInfo:           arv1Request.UserInfo,
				Name:               arv1Request.Name,
				Namespace:          arv1Request.Namespace,
				Operation:          string(arv1Request.Operation),
				Object:             arv1Request.Object,
				OldObject:          arv1Request.OldObject,
				DryRun:             arv1Request.DryRun,
				Options:            arv1Request.Options,
			}
		}
	default:
		return nil, fmt.Errorf("unsupported type :%v", object.GetObjectKind())
	}
	return &AdmissionReview{
		TypeMeta: typeMeta,
		Request:  req,
		Response: resp,
	}, nil
}
// AdmissionReviewAdapterToKube converts the version-agnostic AdmissionReview
// adapter back into the concrete Kubernetes type for the given apiVersion.
// An empty apiVersion defaults to admission.k8s.io/v1beta1.
// NOTE(review): an unrecognized apiVersion falls through the switch and
// returns a nil runtime.Object — callers must handle that.
func AdmissionReviewAdapterToKube(ar *AdmissionReview, apiVersion string) runtime.Object {
	var res runtime.Object
	arRequest := ar.Request
	arResponse := ar.Response
	if apiVersion == "" {
		apiVersion = admissionAPIV1beta1
	}
	switch apiVersion {
	case admissionAPIV1beta1:
		arv1beta1 := kubeApiAdmissionv1beta1.AdmissionReview{}
		if arRequest != nil {
			arv1beta1.Request = &kubeApiAdmissionv1beta1.AdmissionRequest{
				UID:                arRequest.UID,
				Kind:               arRequest.Kind,
				Resource:           arRequest.Resource,
				SubResource:        arRequest.SubResource,
				Name:               arRequest.Name,
				Namespace:          arRequest.Namespace,
				RequestKind:        arRequest.RequestKind,
				RequestResource:    arRequest.RequestResource,
				RequestSubResource: arRequest.RequestSubResource,
				// Operation is a plain string in the adapter; convert to the typed enum.
				Operation: kubeApiAdmissionv1beta1.Operation(arRequest.Operation),
				UserInfo:  arRequest.UserInfo,
				Object:    arRequest.Object,
				OldObject: arRequest.OldObject,
				DryRun:    arRequest.DryRun,
				Options:   arRequest.Options,
			}
		}
		if arResponse != nil {
			// PatchType needs a pointer conversion to the versioned enum type.
			var patchType *kubeApiAdmissionv1beta1.PatchType
			if arResponse.PatchType != nil {
				patchType = (*kubeApiAdmissionv1beta1.PatchType)(arResponse.PatchType)
			}
			arv1beta1.Response = &kubeApiAdmissionv1beta1.AdmissionResponse{
				UID:              arResponse.UID,
				Allowed:          arResponse.Allowed,
				Result:           arResponse.Result,
				Patch:            arResponse.Patch,
				PatchType:        patchType,
				AuditAnnotations: arResponse.AuditAnnotations,
				Warnings:         arResponse.Warnings,
			}
		}
		arv1beta1.TypeMeta = ar.TypeMeta
		res = &arv1beta1
	case admissionAPIV1:
		arv1 := admissionv1.AdmissionReview{}
		if arRequest != nil {
			arv1.Request = &admissionv1.AdmissionRequest{
				UID:                arRequest.UID,
				Kind:               arRequest.Kind,
				Resource:           arRequest.Resource,
				SubResource:        arRequest.SubResource,
				Name:               arRequest.Name,
				Namespace:          arRequest.Namespace,
				RequestKind:        arRequest.RequestKind,
				RequestResource:    arRequest.RequestResource,
				RequestSubResource: arRequest.RequestSubResource,
				Operation:          admissionv1.Operation(arRequest.Operation),
				UserInfo:           arRequest.UserInfo,
				Object:             arRequest.Object,
				OldObject:          arRequest.OldObject,
				DryRun:             arRequest.DryRun,
				Options:            arRequest.Options,
			}
		}
		if arResponse != nil {
			var patchType *admissionv1.PatchType
			if arResponse.PatchType != nil {
				patchType = (*admissionv1.PatchType)(arResponse.PatchType)
			}
			arv1.Response = &admissionv1.AdmissionResponse{
				UID:              arResponse.UID,
				Allowed:          arResponse.Allowed,
				Result:           arResponse.Result,
				Patch:            arResponse.Patch,
				PatchType:        patchType,
				AuditAnnotations: arResponse.AuditAnnotations,
				Warnings:         arResponse.Warnings,
			}
		}
		arv1.TypeMeta = ar.TypeMeta
		res = &arv1
	}
	return res
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-multierror"
io2 "github.com/AdamKorcz/bugdetectors/io"
"go.uber.org/atomic"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/credentials"
v1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
kubeExtClient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
extfake "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/fake"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/runtime/serializer/yaml"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
kubeVersion "k8s.io/apimachinery/pkg/version"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/dynamic"
dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
kubescheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/metadata"
metadatafake "k8s.io/client-go/metadata/fake"
"k8s.io/client-go/rest"
clienttesting "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/remotecommand"
gatewayapiv1 "sigs.k8s.io/gateway-api/apis/v1"
gatewayapi "sigs.k8s.io/gateway-api/apis/v1alpha2"
gatewayapibeta "sigs.k8s.io/gateway-api/apis/v1beta1"
gatewayapiclient "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned"
gatewayapifake "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned/fake"
"istio.io/api/annotation"
"istio.io/api/label"
clientextensions "istio.io/client-go/pkg/apis/extensions/v1alpha1"
clientnetworkingalpha "istio.io/client-go/pkg/apis/networking/v1alpha3"
clientnetworkingbeta "istio.io/client-go/pkg/apis/networking/v1beta1"
clientsecurity "istio.io/client-go/pkg/apis/security/v1beta1"
clienttelemetry "istio.io/client-go/pkg/apis/telemetry/v1alpha1"
istioclient "istio.io/client-go/pkg/clientset/versioned"
istiofake "istio.io/client-go/pkg/clientset/versioned/fake"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/kube/informerfactory"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/kube/mcs"
"istio.io/istio/pkg/lazy"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/sleep"
"istio.io/istio/pkg/test/util/yml"
"istio.io/istio/pkg/version"
)
const (
	// defaultLocalAddress is used for port-forwarding when no local address is
	// given (see CLIClient.NewPortForwarder).
	defaultLocalAddress = "localhost"
	// RunningStatus is a pod field selector matching pods in phase Running.
	RunningStatus = "status.phase=Running"
)
// Client is a helper for common Kubernetes client operations. This contains various different kubernetes
// clients using a shared config. It is expected that all of Istiod can share the same set of clients and
// informers. Sharing informers is especially important for load on the API server/Istiod itself.
// The concrete implementation is *client (see newClientInternal / NewFakeClient).
type Client interface {
	// RESTConfig returns the Kubernetes rest.Config used to configure the clients.
	RESTConfig() *rest.Config
	// Ext returns the API extensions client.
	Ext() kubeExtClient.Interface
	// Kube returns the core kube client
	Kube() kubernetes.Interface
	// Dynamic client.
	Dynamic() dynamic.Interface
	// Metadata returns the Metadata kube client.
	Metadata() metadata.Interface
	// Istio returns the Istio kube client.
	Istio() istioclient.Interface
	// GatewayAPI returns the gateway-api kube client.
	GatewayAPI() gatewayapiclient.Interface
	// Informers returns an informer factory
	Informers() informerfactory.InformerFactory
	// CrdWatcher returns the CRD watcher for this client
	CrdWatcher() kubetypes.CrdWatcher
	// RunAndWait starts all informers and waits for their caches to sync.
	// Warning: this must be called AFTER .Informer() is called, which will register the informer.
	RunAndWait(stop <-chan struct{})
	// WaitForCacheSync waits for all cache functions to sync, as well as all informers started by the *fake* client.
	WaitForCacheSync(name string, stop <-chan struct{}, cacheSyncs ...cache.InformerSynced) bool
	// GetKubernetesVersion returns the Kubernetes server version
	GetKubernetesVersion() (*kubeVersion.Info, error)
	// Shutdown closes all informers and waits for them to terminate
	Shutdown()
	// ClusterID returns the cluster this client is connected to
	ClusterID() cluster.ID
}
// CLIClient is an extended client with additional helpers/functionality for Istioctl and testing.
// CLIClient is not appropriate for controllers, as it does a number of highly privileged or highly risky operations
// such as `exec`, `port-forward`, etc.
type CLIClient interface {
	Client
	// Revision of the Istio control plane.
	Revision() string
	// EnvoyDo makes a http request to the Envoy in the specified pod.
	EnvoyDo(ctx context.Context, podName, podNamespace, method, path string) ([]byte, error)
	// EnvoyDoWithPort makes a http request to the Envoy in the specified pod and port.
	EnvoyDoWithPort(ctx context.Context, podName, podNamespace, method, path string, port int) ([]byte, error)
	// AllDiscoveryDo makes a http request to each Istio discovery instance.
	AllDiscoveryDo(ctx context.Context, namespace, path string) (map[string][]byte, error)
	// GetIstioVersions gets the version for each Istio control plane component.
	GetIstioVersions(ctx context.Context, namespace string) (*version.MeshInfo, error)
	// PodsForSelector finds pods matching selector.
	PodsForSelector(ctx context.Context, namespace string, labelSelectors ...string) (*v1.PodList, error)
	// GetIstioPods retrieves the pod objects for Istio deployments
	GetIstioPods(ctx context.Context, namespace string, opts metav1.ListOptions) ([]v1.Pod, error)
	// GetProxyPods retrieves all the proxy pod objects: sidecar injected pods and gateway pods.
	GetProxyPods(ctx context.Context, limit int64, token string) (*v1.PodList, error)
	// PodExecCommands takes a list of commands and the pod data to run the commands in the specified pod.
	PodExecCommands(podName, podNamespace, container string, commands []string) (stdout string, stderr string, err error)
	// PodExec takes a command and the pod data to run the command in the specified pod.
	PodExec(podName, podNamespace, container string, command string) (stdout string, stderr string, err error)
	// PodLogs retrieves the logs for the given pod.
	PodLogs(ctx context.Context, podName string, podNamespace string, container string, previousLog bool) (string, error)
	// NewPortForwarder creates a new PortForwarder configured for the given pod. If localPort=0, a port will be
	// dynamically selected. If localAddress is empty, "localhost" is used.
	NewPortForwarder(podName string, ns string, localAddress string, localPort int, podPort int) (PortForwarder, error)
	// ApplyYAMLFiles applies the resources in the given YAML files.
	ApplyYAMLFiles(namespace string, yamlFiles ...string) error
	// ApplyYAMLContents applies the resources in the given YAML strings.
	ApplyYAMLContents(namespace string, yamls ...string) error
	// ApplyYAMLFilesDryRun performs a dry run for applying the resource in the given YAML files
	ApplyYAMLFilesDryRun(namespace string, yamlFiles ...string) error
	// DeleteYAMLFiles deletes the resources in the given YAML files.
	DeleteYAMLFiles(namespace string, yamlFiles ...string) error
	// DeleteYAMLFilesDryRun performs a dry run for deleting the resources in the given YAML files.
	DeleteYAMLFilesDryRun(namespace string, yamlFiles ...string) error
	// CreatePerRPCCredentials creates a gRPC bearer token provider that can create (and renew!) Istio tokens
	CreatePerRPCCredentials(ctx context.Context, tokenNamespace, tokenServiceAccount string, audiences []string,
		expirationSeconds int64) (credentials.PerRPCCredentials, error)
	// UtilFactory returns a kubectl factory
	UtilFactory() PartialFactory
	// InvalidateDiscovery invalidates the discovery client, useful after manually changing CRD's
	InvalidateDiscovery()
}
// PortManager returns a local port to use, or an error when none is available
// (presumably used for selecting port-forward ports — confirm at call sites).
type PortManager func() (uint16, error)
// Compile-time assertions that *client satisfies both interfaces.
var (
	_ Client    = &client{}
	_ CLIClient = &client{}
)
// NewFakeClient creates a new, fake, client backed by in-memory fake clientsets
// seeded with the given objects. Intended for tests only.
func NewFakeClient(objects ...runtime.Object) CLIClient {
	c := &client{
		informerWatchesPending: atomic.NewInt32(0),
		clusterID:              "fake",
	}
	c.kube = fake.NewSimpleClientset(objects...)
	// A dummy rest config; the fake clientsets never dial it.
	c.config = &rest.Config{
		Host: "server",
	}
	c.informerFactory = informerfactory.NewSharedInformerFactory()
	s := FakeIstioScheme
	c.metadata = metadatafake.NewSimpleMetadataClient(s)
	c.dynamic = dynamicfake.NewSimpleDynamicClient(s)
	c.istio = istiofake.NewSimpleClientset()
	c.gatewayapi = gatewayapifake.NewSimpleClientset()
	c.extSet = extfake.NewSimpleClientset()
	// https://github.com/kubernetes/kubernetes/issues/95372
	// There is a race condition in the client fakes, where events that happen between the List and Watch
	// of an informer are dropped. To avoid this, we explicitly manage the list and watch, ensuring all lists
	// have an associated watch before continuing.
	// This would likely break any direct calls to List(), but for now our tests don't do that anyways. If we need
	// to in the future we will need to identify the Lists that have a corresponding Watch, possibly by looking
	// at created Informers
	// an atomic.Int is used instead of sync.WaitGroup because wg.Add and wg.Wait cannot be called concurrently
	listReactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
		// Record a pending watch; returns handled=false so the default reactor still serves the list.
		c.informerWatchesPending.Inc()
		return false, nil, nil
	}
	watchReactor := func(tracker clienttesting.ObjectTracker) func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) {
		return func(action clienttesting.Action) (handled bool, ret watch.Interface, err error) {
			gvr := action.GetResource()
			ns := action.GetNamespace()
			watch, err := tracker.Watch(gvr, ns)
			if err != nil {
				return false, nil, err
			}
			// The watch matching an earlier list is now established.
			c.informerWatchesPending.Dec()
			return true, watch, nil
		}
	}
	// https://github.com/kubernetes/client-go/issues/439
	// Fakes do not implement generateName; emulate it so objects created with
	// GenerateName get a concrete name.
	createReactor := func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) {
		ret = action.(clienttesting.CreateAction).GetObject()
		meta, ok := ret.(metav1.Object)
		if !ok {
			return
		}
		if meta.GetName() == "" && meta.GetGenerateName() != "" {
			meta.SetName(names.SimpleNameGenerator.GenerateName(meta.GetGenerateName()))
		}
		return
	}
	// Install the reactors on every fake clientset this client wraps.
	for _, fc := range []fakeClient{
		c.kube.(*fake.Clientset),
		c.istio.(*istiofake.Clientset),
		c.gatewayapi.(*gatewayapifake.Clientset),
		c.dynamic.(*dynamicfake.FakeDynamicClient),
		c.metadata.(*metadatafake.FakeMetadataClient),
	} {
		fc.PrependReactor("list", "*", listReactor)
		fc.PrependWatchReactor("*", watchReactor(fc.Tracker()))
		fc.PrependReactor("create", "*", createReactor)
	}
	c.fastSync = true
	c.version = lazy.NewWithRetry(c.kube.Discovery().ServerVersion)
	if NewCrdWatcher != nil {
		c.crdWatcher = NewCrdWatcher(c)
	}
	return c
}
// NewFakeClientWithVersion creates a fake client whose discovery endpoint
// reports Kubernetes version 1.<minor>.
func NewFakeClientWithVersion(minor string, objects ...runtime.Object) CLIClient {
	c := NewFakeClient(objects...).(*client)
	fakeVersion := &kubeVersion.Info{Major: "1", Minor: minor, GitVersion: fmt.Sprintf("v1.%v.0", minor)}
	c.Kube().Discovery().(*fakediscovery.FakeDiscovery).FakedServerVersion = fakeVersion
	return c
}
// fakeClient captures the reactor-registration surface shared by all of the
// fake clientsets wired together in NewFakeClient.
type fakeClient interface {
	PrependReactor(verb, resource string, reaction clienttesting.ReactionFunc)
	PrependWatchReactor(resource string, reaction clienttesting.WatchReactionFunc)
	Tracker() clienttesting.ObjectTracker
}
// Client is a helper wrapper around the Kube RESTClient for istioctl -> Pilot/Envoy/Mesh related things
type client struct {
	clientFactory *clientFactory
	config        *rest.Config
	clusterID     cluster.ID
	// informerFactory shares informers across all consumers of this client.
	informerFactory informerfactory.InformerFactory
	// Typed clients, all built from the same rest config.
	extSet     kubeExtClient.Interface
	kube       kubernetes.Interface
	dynamic    dynamic.Interface
	metadata   metadata.Interface
	istio      istioclient.Interface
	gatewayapi gatewayapiclient.Interface
	// started tracks whether the client's informers have been started
	// (presumably set by RunAndWait, which is not visible in this chunk).
	started atomic.Bool
	// If enabled, will wait for cache syncs with extremely short delay. This should be used only for tests
	fastSync               bool
	informerWatchesPending *atomic.Int32
	// These may be set only when creating an extended client.
	revision        string
	restClient      *rest.RESTClient
	discoveryClient discovery.CachedDiscoveryInterface
	mapper          meta.ResettableRESTMapper
	// version lazily fetches (with retry) the server version via discovery.
	version lazy.Lazy[*kubeVersion.Info]
	crdWatcher kubetypes.CrdWatcher
	// http is a client for HTTP requests
	http *http.Client
}
// newClientInternal creates a Kubernetes client from the given factory.
// It builds every typed clientset (core, metadata, dynamic, Istio, gateway-api,
// apiextensions) from the same rest.Config, plus a lazily-resolved server
// version probe that uses a short (5s) timeout.
func newClientInternal(clientFactory *clientFactory, revision string, cluster cluster.ID) (*client, error) {
	var c client
	var err error
	c.clientFactory = clientFactory
	c.config, err = clientFactory.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	c.clusterID = cluster
	c.revision = revision
	c.restClient, err = clientFactory.RESTClient()
	if err != nil {
		return nil, err
	}
	c.discoveryClient, err = clientFactory.ToDiscoveryClient()
	if err != nil {
		return nil, err
	}
	c.mapper, err = clientFactory.mapper.Get()
	if err != nil {
		return nil, err
	}
	c.informerFactory = informerfactory.NewSharedInformerFactory()
	c.kube, err = kubernetes.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.metadata, err = metadata.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.dynamic, err = dynamic.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.istio, err = istioclient.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.gatewayapi, err = gatewayapiclient.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.extSet, err = kubeExtClient.NewForConfig(c.config)
	if err != nil {
		return nil, err
	}
	c.http = &http.Client{
		Timeout: time.Second * 15,
	}
	// Version probing uses a dedicated client with a short timeout. Copy the
	// config explicitly so the 5s timeout cannot leak into c.config shared by
	// every other clientset. Fall back to the default client on failure.
	clientWithTimeout := c.kube
	if c.config != nil {
		restConfig := rest.CopyConfig(c.config)
		restConfig.Timeout = time.Second * 5
		if kubeClient, err := kubernetes.NewForConfig(restConfig); err == nil {
			clientWithTimeout = kubeClient
		}
	}
	c.version = lazy.NewWithRetry(clientWithTimeout.Discovery().ServerVersion)
	return &c, nil
}
// EnableCrdWatcher enables the CRD watcher on the client.
// It panics if the watcher library was never imported or if called twice.
func EnableCrdWatcher(c Client) Client {
	if NewCrdWatcher == nil {
		panic("NewCrdWatcher is unset. Likely the crd watcher library is not imported anywhere")
	}
	cl := c.(*client)
	if cl.crdWatcher != nil {
		panic("EnableCrdWatcher called twice for the same client")
	}
	cl.crdWatcher = NewCrdWatcher(c)
	return c
}

// NewCrdWatcher is an injection point for the CRD watcher constructor;
// it is set as a side effect of importing the crd watcher library.
var NewCrdWatcher func(Client) kubetypes.CrdWatcher
// NewDefaultClient returns a default client, using standard Kubernetes config resolution to determine
// the cluster to access.
func NewDefaultClient() (Client, error) {
	// Empty kubeconfig/context arguments trigger the standard resolution chain.
	return NewClient(BuildClientCmd("", ""), "")
}
// NewCLIClient creates a Kubernetes client from the given ClientConfig. The "revision" parameter
// controls the behavior of GetIstioPods, by selecting a specific revision of the control plane.
// This is appropriate for use in CLI libraries because it exposes functionality unsafe for in-cluster controllers,
// and uses standard CLI (kubectl) caching.
func NewCLIClient(clientConfig clientcmd.ClientConfig, revision string) (CLIClient, error) {
	// diskCache=true: CLI invocations reuse kubectl's on-disk discovery cache.
	return newClientInternal(newClientFactory(clientConfig, true), revision, "")
}
// NewClient creates a Kubernetes client from the given rest config.
func NewClient(clientConfig clientcmd.ClientConfig, cluster cluster.ID) (Client, error) {
	// diskCache=false: in-cluster controllers use an in-memory discovery cache.
	return newClientInternal(newClientFactory(clientConfig, false), "", cluster)
}
// RESTConfig returns a defensive copy of the client's rest.Config,
// or nil if no config is set. Callers may mutate the copy freely.
func (c *client) RESTConfig() *rest.Config {
	if c.config == nil {
		return nil
	}
	out := *c.config
	return &out
}
// Ext returns the API-extensions (CRD) clientset.
func (c *client) Ext() kubeExtClient.Interface {
	return c.extSet
}

// Dynamic returns the dynamic (unstructured) client.
func (c *client) Dynamic() dynamic.Interface {
	return c.dynamic
}

// Kube returns the core Kubernetes clientset.
func (c *client) Kube() kubernetes.Interface {
	return c.kube
}

// Metadata returns the metadata-only client.
func (c *client) Metadata() metadata.Interface {
	return c.metadata
}

// Istio returns the Istio resource clientset.
func (c *client) Istio() istioclient.Interface {
	return c.istio
}

// GatewayAPI returns the gateway-api clientset.
func (c *client) GatewayAPI() gatewayapiclient.Interface {
	return c.gatewayapi
}

// Informers returns the shared informer factory backing this client.
func (c *client) Informers() informerfactory.InformerFactory {
	return c.informerFactory
}

// CrdWatcher returns the CRD watcher, or nil if one was never configured.
func (c *client) CrdWatcher() kubetypes.CrdWatcher {
	return c.crdWatcher
}
// RunAndWait starts all informers and waits for their caches to sync.
// Warning: this must be called AFTER .Informer() is called, which will register the informer.
func (c *client) RunAndWait(stop <-chan struct{}) {
	c.Run(stop)
	if c.fastSync {
		// fastSync is set only on fake clients created for tests.
		if c.crdWatcher != nil {
			c.WaitForCacheSync("crd watcher", stop, c.crdWatcher.HasSynced)
		}
		// WaitForCacheSync will virtually never be synced on the first call, as its called immediately after Start()
		// This triggers a 100ms delay per call, which is often called 2-3 times in a test, delaying tests.
		// Instead, we add an aggressive sync polling
		fastWaitForCacheSync(stop, c.informerFactory)
		// Additionally wait (100µs poll) until all pending informer watches are
		// established, or give up when stop closes / the test timeout is hit.
		_ = wait.PollUntilContextTimeout(context.Background(), time.Microsecond*100, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) {
			select {
			case <-stop:
				return false, fmt.Errorf("channel closed")
			default:
			}
			if c.informerWatchesPending.Load() == 0 {
				return true, nil
			}
			return false, nil
		})
	} else {
		if c.crdWatcher != nil {
			c.WaitForCacheSync("crd watcher", stop, c.crdWatcher.HasSynced)
		}
		c.informerFactory.WaitForCacheSync(stop)
	}
}
// Shutdown stops the shared informer factory and its informers.
func (c *client) Shutdown() {
	c.informerFactory.Shutdown()
}
// Run starts all registered informers and the CRD watcher (if configured).
// Calling Run more than once is tolerated and logged at debug level.
func (c *client) Run(stop <-chan struct{}) {
	c.informerFactory.Start(stop)
	if w := c.crdWatcher; w != nil {
		go w.Run(stop)
	}
	if c.started.Swap(true) {
		log.Debugf("cluster %q kube client started again", c.clusterID)
		return
	}
	log.Infof("cluster %q kube client started", c.clusterID)
}
// GetKubernetesVersion returns the (lazily fetched, cached) server version.
func (c *client) GetKubernetesVersion() (*kubeVersion.Info, error) {
	return c.version.Get()
}

// ClusterID returns the cluster this client is configured for.
func (c *client) ClusterID() cluster.ID {
	return c.clusterID
}
// Wait for cache sync immediately, rather than with 100ms delay which slows tests
// See https://github.com/kubernetes/kubernetes/issues/95262#issuecomment-703141573
func fastWaitForCacheSync(stop <-chan struct{}, informerFactory informerfactory.InformerFactory) {
	// A pre-closed channel makes WaitForCacheSync return immediately with the
	// current sync state instead of blocking on its internal 100ms poll.
	returnImmediately := make(chan struct{})
	close(returnImmediately)
	_ = wait.PollUntilContextTimeout(context.Background(), time.Microsecond*100, wait.ForeverTestTimeout, true, func(context.Context) (bool, error) {
		select {
		case <-stop:
			return false, fmt.Errorf("channel closed")
		default:
		}
		return informerFactory.WaitForCacheSync(returnImmediately), nil
	})
}
// WaitForCacheSync waits until all caches are synced. This will return true only if things synced
// successfully before the stop channel is closed. This function also lives in the Kubernetes cache
// library. However, that library will poll with 100ms fixed interval. Often the cache syncs in a few
// ms, but we are delayed a full 100ms. This is especially apparent in tests, which previously spent
// most of their time just in the 100ms wait interval.
//
// To optimize this, this function performs exponential backoff. This is generally safe because
// cache.InformerSynced functions are ~always quick to run. However, if the sync functions do perform
// expensive checks this function may not be suitable.
func WaitForCacheSync(name string, stop <-chan struct{}, cacheSyncs ...cache.InformerSynced) (r bool) {
	t0 := time.Now()
	// Backoff starts at 1ms and doubles up to a 100ms ceiling.
	// (renamed from `max`, which shadowed the Go 1.21 builtin)
	maxDelay := time.Millisecond * 100
	delay := time.Millisecond
	// f reports whether every registered sync function is satisfied.
	f := func() bool {
		for _, syncFunc := range cacheSyncs {
			if !syncFunc() {
				return false
			}
		}
		return true
	}
	attempt := 0
	// Log the outcome (and total time) regardless of how we exit.
	defer func() {
		if r {
			log.WithLabels("name", name, "attempt", attempt, "time", time.Since(t0)).Debugf("sync complete")
		} else {
			log.WithLabels("name", name, "attempt", attempt, "time", time.Since(t0)).Errorf("sync failed")
		}
	}()
	for {
		select {
		case <-stop:
			return false
		default:
		}
		attempt++
		res := f()
		if res {
			return true
		}
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
		log.WithLabels("name", name, "attempt", attempt, "time", time.Since(t0)).Debugf("waiting for sync...")
		if attempt%50 == 0 {
			// Log every 50th attempt (5s) at info, to avoid too much noise
			log.WithLabels("name", name, "attempt", attempt, "time", time.Since(t0)).Infof("waiting for sync...")
		}
		// sleep.Until returns false when stop closes during the sleep.
		if !sleep.Until(stop, delay) {
			return false
		}
	}
}
// WaitForCacheSync is a specialized version of the general WaitForCacheSync function which also
// handles fake client syncing.
// This is only required in cases where fake clients are used without RunAndWait.
func (c *client) WaitForCacheSync(name string, stop <-chan struct{}, cacheSyncs ...cacheSyncAlias) bool {
	if c.informerWatchesPending == nil {
		return WaitForCacheSync(name, stop, cacheSyncs...)
	}
	// Also require that all pending fake-informer watches have been established.
	// NOTE(review): appending to the variadic slice is safe here because variadic
	// arguments get a fresh backing array at the call site — confirm callers never
	// pass an existing slice with `...` that they reuse afterwards.
	syncFns := append(cacheSyncs, func() bool {
		return c.informerWatchesPending.Load() == 0
	})
	return WaitForCacheSync(name, stop, syncFns...)
}
// Revision returns the control-plane revision this client was created with ("" if none).
func (c *client) Revision() string {
	return c.revision
}
// PodExecCommands executes the given command vector in the named container via
// the Kubernetes exec subresource (SPDY), returning captured stdout and stderr.
// On error, the returned err is wrapped with pod/container context and any stderr output.
func (c *client) PodExecCommands(podName, podNamespace, container string, commands []string) (stdout, stderr string, err error) {
	// Enrich the error with pod/container context and stderr, if present.
	defer func() {
		if err != nil {
			if len(stderr) > 0 {
				err = fmt.Errorf("error exec'ing into %s/%s %s container: %v\n%s",
					podNamespace, podName, container, err, stderr)
			} else {
				err = fmt.Errorf("error exec'ing into %s/%s %s container: %v",
					podNamespace, podName, container, err)
			}
		}
	}()

	// Build the exec request; stdin and TTY are disabled, output is captured.
	req := c.restClient.Post().
		Resource("pods").
		Name(podName).
		Namespace(podNamespace).
		SubResource("exec").
		Param("container", container).
		VersionedParams(&v1.PodExecOptions{
			Container: container,
			Command:   commands,
			Stdin:     false,
			Stdout:    true,
			Stderr:    true,
			TTY:       false,
		}, kubescheme.ParameterCodec)

	wrapper, upgrader, err := roundTripperFor(c.config)
	if err != nil {
		return "", "", err
	}
	exec, err := remotecommand.NewSPDYExecutorForTransports(wrapper, upgrader, "POST", req.URL())
	if err != nil {
		return "", "", err
	}

	var stdoutBuf, stderrBuf bytes.Buffer
	err = exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{
		Stdin:  nil,
		Stdout: &stdoutBuf,
		Stderr: &stderrBuf,
		Tty:    false,
	})

	stdout = stdoutBuf.String()
	stderr = stderrBuf.String()
	return
}
// PodExec runs a single whitespace-delimited command string in the given
// container, delegating to PodExecCommands.
func (c *client) PodExec(podName, podNamespace, container string, command string) (stdout, stderr string, err error) {
	return c.PodExecCommands(podName, podNamespace, container, strings.Fields(command))
}
// PodLogs fetches the logs of the given container; previousLog selects the
// previous (crashed) container instance's logs.
func (c *client) PodLogs(ctx context.Context, podName, podNamespace, container string, previousLog bool) (string, error) {
	logOpts := &v1.PodLogOptions{
		Container: container,
		Previous:  previousLog,
	}
	stream, err := c.kube.CoreV1().Pods(podNamespace).GetLogs(podName, logOpts).Stream(ctx)
	if err != nil {
		return "", err
	}
	defer closeQuietly(stream)

	var sb strings.Builder
	if _, copyErr := io.Copy(&sb, stream); copyErr != nil {
		return "", copyErr
	}
	return sb.String(), nil
}
// AllDiscoveryDo issues an HTTP GET for path against the monitoring port of
// every running istiod pod in istiodNamespace (via port-forward), keyed by pod name.
// Returns nil, nil when istiods exist but none returned a non-empty response.
func (c *client) AllDiscoveryDo(ctx context.Context, istiodNamespace, path string) (map[string][]byte, error) {
	istiods, err := c.GetIstioPods(ctx, istiodNamespace, metav1.ListOptions{
		LabelSelector: "app=istiod",
		FieldSelector: RunningStatus,
	})
	if err != nil {
		return nil, err
	}
	if len(istiods) == 0 {
		return nil, errors.New("unable to find any Istiod instances")
	}

	result := map[string][]byte{}
	for _, istiod := range istiods {
		monitoringPort := findIstiodMonitoringPort(&istiod)
		res, err := c.portForwardRequest(ctx, istiod.Name, istiod.Namespace, http.MethodGet, path, monitoringPort)
		if err != nil {
			return nil, err
		}
		// Only record non-empty responses.
		if len(res) > 0 {
			result[istiod.Name] = res
		}
	}
	// If any Discovery servers responded, treat as a success
	if len(result) > 0 {
		return result, nil
	}
	return nil, nil
}
// EnvoyDo issues a request to the Envoy admin endpoint (default admin port 15000)
// of the given pod via port-forward.
func (c *client) EnvoyDo(ctx context.Context, podName, podNamespace, method, path string) ([]byte, error) {
	return c.portForwardRequest(ctx, podName, podNamespace, method, path, 15000)
}

// EnvoyDoWithPort is like EnvoyDo but targets an explicit port on the pod.
func (c *client) EnvoyDoWithPort(ctx context.Context, podName, podNamespace, method, path string, port int) ([]byte, error) {
	return c.portForwardRequest(ctx, podName, podNamespace, method, path, port)
}
// portForwardRequest opens a port-forward to podName:port, issues a single HTTP
// request for path against it, and returns the response body. Requires a 200 status.
func (c *client) portForwardRequest(ctx context.Context, podName, podNamespace, method, path string, port int) ([]byte, error) {
	formatError := func(err error) error {
		return fmt.Errorf("failure running port forward process: %v", err)
	}

	// Local address/port are left empty/0 so a free local port is chosen.
	fw, err := c.NewPortForwarder(podName, podNamespace, "", 0, port)
	if err != nil {
		return nil, err
	}
	if err = fw.Start(); err != nil {
		return nil, formatError(err)
	}
	defer fw.Close()
	req, err := http.NewRequest(method, fmt.Sprintf("http://%s/%s", fw.Address(), path), nil)
	if err != nil {
		return nil, formatError(err)
	}
	resp, err := c.http.Do(req.WithContext(ctx))
	if err != nil {
		return nil, formatError(err)
	}
	defer closeQuietly(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	// NOTE(review): the string argument below looks like generated/garbled
	// instrumentation metadata rather than intentional code — confirm io2.ReadAll's
	// expected arguments against its definition before changing anything here.
	out, err := io2.ReadAll(resp.Body, "/src/istio/pkg/kube/client.go:780:14 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
	if err != nil {
		return nil, formatError(err)
	}

	return out, nil
}
// GetIstioPods lists pods in namespace with the given options. If the client
// was created with a revision, the list is additionally filtered by the
// istio.io/rev label for that revision.
func (c *client) GetIstioPods(ctx context.Context, namespace string, opts metav1.ListOptions) ([]v1.Pod, error) {
	if c.revision != "" {
		revSelector := fmt.Sprintf("%s=%s", label.IoIstioRev.Name, c.revision)
		if opts.LabelSelector == "" {
			opts.LabelSelector = revSelector
		} else {
			opts.LabelSelector = opts.LabelSelector + "," + revSelector
		}
	}

	pl, err := c.kube.CoreV1().Pods(namespace).List(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("unable to retrieve Pods: %v", err)
	}
	return pl.Items, nil
}
// GetIstioVersions queries each ready istiod pod in namespace for its version
// (via the monitoring port's /version endpoint) and aggregates the results.
// Pods that fail to respond contribute an error to the returned multierror.
func (c *client) GetIstioVersions(ctx context.Context, namespace string) (*version.MeshInfo, error) {
	pods, err := c.GetIstioPods(ctx, namespace, metav1.ListOptions{
		LabelSelector: "app=istiod",
		FieldSelector: RunningStatus,
	})
	if err != nil {
		return nil, err
	}
	// Pod maybe running but not ready, so we need to check the container status
	readyPods := make([]v1.Pod, 0)
	for _, pod := range pods {
		if CheckPodReady(&pod) == nil {
			readyPods = append(readyPods, pod)
		}
	}
	if len(readyPods) == 0 {
		return nil, fmt.Errorf("no ready Istio pods in %q", namespace)
	}
	var errs error
	res := version.MeshInfo{}
	for _, pod := range readyPods {
		component := pod.Labels["istio"]
		server := version.ServerInfo{
			Component: component,
			Revision:  pod.GetLabels()[label.IoIstioRev.Name],
		}

		monitoringPort := findIstiodMonitoringPort(&pod)
		result, err := c.portForwardRequest(ctx, pod.Name, pod.Namespace, http.MethodGet, "/version", monitoringPort)
		if err != nil {
			// Append the wrapped error once; previously the bare err was also
			// appended, duplicating it in the multierror output.
			errs = multierror.Append(errs,
				fmt.Errorf("error port-forwarding into %s.%s: %v", pod.Namespace, pod.Name, err),
			)
			continue
		}
		var v version.Version
		err = json.Unmarshal(result, &v)
		if err == nil && v.ClientVersion.Version != "" {
			server.Info = *v.ClientVersion
			res = append(res, server)
			continue
		}
		// :15014/version returns something like
		// 1.7-alpha.9c900ba74d10a1affe7c23557ef0eebd6103b03c-9c900ba74d10a1affe7c23557ef0eebd6103b03c-Clean
		if len(result) > 0 {
			setServerInfoWithIstiodVersionInfo(&server.Info, string(result))
			// (Golang version not available through :15014/version endpoint)
			res = append(res, server)
		}
	}
	return &res, errs
}
// revisionOfPod determines the Istio revision a pod belongs to: directly from
// the istio.io/rev label (istiod/gateways), or from the sidecar status
// annotation for injected workload pods. Returns "" when undeterminable.
func revisionOfPod(pod *v1.Pod) string {
	// Istiod and gateway pods carry the revision label directly.
	if rev := pod.GetLabels()[label.IoIstioRev.Name]; rev != "" {
		return rev
	}
	// Injected pods record the revision inside the sidecar status annotation.
	anno, found := pod.GetAnnotations()[annotation.SidecarStatus.Name]
	if !found {
		return ""
	}
	status := struct {
		Revision string `json:"revision"`
	}{}
	if err := json.Unmarshal([]byte(anno), &status); err != nil {
		return ""
	}
	return status.Revision
}
// GetProxyPods returns one page (limit/token) of running proxy pods across all
// namespaces, identified by the canonical service name label. When the client
// has a revision, results are filtered to pods of that revision.
func (c *client) GetProxyPods(ctx context.Context, limit int64, token string) (*v1.PodList, error) {
	opts := metav1.ListOptions{
		LabelSelector: label.ServiceCanonicalName.Name,
		FieldSelector: RunningStatus,
		Limit:         limit,
		Continue:      token,
	}

	// get pods from all the namespaces.
	list, err := c.kube.CoreV1().Pods(metav1.NamespaceAll).List(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("failed to get the pod list: %v", err)
	}

	// If we have a istio.io/rev label for the injected pods,
	// this loop may not be needed. Instead, we can use "LabelSelector"
	// to get pods in a specific revision.
	if c.revision != "" {
		items := []v1.Pod{}
		for _, p := range list.Items {
			if revisionOfPod(&p) == c.revision {
				items = append(items, p)
			}
		}
		list.Items = items
	}

	return list, nil
}
// NewPortForwarder creates a port forwarder to podName:podPort; localAddress ""
// and localPort 0 let the forwarder choose defaults.
func (c *client) NewPortForwarder(podName, ns, localAddress string, localPort int, podPort int) (PortForwarder, error) {
	return newPortForwarder(c, podName, ns, localAddress, localPort, podPort)
}

// PodsForSelector lists pods in namespace matching the AND of the given label selectors.
func (c *client) PodsForSelector(ctx context.Context, namespace string, labelSelectors ...string) (*v1.PodList, error) {
	return c.kube.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
		LabelSelector: strings.Join(labelSelectors, ","),
	})
}
// ApplyYAMLFiles server-side-applies all non-empty YAML files concurrently
// into the given namespace, returning the first error encountered.
func (c *client) ApplyYAMLFiles(namespace string, yamlFiles ...string) error {
	group, _ := errgroup.WithContext(context.TODO())
	files := removeEmptyFiles(yamlFiles)
	for i := range files {
		file := files[i]
		group.Go(func() error {
			return c.ssapplyYAMLFile(namespace, false, file)
		})
	}
	return group.Wait()
}
// ApplyYAMLContents splits each YAML string into documents and server-side-applies
// every document concurrently into the given namespace.
func (c *client) ApplyYAMLContents(namespace string, yamls ...string) error {
	group, _ := errgroup.WithContext(context.TODO())
	for _, doc := range yamls {
		for _, chunk := range yml.SplitString(doc) {
			chunk := chunk
			group.Go(func() error {
				return c.ssapplyYAML(chunk, namespace, false)
			})
		}
	}
	return group.Wait()
}
// ApplyYAMLFilesDryRun is ApplyYAMLFiles with server-side dry-run enabled:
// nothing is persisted, but validation and admission still run.
func (c *client) ApplyYAMLFilesDryRun(namespace string, yamlFiles ...string) error {
	group, _ := errgroup.WithContext(context.TODO())
	files := removeEmptyFiles(yamlFiles)
	for i := range files {
		file := files[i]
		group.Go(func() error {
			return c.ssapplyYAMLFile(namespace, true, file)
		})
	}
	return group.Wait()
}
// CreatePerRPCCredentials returns gRPC credentials backed by a service account
// token, refreshed 60 seconds before expiry.
func (c *client) CreatePerRPCCredentials(_ context.Context, tokenNamespace, tokenServiceAccount string, audiences []string,
	expirationSeconds int64,
) (credentials.PerRPCCredentials, error) {
	return NewRPCCredentials(c, tokenNamespace, tokenServiceAccount, audiences, expirationSeconds, 60)
}

// UtilFactory exposes the underlying kubectl-style factory.
func (c *client) UtilFactory() PartialFactory {
	return c.clientFactory
}
// ssapplyYAMLFile reads a YAML file, splits it into documents, and
// server-side-applies each document sequentially, stopping at the first error.
func (c *client) ssapplyYAMLFile(namespace string, dryRun bool, file string) error {
	contents, err := os.ReadFile(file)
	if err != nil {
		return err
	}
	for _, doc := range yml.SplitString(string(contents)) {
		if applyErr := c.ssapplyYAML(doc, namespace, dryRun); applyErr != nil {
			return applyErr
		}
	}
	return nil
}
// ssapplyYAML server-side-applies a single YAML document into namespace using
// field manager "istio-ci". Non-Kubernetes documents (missing kind) are skipped.
func (c *client) ssapplyYAML(cfg string, namespace string, dryRun bool) error {
	obj, dr, err := c.buildObject(cfg, namespace)
	if err != nil {
		if runtime.IsMissingKind(err) {
			log.Infof("skip applying, not a Kubernetes kind")
			return nil
		}
		return err
	}

	// Server-side apply takes the full object as a JSON apply-patch.
	data, err := json.Marshal(obj)
	if err != nil {
		return err
	}
	// Force ownership so the apply wins field-manager conflicts.
	force := true
	_, err = dr.Patch(context.Background(), obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{
		DryRun:       getDryRun(dryRun),
		Force:        &force,
		FieldManager: "istio-ci",
	})
	// If we are changing CRDs, invalidate the discovery client so future calls will not fail
	if !dryRun && obj.GetKind() == gvk.CustomResourceDefinition.Kind {
		c.InvalidateDiscovery()
	}

	return err
}
// deleteYAMLFile reads a YAML file, splits it into documents, and deletes each
// described object sequentially, stopping at the first error.
func (c *client) deleteYAMLFile(namespace string, dryRun bool, file string) error {
	contents, err := os.ReadFile(file)
	if err != nil {
		return err
	}
	for _, doc := range yml.SplitString(string(contents)) {
		if delErr := c.deleteYAML(doc, namespace, dryRun); delErr != nil {
			return delErr
		}
	}
	return nil
}
// deleteYAML deletes the object described by a single YAML document.
// Non-Kubernetes documents are skipped; a missing object is not an error.
func (c *client) deleteYAML(cfg, namespace string, dryRun bool) error {
	obj, dr, err := c.buildObject(cfg, namespace)
	if err != nil {
		if runtime.IsMissingKind(err) {
			log.Infof("skip delete, not a Kubernetes kind")
			return nil
		}
		return err
	}
	err = dr.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{
		DryRun: getDryRun(dryRun),
	})
	// Already gone: treat as success (idempotent delete).
	if kerrors.IsNotFound(err) {
		return nil
	}
	return err
}
// InvalidateDiscovery drops cached discovery data and resets the REST mapper,
// so newly-created CRDs are visible to subsequent calls.
func (c *client) InvalidateDiscovery() {
	c.discoveryClient.Invalidate()
	c.mapper.Reset()
}
// DeleteYAMLFiles deletes the objects in all non-empty YAML files concurrently
// and returns all errors combined (nil if every delete succeeded).
func (c *client) DeleteYAMLFiles(namespace string, yamlFiles ...string) (err error) {
	yamlFiles = removeEmptyFiles(yamlFiles)

	// Run each delete concurrently and collect the errors.
	// Each goroutine writes only its own slot, so no lock is needed.
	errs := make([]error, len(yamlFiles))
	g, _ := errgroup.WithContext(context.TODO())
	for i, f := range yamlFiles {
		i, f := i, f
		g.Go(func() error {
			errs[i] = c.deleteYAMLFile(namespace, false, f)
			return errs[i]
		})
	}
	// Wait error is ignored: all errors are already captured in errs.
	_ = g.Wait()
	return multierror.Append(nil, errs...).ErrorOrNil()
}
// DeleteYAMLFilesDryRun is DeleteYAMLFiles with server-side dry-run enabled.
func (c *client) DeleteYAMLFilesDryRun(namespace string, yamlFiles ...string) (err error) {
	yamlFiles = removeEmptyFiles(yamlFiles)

	// Run each delete concurrently and collect the errors.
	// Each goroutine writes only its own slot, so no lock is needed.
	errs := make([]error, len(yamlFiles))
	g, _ := errgroup.WithContext(context.TODO())
	for i, f := range yamlFiles {
		i, f := i, f
		g.Go(func() error {
			errs[i] = c.deleteYAMLFile(namespace, true, f)
			return errs[i]
		})
	}
	// Wait error is ignored: all errors are already captured in errs.
	_ = g.Wait()
	return multierror.Append(nil, errs...).ErrorOrNil()
}
// closeQuietly closes c, deliberately discarding any Close error
// (used for best-effort cleanup of streams and response bodies).
func closeQuietly(c io.Closer) {
	_ = c.Close()
}
// removeEmptyFiles filters out paths that are empty or cannot be stat'ed,
// preserving the order of the remaining paths.
func removeEmptyFiles(files []string) []string {
	nonEmpty := make([]string, 0, len(files))
	for _, file := range files {
		if isEmptyFile(file) {
			continue
		}
		nonEmpty = append(nonEmpty, file)
	}
	return nonEmpty
}
func isEmptyFile(f string) bool {
fileInfo, err := os.Stat(f)
if err != nil {
return true
}
if fileInfo.Size() == 0 {
return true
}
return false
}
// decUnstructured decodes arbitrary YAML/JSON documents into Unstructured objects.
var decUnstructured = yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme)
// getDryRun translates a boolean into the DryRun field value expected by the
// Kubernetes API: ["All"] when enabled, nil otherwise.
func getDryRun(dryRun bool) []string {
	if dryRun {
		return []string{metav1.DryRunAll}
	}
	return nil
}
// buildObject takes a config YAML and default namespace and returns the same object as Unstructured, along with the client to access it
func (c *client) buildObject(cfg string, namespace string) (*unstructured.Unstructured, dynamic.ResourceInterface, error) {
	obj := &unstructured.Unstructured{}
	_, gvk, err := decUnstructured.Decode([]byte(cfg), nil, obj)
	if err != nil {
		return nil, nil, err
	}

	// Resolve the GVK to a REST resource so we know its scope and endpoint.
	mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
	if err != nil {
		return nil, nil, fmt.Errorf("mapping: %v", err)
	}

	var dr dynamic.ResourceInterface
	if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
		// Prefer the object's own namespace; fall back to the provided default.
		// A conflicting explicit namespace is an error.
		ns := obj.GetNamespace()
		if ns == "" {
			ns = namespace
		} else if namespace != "" && ns != namespace {
			return nil, nil, fmt.Errorf("object %v/%v provided namespace %q but apply called with %q", gvk, obj.GetName(), ns, namespace)
		}
		// namespaced resources should specify the namespace
		dr = c.dynamic.Resource(mapping.Resource).Namespace(ns)
	} else {
		// for cluster-wide resources
		dr = c.dynamic.Resource(mapping.Resource)
	}
	return obj, dr, nil
}
// IstioScheme returns a scheme will all known Istio-related types added
var (
	IstioScheme = istioScheme()
	// IstioCodec provides serializers/deserializers for IstioScheme types.
	IstioCodec = serializer.NewCodecFactory(IstioScheme)
)

// FakeIstioScheme is an IstioScheme that has List type registered.
var FakeIstioScheme = func() *runtime.Scheme {
	s := istioScheme()
	// Workaround https://github.com/kubernetes/kubernetes/issues/107823
	s.AddKnownTypeWithName(schema.GroupVersionKind{Group: "fake-metadata-client-group", Version: "v1", Kind: "List"}, &metav1.List{})
	return s
}()
// istioScheme builds a runtime.Scheme containing core Kubernetes types plus all
// Istio, gateway-api, MCS, and API-extensions types. Registration failures are
// programmer errors, hence the Must wrappers.
func istioScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	utilruntime.Must(kubescheme.AddToScheme(scheme))
	utilruntime.Must(mcs.AddToScheme(scheme))
	utilruntime.Must(clientnetworkingalpha.AddToScheme(scheme))
	utilruntime.Must(clientnetworkingbeta.AddToScheme(scheme))
	utilruntime.Must(clientsecurity.AddToScheme(scheme))
	utilruntime.Must(clienttelemetry.AddToScheme(scheme))
	utilruntime.Must(clientextensions.AddToScheme(scheme))
	utilruntime.Must(gatewayapi.AddToScheme(scheme))
	utilruntime.Must(gatewayapibeta.AddToScheme(scheme))
	utilruntime.Must(gatewayapiv1.AddToScheme(scheme))
	utilruntime.Must(apiextensionsv1.AddToScheme(scheme))
	return scheme
}
// setServerInfoWithIstiodVersionInfo parses the raw istiod version string
// (e.g. "1.12.0-<sha>[-dirty]-<status>") into serverInfo's Version,
// GitRevision, BuildStatus, and GitTag fields. Strings with fewer than three
// dash-separated parts are stored verbatim in Version.
func setServerInfoWithIstiodVersionInfo(serverInfo *version.BuildInfo, istioInfo string) {
	versionParts := strings.Split(istioInfo, "-")
	nParts := len(versionParts)
	if nParts >= 3 {
		// The format will be like 1.12.0-016bc46f4a5e0ef3fa135b3c5380ab7765467c1a-dirty-Modified
		// version is '1.12.0' || '1.12.0-custom-build'
		// revision is '016bc46f4a5e0ef3fa135b3c5380ab7765467c1a' || '016bc46f4a5e0ef3fa135b3c5380ab7765467c1a-dirty'
		// status is 'Modified' || 'Clean'
		// Ref From common/scripts/report_build_info.sh
		serverInfo.Version = strings.Join(versionParts[:nParts-2], "-")
		serverInfo.GitRevision = versionParts[nParts-2]
		serverInfo.BuildStatus = versionParts[nParts-1]
		// A "dirty" segment means the real revision was "<sha>-dirty": re-join it
		// with the preceding part and shorten Version accordingly.
		// (replaces the previous char-by-char d/i/r/t/y comparison, which was
		// exactly equivalent to this prefix check)
		if strings.HasPrefix(serverInfo.GitRevision, "dirty") {
			serverInfo.GitRevision = strings.Join([]string{versionParts[nParts-3], "dirty"}, "-")
			serverInfo.Version = strings.Join(versionParts[:nParts-3], "-")
		}
		serverInfo.GitTag = serverInfo.Version
	} else {
		serverInfo.Version = istioInfo
	}
}
// SetRevisionForTest overrides the client's revision; intended for tests only.
func SetRevisionForTest(c CLIClient, rev string) CLIClient {
	tc := c.(*client)
	tc.revision = rev
	return tc
}
// findIstiodMonitoringPort returns the monitoring port advertised by the pod's
// "prometheus.io/port" annotation, falling back to 15014 when the annotation is
// absent or not a valid integer.
func findIstiodMonitoringPort(pod *v1.Pod) int {
	const defaultMonitoringPort = 15014
	annotationValue, ok := pod.GetAnnotations()["prometheus.io/port"]
	if !ok {
		return defaultMonitoringPort
	}
	port, err := strconv.Atoi(annotationValue)
	if err != nil {
		return defaultMonitoringPort
	}
	return port
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
// Fixed names used when synthesizing a kubeconfig from a rest.Config.
const (
	contextName  = "context0"
	clusterName  = "cluster0"
	authInfoName = "authInfo0"
)

// Compile-time check that clientConfig satisfies clientcmd.ClientConfig.
var _ clientcmd.ClientConfig = &clientConfig{}

// clientConfig is a utility that allows construction of a k8s ClientConfig from
// a k8s rest.Config
type clientConfig struct {
	// restConfig is stored by value; accessors hand out copies.
	restConfig rest.Config
}
// NewClientConfigForRestConfig creates a new k8s clientcmd.ClientConfig from the given rest.Config.
func NewClientConfigForRestConfig(restConfig *rest.Config) clientcmd.ClientConfig {
	return &clientConfig{
		// Copy the config by value so later caller mutations don't leak in.
		restConfig: *restConfig,
	}
}
// RawConfig synthesizes a single-context kubeconfig (api.Config) equivalent to
// the wrapped rest.Config, using the fixed context/cluster/authInfo names.
func (c *clientConfig) RawConfig() (api.Config, error) {
	cfg := api.Config{
		Kind:        "Config",
		APIVersion:  "v1",
		Preferences: api.Preferences{},
		Clusters: map[string]*api.Cluster{
			clusterName: newCluster(&c.restConfig),
		},
		AuthInfos: map[string]*api.AuthInfo{
			authInfoName: newAuthInfo(&c.restConfig),
		},
		Contexts: map[string]*api.Context{
			contextName: {
				Cluster:  clusterName,
				AuthInfo: authInfoName,
			},
		},
		CurrentContext: contextName,
	}

	return cfg, nil
}
// ClientConfig returns a copy of the wrapped rest.Config.
func (c *clientConfig) ClientConfig() (*rest.Config, error) {
	return c.copyRestConfig(), nil
}

// Namespace always reports "default"; the false flag indicates it was not
// explicitly set by the user.
func (c *clientConfig) Namespace() (string, bool, error) {
	return "default", false, nil
}

// ConfigAccess is unsupported for a rest.Config-backed ClientConfig.
func (c *clientConfig) ConfigAccess() clientcmd.ConfigAccess {
	return nil
}

// copyRestConfig returns a fresh copy so callers can mutate it safely.
func (c *clientConfig) copyRestConfig() *rest.Config {
	out := c.restConfig
	return &out
}
// newAuthInfo maps the credential-related fields of a rest.Config onto a
// kubeconfig AuthInfo stanza (certs, tokens, impersonation, basic auth, exec).
func newAuthInfo(restConfig *rest.Config) *api.AuthInfo {
	return &api.AuthInfo{
		ClientCertificate:    restConfig.CertFile,
		ClientCertificateData: restConfig.CertData,
		ClientKey:            restConfig.KeyFile,
		ClientKeyData:        restConfig.KeyData,
		Token:                restConfig.BearerToken,
		TokenFile:            restConfig.BearerTokenFile,
		Impersonate:          restConfig.Impersonate.UserName,
		ImpersonateGroups:    restConfig.Impersonate.Groups,
		ImpersonateUserExtra: restConfig.Impersonate.Extra,
		Username:             restConfig.Username,
		Password:             restConfig.Password,
		AuthProvider:         restConfig.AuthProvider,
		Exec:                 restConfig.ExecProvider,
	}
}
// newCluster maps the server/TLS fields of a rest.Config onto a kubeconfig
// Cluster stanza.
func newCluster(restConfig *rest.Config) *api.Cluster {
	return &api.Cluster{
		Server:                   restConfig.Host,
		TLSServerName:            restConfig.ServerName,
		InsecureSkipTLSVerify:    restConfig.Insecure,
		CertificateAuthority:     restConfig.CAFile,
		CertificateAuthorityData: restConfig.CAData,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"path/filepath"
"regexp"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/client-go/discovery"
diskcached "k8s.io/client-go/discovery/cached/disk"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
"istio.io/istio/pkg/lazy"
)
// Compile-time check that clientFactory satisfies PartialFactory.
var _ PartialFactory = &clientFactory{}

// clientFactory partially implements the kubectl util.Factory, which is provides access to various k8s clients.
// The full Factory can be built with MakeKubeFactory.
// This split is to avoid huge dependencies.
type clientFactory struct {
	clientConfig clientcmd.ClientConfig

	// Lazily-built, cached components; each is constructed at most once,
	// retrying on failure (see newClientFactory).
	expander        lazy.Lazy[meta.RESTMapper]
	mapper          lazy.Lazy[meta.ResettableRESTMapper]
	discoveryClient lazy.Lazy[discovery.CachedDiscoveryInterface]
}
// newClientFactory creates a new util.Factory from the given clientcmd.ClientConfig.
// diskCache selects kubectl-style on-disk discovery caching (CLI use) versus
// in-memory caching (controller use).
func newClientFactory(clientConfig clientcmd.ClientConfig, diskCache bool) *clientFactory {
	out := &clientFactory{
		clientConfig: clientConfig,
	}

	out.discoveryClient = lazy.NewWithRetry(func() (discovery.CachedDiscoveryInterface, error) {
		restConfig, err := out.ToRESTConfig()
		if err != nil {
			return nil, err
		}
		// Setup cached discovery. CLIs uses disk cache, controllers use memory cache.
		if diskCache {
			// From https://github.com/kubernetes/cli-runtime/blob/4fdf49ae46a0caa7fafdfe97825c6129d5153f06/pkg/genericclioptions/config_flags.go#L288
			cacheDir := filepath.Join(homedir.HomeDir(), ".kube", "cache")
			httpCacheDir := filepath.Join(cacheDir, "http")
			discoveryCacheDir := computeDiscoverCacheDir(filepath.Join(cacheDir, "discovery"), restConfig.Host)
			return diskcached.NewCachedDiscoveryClientForConfig(restConfig, discoveryCacheDir, httpCacheDir, 6*time.Hour)
		}
		d, err := discovery.NewDiscoveryClientForConfig(restConfig)
		if err != nil {
			return nil, err
		}
		return memory.NewMemCacheClient(d), nil
	})

	// The REST mapper resolves GVKs to resources via the cached discovery client.
	out.mapper = lazy.NewWithRetry(func() (meta.ResettableRESTMapper, error) {
		discoveryClient, err := out.ToDiscoveryClient()
		if err != nil {
			return nil, err
		}
		return restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient), nil
	})

	// The expander layers kubectl-style short-name resolution over the mapper.
	out.expander = lazy.NewWithRetry(func() (meta.RESTMapper, error) {
		discoveryClient, err := out.discoveryClient.Get()
		if err != nil {
			return nil, err
		}
		mapper, err := out.mapper.Get()
		if err != nil {
			return nil, err
		}
		return restmapper.NewShortcutExpander(mapper, discoveryClient, func(string) {}), nil
	})
	return out
}
// ToRESTConfig builds a rest.Config from the client config and applies the
// Istio defaults (see SetRestDefaults).
func (c *clientFactory) ToRESTConfig() (*rest.Config, error) {
	restConfig, err := c.clientConfig.ClientConfig()
	if err != nil {
		return nil, err
	}
	return SetRestDefaults(restConfig), nil
}

// ToDiscoveryClient returns the lazily-created cached discovery client.
func (c *clientFactory) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) {
	return c.discoveryClient.Get()
}
// overlyCautiousIllegalFileCharacters matches characters that *might* not be supported. Windows is really restrictive, so this is really restrictive
var overlyCautiousIllegalFileCharacters = regexp.MustCompile(`[^(\w/.)]`)
// computeDiscoverCacheDir takes the parentDir and the host and comes up with a "usually non-colliding" name.
func computeDiscoverCacheDir(parentDir, host string) string {
// strip the optional scheme from host if its there:
schemelessHost := strings.Replace(strings.Replace(host, "https://", "", 1), "http://", "", 1)
// now do a simple collapse of non-AZ09 characters. Collisions are possible but unlikely. Even if we do collide the problem is short lived
safeHost := overlyCautiousIllegalFileCharacters.ReplaceAllString(schemelessHost, "_")
return filepath.Join(parentDir, safeHost)
}
// ToRESTMapper returns the lazily-created shortcut-expanding REST mapper.
func (c *clientFactory) ToRESTMapper() (meta.RESTMapper, error) {
	return c.expander.Get()
}

// ToRawKubeConfigLoader returns the underlying client config as-is.
func (c *clientFactory) ToRawKubeConfigLoader() clientcmd.ClientConfig {
	return c.clientConfig
}
func (c *clientFactory) DynamicClient() (dynamic.Interface, error) {
restConfig, err := c.ToRESTConfig()
if err != nil {
return nil, err
}
return dynamic.NewForConfig(restConfig)
}
func (c *clientFactory) KubernetesClientSet() (*kubernetes.Clientset, error) {
restConfig, err := c.ToRESTConfig()
if err != nil {
return nil, err
}
return kubernetes.NewForConfig(restConfig)
}
// RESTClient returns a raw REST client built from the factory's REST config.
func (c *clientFactory) RESTClient() (*rest.RESTClient, error) {
	cfg, err := c.ToRESTConfig()
	if err != nil {
		return nil, err
	}
	return rest.RESTClientFor(cfg)
}
// rESTClientGetter is the subset of kubectl's RESTClientGetter functionality
// that this factory implements.
type rESTClientGetter interface {
	// ToRESTConfig returns restconfig
	ToRESTConfig() (*rest.Config, error)
	// ToDiscoveryClient returns discovery client
	ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error)
	// ToRESTMapper returns a restmapper
	ToRESTMapper() (meta.RESTMapper, error)
	// ToRawKubeConfigLoader return kubeconfig loader as-is
	ToRawKubeConfigLoader() clientcmd.ClientConfig
}
// PartialFactory is a subset of the kubectl util.Factory surface: client
// getters plus concrete client constructors.
type PartialFactory interface {
	rESTClientGetter
	// DynamicClient returns a dynamic client ready for use
	DynamicClient() (dynamic.Interface, error)
	// KubernetesClientSet gives you back an external clientset
	KubernetesClientSet() (*kubernetes.Clientset, error)
	// RESTClient returns a RESTClient for accessing Kubernetes resources, or an error.
	RESTClient() (*rest.RESTClient, error)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"fmt"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/gvk"
istiolog "istio.io/istio/pkg/log"
)
// log is the shared logging scope for the common controller helpers in this package.
var log = istiolog.RegisterScope("controllers", "common controller logic")
// Object is a union of runtime + meta objects. Essentially every k8s object meets this interface,
// and certainly all that we care about.
type Object interface {
	metav1.Object
	runtime.Object
}
// ComparableObject is an Object that additionally supports ==, so it can be
// compared against its zero value (see IsNil).
type ComparableObject interface {
	comparable
	Object
}
// IsNil works around comparing generic types: it reports whether o is the
// zero value (typically a nil pointer) for its comparable type.
func IsNil[O ComparableObject](o O) bool {
	var zero O
	return o == zero
}
// UnstructuredToGVR extracts the GVR of an unstructured resource. This is useful when using dynamic
// clients.
func UnstructuredToGVR(u unstructured.Unstructured) (schema.GroupVersionResource, error) {
	var empty schema.GroupVersionResource
	gv, err := schema.ParseGroupVersion(u.GetAPIVersion())
	if err != nil {
		return empty, err
	}
	k := config.GroupVersionKind{
		Group:   gv.Group,
		Version: gv.Version,
		Kind:    u.GetKind(),
	}
	// Translate kind to resource via the static registry; unknown kinds are an error.
	res, ok := gvk.ToGVR(k)
	if !ok {
		return empty, fmt.Errorf("unknown gvk: %v", k)
	}
	return res, nil
}
// ObjectToGVR extracts the GVR of a typed resource from its TypeMeta. This is
// useful when using dynamic clients.
func ObjectToGVR(u Object) (schema.GroupVersionResource, error) {
	g := u.GetObjectKind().GroupVersionKind()
	k := config.GroupVersionKind{
		Group:   g.Group,
		Version: g.Version,
		Kind:    g.Kind,
	}
	// Translate kind to resource via the static registry; unknown kinds are an error.
	res, ok := gvk.ToGVR(k)
	if !ok {
		return schema.GroupVersionResource{}, fmt.Errorf("unknown gvk: %v", k)
	}
	return res, nil
}
// EnqueueForParentHandler returns a handler that will enqueue the parent (by ownerRef) resource
func EnqueueForParentHandler(q Queue, kind config.GroupVersionKind) func(obj Object) {
	return func(obj Object) {
		for _, ref := range obj.GetOwnerReferences() {
			gv, err := schema.ParseGroupVersion(ref.APIVersion)
			if err != nil {
				log.Errorf("could not parse OwnerReference api version %q: %v", ref.APIVersion, err)
				continue
			}
			if gv.Group != kind.Group || ref.Kind != kind.Kind {
				continue
			}
			// We found a parent we care about, add it to the queue.
			// OwnerReferences never carry a namespace; owners are always
			// same-namespace, so use the child's namespace.
			q.Add(types.NamespacedName{
				Namespace: obj.GetNamespace(),
				Name:      ref.Name,
			})
		}
	}
}
// EventType represents a registry update event
type EventType int

const (
	// EventAdd is sent when an object is added
	EventAdd EventType = iota
	// EventUpdate is sent when an object is modified
	// Captures the modified object
	EventUpdate
	// EventDelete is sent when an object is deleted
	// Captures the object at the last known state
	EventDelete
)

// String returns a short human-readable name for the event type.
func (event EventType) String() string {
	switch event {
	case EventAdd:
		return "add"
	case EventUpdate:
		return "update"
	case EventDelete:
		return "delete"
	default:
		return "unknown"
	}
}
// Event captures a single add/update/delete notification for an object.
type Event struct {
	Old   Object // previous state; nil for adds
	New   Object // current state; nil for deletes
	Event EventType
}
// Latest returns the most recent state known for the event's object:
// New when present, otherwise Old (e.g. for deletes).
func (e Event) Latest() Object {
	if e.New == nil {
		return e.Old
	}
	return e.New
}
// FromEventHandler adapts a single Event-based callback into the three-callback
// cache.ResourceEventHandler shape. Events whose object cannot be extracted are dropped.
func FromEventHandler(handler func(o Event)) cache.ResourceEventHandler {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj any) {
			cur := ExtractObject(obj)
			if cur == nil {
				return
			}
			handler(Event{New: cur, Event: EventAdd})
		},
		UpdateFunc: func(oldInterface, newInterface any) {
			prev := ExtractObject(oldInterface)
			if prev == nil {
				return
			}
			cur := ExtractObject(newInterface)
			if cur == nil {
				return
			}
			handler(Event{Old: prev, New: cur, Event: EventUpdate})
		},
		DeleteFunc: func(obj any) {
			last := ExtractObject(obj)
			if last == nil {
				return
			}
			handler(Event{Old: last, Event: EventDelete})
		},
	}
}
// ObjectHandler returns a handler that will act on the latest version of an object
// This means Add/Update/Delete are all handled the same and are just used to trigger reconciling.
func ObjectHandler(handler func(o Object)) cache.ResourceEventHandler {
	onAny := func(obj any) {
		if o := ExtractObject(obj); o != nil {
			handler(o)
		}
	}
	return cache.ResourceEventHandlerFuncs{
		AddFunc: onAny,
		UpdateFunc: func(_, newObj any) {
			// Only the latest version matters for reconciling.
			onAny(newObj)
		},
		DeleteFunc: onAny,
	}
}
// FilteredObjectHandler returns a handler that will act on the latest version of an object
// This means Add/Update/Delete are all handled the same and are just used to trigger reconciling.
// If filters are set, returning 'false' will exclude the event. For Add and Deletes, the filter will be based
// on the new or old item. For updates, the item will be handled if either the new or the old object is updated.
func FilteredObjectHandler(handler func(o Object), filter func(o Object) bool) cache.ResourceEventHandler {
	// false: handle every update, not just spec (resourceVersion) changes.
	return filteredObjectHandler(handler, false, filter)
}
// FilteredObjectSpecHandler returns a handler that will act on the latest version of an object
// This means Add/Update/Delete are all handled the same and are just used to trigger reconciling.
// Unlike FilteredObjectHandler, the handler is only triggered when the resource spec changes (ie resourceVersion)
// If filters are set, returning 'false' will exclude the event. For Add and Deletes, the filter will be based
// on the new or old item. For updates, the item will be handled if either the new or the old object is updated.
func FilteredObjectSpecHandler(handler func(o Object), filter func(o Object) bool) cache.ResourceEventHandler {
	// true: skip updates whose resourceVersion did not change.
	return filteredObjectHandler(handler, true, filter)
}
// filteredObjectHandler implements the shared logic behind FilteredObjectHandler and
// FilteredObjectSpecHandler: filtered add/delete handling, plus update handling that
// can optionally be restricted to spec (resourceVersion) changes.
func filteredObjectHandler(handler func(o Object), onlyIncludeSpecChanges bool, filter func(o Object) bool) cache.ResourceEventHandler {
	handleOne := func(obj any) {
		o := ExtractObject(obj)
		if o == nil || !filter(o) {
			return
		}
		handler(o)
	}
	handleUpdate := func(oldInterface, newInterface any) {
		prev := ExtractObject(oldInterface)
		if prev == nil {
			return
		}
		cur := ExtractObject(newInterface)
		if cur == nil {
			return
		}
		// In spec-only mode an unchanged resourceVersion means no spec change.
		if onlyIncludeSpecChanges && prev.GetResourceVersion() == cur.GetResourceVersion() {
			return
		}
		// Handle if either the new or the old object passes the filter.
		curMatches := filter(cur)
		prevMatches := filter(prev)
		if !curMatches && !prevMatches {
			return
		}
		handler(cur)
	}
	return cache.ResourceEventHandlerFuncs{
		AddFunc:    handleOne,
		UpdateFunc: handleUpdate,
		DeleteFunc: handleOne,
	}
}
// Extract pulls a T from obj, handling tombstones.
// This will return nil if the object cannot be extracted.
func Extract[T Object](obj any) T {
	var empty T
	if obj == nil {
		return empty
	}
	// Fast path: the object already has the expected type.
	if o, ok := obj.(T); ok {
		return o
	}
	// Deletes may deliver a tombstone wrapping the last known state.
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		log.Errorf("couldn't get object from tombstone: %+v", obj)
		return empty
	}
	o, ok := tombstone.Obj.(T)
	if !ok {
		log.Errorf("tombstone contained object that is not an object (key:%v, obj:%T)", tombstone.Key, tombstone.Obj)
		return empty
	}
	return o
}
// ExtractObject is Extract specialized to the generic Object interface.
func ExtractObject(obj any) Object {
	return Extract[Object](obj)
}
// IgnoreNotFound returns nil on NotFound errors.
// All other values that are not NotFound errors or nil are returned unmodified.
func IgnoreNotFound(err error) error {
	if err == nil || kerrors.IsNotFound(err) {
		return nil
	}
	return err
}
// EventHandler mirrors ResourceEventHandlerFuncs, but takes typed T objects instead of any.
type EventHandler[T Object] struct {
	AddFunc    func(obj T)
	UpdateFunc func(oldObj, newObj T)
	DeleteFunc func(obj T)
}

// OnAdd implements cache.ResourceEventHandler; the initial-sync flag is ignored.
func (e EventHandler[T]) OnAdd(obj interface{}, _ bool) {
	if e.AddFunc != nil {
		e.AddFunc(Extract[T](obj))
	}
}

// OnUpdate implements cache.ResourceEventHandler.
func (e EventHandler[T]) OnUpdate(oldObj, newObj interface{}) {
	if e.UpdateFunc != nil {
		e.UpdateFunc(Extract[T](oldObj), Extract[T](newObj))
	}
}

// OnDelete implements cache.ResourceEventHandler; obj may be a tombstone, which Extract unwraps.
func (e EventHandler[T]) OnDelete(obj interface{}) {
	if e.DeleteFunc != nil {
		e.DeleteFunc(Extract[T](obj))
	}
}

// Compile-time check that EventHandler satisfies cache.ResourceEventHandler.
var _ cache.ResourceEventHandler = EventHandler[Object]{}
// Shutdowner is implemented by anything whose registered handlers can be shut down.
type Shutdowner interface {
	ShutdownHandlers()
}
// ShutdownAll is a simple helper to shut down all of the given informers.
func ShutdownAll(s ...Shutdowner) {
	for _, shutdowner := range s {
		shutdowner.ShutdownHandlers()
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controllers
import (
"fmt"
"time"
"go.uber.org/atomic"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"istio.io/istio/pkg/config"
istiolog "istio.io/istio/pkg/log"
)
// ReconcilerFn is the signature for reconciling a single queue item by its key.
type ReconcilerFn func(key types.NamespacedName) error
// Queue defines an abstraction around Kubernetes' workqueue.
// Items enqueued are deduplicated; this generally means relying on ordering of events in the queue is not feasible.
type Queue struct {
	queue       workqueue.RateLimitingInterface
	initialSync *atomic.Bool // set once all items enqueued before Run() are processed
	name        string
	maxAttempts int // failures beyond this count are dropped; 0 disables retries
	workFn      func(key any) error
	closed      chan struct{} // closed when the processing loop exits
	log         *istiolog.Scope
}
// WithName sets a name for the queue. This is used for logging.
func WithName(name string) func(q *Queue) {
	return func(q *Queue) { q.name = name }
}
// WithRateLimiter allows defining a custom rate limiter for the queue,
// overriding the default controller rate limiter chosen by NewQueue.
func WithRateLimiter(r workqueue.RateLimiter) func(q *Queue) {
	return func(q *Queue) {
		q.queue = workqueue.NewRateLimitingQueue(r)
	}
}
// WithMaxAttempts allows defining a custom max attempts for the queue. If not set, items will not be retried
func WithMaxAttempts(n int) func(q *Queue) {
	return func(q *Queue) {
		q.maxAttempts = n
	}
}
// WithReconciler defines the handler function to handle items in the queue.
func WithReconciler(f ReconcilerFn) func(q *Queue) {
	return func(q *Queue) {
		q.workFn = func(key any) error {
			// Items enqueued via Add/AddObject are NamespacedName keys.
			return f(key.(types.NamespacedName))
		}
	}
}
// WithGenericReconciler defines the handler function to handle items in the queue that can handle any type
func WithGenericReconciler(f func(key any) error) func(q *Queue) {
	return func(q *Queue) {
		// f already matches the workFn signature exactly; assign it directly
		// instead of wrapping it in a redundant closure.
		q.workFn = f
	}
}
// NewQueue creates a new queue with the given name, applying any provided options.
func NewQueue(name string, options ...func(*Queue)) Queue {
	q := Queue{
		name:        name,
		closed:      make(chan struct{}),
		initialSync: atomic.NewBool(false),
	}
	for _, opt := range options {
		opt(&q)
	}
	// Fall back to the default controller rate limiter unless WithRateLimiter set one.
	if q.queue == nil {
		q.queue = workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	}
	q.log = log.WithLabels("controller", q.name)
	return q
}
// Add an item to the queue. Duplicate items already pending are deduplicated.
func (q Queue) Add(item any) {
	q.queue.Add(item)
}
// AddObject takes an Object and adds the types.NamespacedName associated.
func (q Queue) AddObject(obj Object) {
	q.queue.Add(config.NamespacedName(obj))
}
// Run the queue. This is synchronous, so should typically be called in a goroutine.
func (q Queue) Run(stop <-chan struct{}) {
	defer q.queue.ShutDown()
	q.log.Infof("starting")
	// Enqueue the sync signal first, so HasSynced flips to true once every
	// item added before Run() has been processed.
	q.queue.Add(defaultSyncSignal)
	go func() {
		// Process updates until we return false, which indicates the queue is terminated
		for q.processNextItem() {
		}
		close(q.closed)
	}()
	// Block until asked to stop, or until the processing loop exits on its own.
	select {
	case <-stop:
	case <-q.closed:
	}
	q.log.Infof("stopped")
}
// syncSignal defines a dummy signal that is enqueued when .Run() is called. This allows us to detect
// when we have processed all items added to the queue prior to Run().
type syncSignal struct{}

// defaultSyncSignal is a singleton instance of syncSignal.
var defaultSyncSignal = syncSignal{}
// HasSynced returns true if the queue has 'synced'. A synced queue has started running and has
// processed all events that were added prior to Run() being called. Warning: these items will be
// processed at least once, but may have failed.
func (q Queue) HasSynced() bool {
	return q.initialSync.Load()
}
// Closed returns a chan that will be signaled when the Instance has stopped processing tasks.
func (q Queue) Closed() <-chan struct{} {
	return q.closed
}
// processNextItem is the main workFn loop for the queue. It returns false only
// when the queue has been shut down.
func (q Queue) processNextItem() bool {
	// Wait until there is a new item in the working queue
	key, quit := q.queue.Get()
	if quit {
		// We are done, signal to exit the queue
		return false
	}
	// 'Done marks item as done processing' - the workqueue contract requires Done
	// for every key returned by Get, including the sync signal; otherwise the key
	// is left in the queue's processing set forever.
	defer q.queue.Done(key)
	// We got the sync signal. This is not a real event, so we exit early after signaling we are synced
	if key == defaultSyncSignal {
		q.log.Debugf("synced")
		q.initialSync.Store(true)
		return true
	}
	q.log.Debugf("handling update: %v", formatKey(key))
	err := q.workFn(key)
	if err != nil {
		retryCount := q.queue.NumRequeues(key) + 1
		if retryCount < q.maxAttempts {
			q.log.Errorf("error handling %v, retrying (retry count: %d): %v", formatKey(key), retryCount, err)
			q.queue.AddRateLimited(key)
			// Return early, so we do not call Forget(), allowing the rate limiting to backoff
			return true
		}
		q.log.Errorf("error handling %v, and retry budget exceeded: %v", formatKey(key), err)
	}
	// 'Forget indicates that an item is finished being retried.' - should be called whenever we do not want to backoff on this key.
	q.queue.Forget(key)
	return true
}
// WaitForClose blocks until the Instance has stopped processing tasks or the timeout expires.
// If the timeout is zero, it will wait until the queue is done processing.
// WaitForClose returns an error if the timeout expires.
func (q Queue) WaitForClose(timeout time.Duration) error {
	closed := q.Closed()
	if timeout == 0 {
		<-closed
		return nil
	}
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-closed:
		return nil
	case <-timer.C:
		return fmt.Errorf("timeout waiting for queue to close after %v", timeout)
	}
}
// formatKey renders a queue key in a short, human-readable form for logging.
func formatKey(key any) string {
	if nn, ok := key.(types.NamespacedName); ok {
		// NamespacedName is used for cluster-scoped resources too, where
		// Namespace is empty.
		if nn.Namespace != "" {
			return nn.String()
		}
		return nn.Name
	}
	// For events, format the most recent object state.
	if ev, ok := key.(Event); ok {
		key = ev.Latest()
	}
	if o, ok := key.(Object); ok {
		if ns := o.GetNamespace(); ns != "" {
			return ns + "/" + o.GetName()
		}
		return o.GetName()
	}
	// Fall back to fmt, truncated to keep log lines bounded.
	res := fmt.Sprint(key)
	if len(res) >= 50 {
		res = res[:50]
	}
	return res
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package informerfactory provides a "factory" to generate informers. This allows users to create the
// same informers in multiple different locations, while still using the same underlying resources.
// Additionally, aggregate operations like Start, Shutdown, and Wait are available.
// Kubernetes core has informer factories with very similar logic. However, this has a few problems that
// spurred a fork:
// * Factories are per package. That means we have ~6 distinct factories, which makes management a hassle.
// * Across these, the factories are often inconsistent in functionality. Changes to these takes >4 months.
// * Lack of functionality we want (see below).
//
// Added functionality:
// * Single factory for any type, including dynamic informers, meta informers, typed informers, etc.
// * Ability to create multiple informers of the same type but with different filters.
// * Ability to run a single informer rather than all of them.
package informerfactory
import (
"fmt"
"runtime/debug"
"sync"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// NewInformerFunc returns a SharedIndexInformer.
type NewInformerFunc func() cache.SharedIndexInformer
// StartableInformer pairs an informer with a start function that runs exactly
// this informer (rather than all informers in the factory).
type StartableInformer struct {
	Informer cache.SharedIndexInformer
	start    func(stopCh <-chan struct{})
}

// Start runs this informer until stopCh is closed, if it is not already running.
func (s StartableInformer) Start(stopCh <-chan struct{}) {
	s.start(stopCh)
}
// InformerFactory provides access to a shared informer factory
type InformerFactory interface {
	// Start initializes all requested informers. They are handled in goroutines
	// which run until the stop channel gets closed.
	Start(stopCh <-chan struct{})
	// InformerFor returns the SharedIndexInformer for the provided type.
	InformerFor(resource schema.GroupVersionResource, opts kubetypes.InformerOptions, newFunc NewInformerFunc) StartableInformer
	// WaitForCacheSync blocks until all started informers' caches were synced
	// or the stop channel gets closed.
	WaitForCacheSync(stopCh <-chan struct{}) bool
	// Shutdown marks a factory as shutting down. At that point no new
	// informers can be started anymore and Start will return without
	// doing anything.
	//
	// In addition, Shutdown blocks until all goroutines have terminated. For that
	// to happen, the close channel(s) that they were started with must be closed,
	// either before Shutdown gets called or while it is waiting.
	//
	// Shutdown may be called multiple times, even concurrently. All such calls will
	// block until all goroutines have terminated.
	Shutdown()
}
// NewSharedInformerFactory constructs a new instance of informerFactory for all namespaces.
func NewSharedInformerFactory() InformerFactory {
	f := &informerFactory{
		informers:        make(map[informerKey]builtInformer),
		startedInformers: sets.New[informerKey](),
	}
	return f
}
// informerKey represents a unique informer: the same GVR with different
// selectors, namespace, or informer type yields a distinct informer.
type informerKey struct {
	gvr           schema.GroupVersionResource
	labelSelector string
	fieldSelector string
	informerType  kubetypes.InformerType
	namespace     string
}

// builtInformer stores a constructed informer along with the object transform
// it was created with, so conflicting registrations can be detected.
type builtInformer struct {
	informer        cache.SharedIndexInformer
	objectTransform func(obj any) (any, error)
}
// informerFactory is the default InformerFactory implementation; all mutable
// state is guarded by lock.
type informerFactory struct {
	lock      sync.Mutex
	informers map[informerKey]builtInformer
	// startedInformers is used for tracking which informers have been started.
	// This allows Start() to be called multiple times safely.
	startedInformers sets.Set[informerKey]
	// wg tracks how many goroutines were started.
	wg sync.WaitGroup
	// shuttingDown is true when Shutdown has been called. It may still be running
	// because it needs to wait for goroutines.
	shuttingDown bool
}

// Compile-time check that informerFactory satisfies InformerFactory.
var _ InformerFactory = &informerFactory{}
// InformerFor returns the SharedIndexInformer for the provided type and options,
// creating and caching a new one on the first request for a given key.
func (f *informerFactory) InformerFor(resource schema.GroupVersionResource, opts kubetypes.InformerOptions, newFunc NewInformerFunc) StartableInformer {
	f.lock.Lock()
	defer f.lock.Unlock()
	key := informerKey{
		gvr:           resource,
		labelSelector: opts.LabelSelector,
		fieldSelector: opts.FieldSelector,
		informerType:  opts.InformerType,
		namespace:     opts.Namespace,
	}
	if existing, ok := f.informers[key]; ok {
		// Reuse the cached informer, warning (or asserting) on transform conflicts.
		checkInformerOverlap(existing, resource, opts)
		return f.makeStartableInformer(existing.informer, key)
	}
	built := newFunc()
	f.informers[key] = builtInformer{
		informer:        built,
		objectTransform: opts.ObjectTransform,
	}
	return f.makeStartableInformer(built, key)
}
// allowedOverlap reports whether two informers for the same resource with
// different transforms are intentionally permitted.
func allowedOverlap(resource schema.GroupVersionResource) bool {
	// We register an optimized Pod watcher for standard flow, but for the experimental analysis feature we need the full pod,
	// so we start another watch.
	// We may want to reconsider this if the analysis feature becomes stable.
	return features.EnableAnalysis && resource == gvr.Pod
}
// checkInformerOverlap warns (or, with unsafe assertions enabled, fatals) when a
// cached informer is requested again with a different ObjectTransform, since the
// cached informer's transform would silently win.
func checkInformerOverlap(inf builtInformer, resource schema.GroupVersionResource, opts kubetypes.InformerOptions) {
	// Compare function identity via their formatted pointers; Go functions are
	// not directly comparable.
	if fmt.Sprintf("%p", inf.objectTransform) == fmt.Sprintf("%p", opts.ObjectTransform) {
		return
	}
	l := log.Warnf
	if features.EnableUnsafeAssertions && !allowedOverlap(resource) {
		l = log.Fatalf
	}
	l("for type %v, registered conflicting ObjectTransform. Stack: %v", resource, string(debug.Stack()))
}
// makeStartableInformer wraps an informer with a start function bound to its key.
func (f *informerFactory) makeStartableInformer(informer cache.SharedIndexInformer, key informerKey) StartableInformer {
	return StartableInformer{
		Informer: informer,
		start: func(stopCh <-chan struct{}) {
			f.startOne(stopCh, key)
		},
	}
}
// startOne starts the informer registered under the given key, unless the
// factory is shutting down or the informer is already running.
func (f *informerFactory) startOne(stopCh <-chan struct{}, informerType informerKey) {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.shuttingDown {
		return
	}
	informer, ff := f.informers[informerType]
	if !ff {
		// Keys are only created by InformerFor, so a miss here is a programming error.
		panic(fmt.Sprintf("bug: informer key %+v not found", informerType))
	}
	if !f.startedInformers.Contains(informerType) {
		f.wg.Add(1)
		go func() {
			defer f.wg.Done()
			informer.informer.Run(stopCh)
		}()
		f.startedInformers.Insert(informerType)
	}
}
// Start initializes all requested informers that have not been started yet.
// It is a no-op once Shutdown has been called.
func (f *informerFactory) Start(stopCh <-chan struct{}) {
	f.lock.Lock()
	defer f.lock.Unlock()
	if f.shuttingDown {
		return
	}
	for informerType, informer := range f.informers {
		if !f.startedInformers.Contains(informerType) {
			f.wg.Add(1)
			// We need a new variable in each loop iteration,
			// otherwise the goroutine would use the loop variable
			// and that keeps changing.
			informer := informer
			go func() {
				defer f.wg.Done()
				informer.informer.Run(stopCh)
			}()
			f.startedInformers.Insert(informerType)
		}
	}
}
// WaitForCacheSync waits until every started informer's cache has synced, or
// until stopCh closes (in which case it returns false).
func (f *informerFactory) WaitForCacheSync(stopCh <-chan struct{}) bool {
	// Snapshot the started informers under the lock, then wait without holding it.
	f.lock.Lock()
	started := make([]cache.SharedIndexInformer, 0, len(f.informers))
	for key, inf := range f.informers {
		if f.startedInformers.Contains(key) {
			started = append(started, inf.informer)
		}
	}
	f.lock.Unlock()
	for _, inf := range started {
		if !cache.WaitForCacheSync(stopCh, inf.HasSynced) {
			return false
		}
	}
	return true
}
// Shutdown marks the factory as shutting down and blocks until all informer
// goroutines have exited. The deferred Wait runs after the lock is released,
// so running informers can still finish while we wait.
func (f *informerFactory) Shutdown() {
	// Will return immediately if there is nothing to wait for.
	defer f.wg.Wait()
	f.lock.Lock()
	defer f.lock.Unlock()
	f.shuttingDown = true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package inject implements kube-inject or webhoook autoinject feature to inject sidecar.
// This file is focused on rewriting Kubernetes app probers to support mutual TLS.
package inject
import (
"encoding/json"
"strconv"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"istio.io/api/annotation"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
)
// ShouldRewriteAppHTTPProbers returns if we should rewrite apps' probers config.
// A parseable pod annotation overrides the mesh/spec-level setting.
func ShouldRewriteAppHTTPProbers(annotations map[string]string, specSetting bool) bool {
	value, ok := annotations[annotation.SidecarRewriteAppHTTPProbers.Name]
	if !ok {
		return specSetting
	}
	parsed, err := strconv.ParseBool(value)
	if err != nil {
		// Unparseable annotation values fall back to the spec-level setting.
		return specSetting
	}
	return parsed
}
// FindSidecar returns the pointer to the first container whose name matches the "istio-proxy".
func FindSidecar(pod *corev1.Pod) *corev1.Container {
	return FindContainerFromPod(ProxyContainerName, pod)
}
// FindContainerFromPod returns the pointer to the first container whose name matches,
// checking regular containers before init containers.
func FindContainerFromPod(name string, pod *corev1.Pod) *corev1.Container {
	if found := FindContainer(name, pod.Spec.Containers); found != nil {
		return found
	}
	return FindContainer(name, pod.Spec.InitContainers)
}
// FindContainer returns the pointer to the first container whose name matches,
// or nil when no container has that name.
func FindContainer(name string, containers []corev1.Container) *corev1.Container {
	// Index into the slice so we return a pointer to the real element, not a copy.
	for i := range containers {
		c := &containers[i]
		if c.Name == name {
			return c
		}
	}
	return nil
}
// convertAppProber returns an overwritten `Probe` for pilot agent to take over.
// It returns nil when the probe is absent or of an unsupported kind (e.g. Exec).
func convertAppProber(probe *corev1.Probe, newURL string, statusPort int) *corev1.Probe {
	switch {
	case probe == nil:
		return nil
	case probe.HTTPGet != nil:
		return convertAppProberHTTPGet(probe, newURL, statusPort)
	case probe.TCPSocket != nil:
		return convertAppProberTCPSocket(probe, newURL, statusPort)
	case probe.GRPC != nil:
		return convertAppProberGRPC(probe, newURL, statusPort)
	default:
		return nil
	}
}
// convertAppProberHTTPGet returns an overwritten `Probe` (HttpGet) for pilot agent to take over.
// The probe is redirected to the agent's status port and path; the original is not mutated.
func convertAppProberHTTPGet(probe *corev1.Probe, newURL string, statusPort int) *corev1.Probe {
	p := probe.DeepCopy()
	// Change the application container prober config.
	p.HTTPGet.Port = intstr.FromInt32(int32(statusPort))
	p.HTTPGet.Path = newURL
	// For HTTPS prober, we change to HTTP,
	// and pilot agent uses https to request application prober endpoint.
	// Kubelet -> HTTP -> Pilot Agent -> HTTPS -> Application
	if p.HTTPGet.Scheme == corev1.URISchemeHTTPS {
		p.HTTPGet.Scheme = corev1.URISchemeHTTP
	}
	return p
}
// convertAppProberTCPSocket returns an overwritten `Probe` (TcpSocket) for pilot agent to take over.
// The original probe is not mutated.
func convertAppProberTCPSocket(probe *corev1.Probe, newURL string, statusPort int) *corev1.Probe {
	p := probe.DeepCopy()
	// the sidecar intercepts all tcp connections, so we change it to a HTTP probe and the sidecar will check tcp
	p.HTTPGet = &corev1.HTTPGetAction{}
	p.HTTPGet.Port = intstr.FromInt32(int32(statusPort))
	p.HTTPGet.Path = newURL
	p.TCPSocket = nil
	return p
}
// convertAppProberGRPC returns an overwritten `Probe` (gRPC) for pilot agent to take over.
// The original probe is not mutated.
func convertAppProberGRPC(probe *corev1.Probe, newURL string, statusPort int) *corev1.Probe {
	p := probe.DeepCopy()
	// the sidecar intercepts all gRPC connections, so we change it to a HTTP probe and the sidecar will check gRPC
	p.HTTPGet = &corev1.HTTPGetAction{}
	p.HTTPGet.Port = intstr.FromInt32(int32(statusPort))
	p.HTTPGet.Path = newURL
	// For gRPC prober, we change to HTTP,
	// and pilot agent uses gRPC to request application prober endpoint.
	// Kubelet -> HTTP -> Pilot Agent -> gRPC -> Application
	p.GRPC = nil
	return p
}
// KubeAppProbers maps a pilot-agent prober URL path (as produced by
// status.FormatProberURL) to the prober config for that container.
type KubeAppProbers map[string]*Prober

// Prober represents a single container prober
type Prober struct {
	HTTPGet        *corev1.HTTPGetAction   `json:"httpGet,omitempty"`
	TCPSocket      *corev1.TCPSocketAction `json:"tcpSocket,omitempty"`
	GRPC           *corev1.GRPCAction      `json:"grpc,omitempty"`
	TimeoutSeconds int32                   `json:"timeoutSeconds,omitempty"`
}
// DumpAppProbers returns a json encoded string as `status.KubeAppProbers`.
// Also update the probers so that all usages of named port will be resolved to integer.
// Returns "" when there is nothing to encode or encoding fails.
func DumpAppProbers(pod *corev1.Pod, targetPort int32) string {
	out := KubeAppProbers{}
	// updateNamedPort resolves a named container port to its integer value and
	// drops probers that cannot be handled (no supported action, unknown port
	// name, or a port already rewritten to targetPort). gRPC probers pass through.
	updateNamedPort := func(p *Prober, portMap map[string]int32) *Prober {
		if p == nil {
			return nil
		}
		if p.GRPC != nil {
			// don't need to update for gRPC probe port as it only supports integer
			return p
		}
		if p.HTTPGet == nil && p.TCPSocket == nil {
			return nil
		}
		var probePort *intstr.IntOrString
		if p.HTTPGet != nil {
			probePort = &p.HTTPGet.Port
		} else {
			probePort = &p.TCPSocket.Port
		}
		if probePort.Type == intstr.String {
			port, exists := portMap[probePort.StrVal]
			if !exists {
				return nil
			}
			*probePort = intstr.FromInt32(port)
		} else if probePort.IntVal == targetPort {
			// Already is rewritten
			return nil
		}
		return p
	}
	for _, c := range allContainers(pod) {
		// Skip the sidecar itself.
		if c.Name == ProxyContainerName {
			continue
		}
		readyz, livez, startupz := status.FormatProberURL(c.Name)
		// Map the container's named ports to their integer values.
		portMap := map[string]int32{}
		for _, p := range c.Ports {
			if p.Name != "" {
				portMap[p.Name] = p.ContainerPort
			}
		}
		if h := updateNamedPort(kubeProbeToInternalProber(c.ReadinessProbe), portMap); h != nil {
			out[readyz] = h
		}
		if h := updateNamedPort(kubeProbeToInternalProber(c.LivenessProbe), portMap); h != nil {
			out[livez] = h
		}
		if h := updateNamedPort(kubeProbeToInternalProber(c.StartupProbe), portMap); h != nil {
			out[startupz] = h
		}
	}
	// prevent generate '{}'
	if len(out) == 0 {
		return ""
	}
	b, err := json.Marshal(out)
	if err != nil {
		log.Errorf("failed to serialize the app prober config %v", err)
		return ""
	}
	return string(b)
}
// allContainers returns the pod's init containers followed by its regular
// containers, without mutating either original slice.
func allContainers(pod *corev1.Pod) []corev1.Container {
	combined := slices.Clone(pod.Spec.InitContainers)
	combined = append(combined, pod.Spec.Containers...)
	return combined
}
// patchRewriteProbe generates the patch for webhook: it rewrites the probes of
// every non-sidecar container (regular and init) to target the pilot agent's
// status port. The port defaults to defaultPort and may be overridden by the
// sidecar status-port annotation.
func patchRewriteProbe(annotations map[string]string, pod *corev1.Pod, defaultPort int32) {
	statusPort := int(defaultPort)
	if v, f := annotations[annotation.SidecarStatusPort.Name]; f {
		p, err := strconv.Atoi(v)
		if err != nil {
			log.Errorf("Invalid annotation %v=%v: %v", annotation.SidecarStatusPort.Name, v, err)
		} else {
			// Only override the default on a successful parse; previously a
			// malformed annotation silently set the port to 0.
			statusPort = p
		}
	}
	for i, c := range pod.Spec.Containers {
		// Skip sidecar container.
		if c.Name == ProxyContainerName {
			continue
		}
		convertProbe(&c, statusPort)
		pod.Spec.Containers[i] = c
	}
	for i, c := range pod.Spec.InitContainers {
		// Skip sidecar container.
		if c.Name == ProxyContainerName {
			continue
		}
		convertProbe(&c, statusPort)
		pod.Spec.InitContainers[i] = c
	}
}
// convertProbe rewrites the container's readiness/liveness/startup probes in
// place to target the pilot agent's status port; unsupported probe kinds
// (convertAppProber returns nil) are left untouched.
func convertProbe(c *corev1.Container, statusPort int) {
	readyz, livez, startupz := status.FormatProberURL(c.Name)
	if probePatch := convertAppProber(c.ReadinessProbe, readyz, statusPort); probePatch != nil {
		c.ReadinessProbe = probePatch
	}
	if probePatch := convertAppProber(c.LivenessProbe, livez, statusPort); probePatch != nil {
		c.LivenessProbe = probePatch
	}
	if probePatch := convertAppProber(c.StartupProbe, startupz, statusPort); probePatch != nil {
		c.StartupProbe = probePatch
	}
}
// kubeProbeToInternalProber converts a Kubernetes Probe to an Istio internal Prober.
// Only HTTPGet, TCPSocket, and GRPC probes are supported; anything else yields nil.
func kubeProbeToInternalProber(probe *corev1.Probe) *Prober {
	if probe == nil {
		return nil
	}
	switch {
	case probe.HTTPGet != nil:
		return &Prober{HTTPGet: probe.HTTPGet, TimeoutSeconds: probe.TimeoutSeconds}
	case probe.TCPSocket != nil:
		return &Prober{TCPSocket: probe.TCPSocket, TimeoutSeconds: probe.TimeoutSeconds}
	case probe.GRPC != nil:
		return &Prober{GRPC: probe.GRPC, TimeoutSeconds: probe.TimeoutSeconds}
	default:
		return nil
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
openshiftv1 "github.com/openshift/api/apps/v1"
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/util/sets"
)
// IgnoredNamespaces contains the system namespaces referenced from Kubernetes:
// Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#viewing-namespaces
// "kube-system": The namespace for objects created by the Kubernetes system.
// "kube-public": This namespace is mostly reserved for cluster usage.
// "kube-node-lease": This namespace for the lease objects associated with each node
// which improves the performance of the node heartbeats as the cluster scales.
// "local-path-storage": Dynamically provisioning persistent local storage with Kubernetes.
//
// used with Kind cluster: https://github.com/rancher/local-path-provisioner
//
// Pods in these namespaces are never candidates for sidecar injection.
var IgnoredNamespaces = sets.New(
	constants.KubeSystemNamespace,
	constants.KubePublicNamespace,
	constants.KubeNodeLeaseNamespace,
	constants.LocalPathStorageNamespace)
var (
	// kinds enumerates every workload type the injector knows how to decode:
	// its group/version, a prototype object, the REST resource name, and the
	// API path prefix ("/api" for core, "/apis" for everything else).
	kinds = []struct {
		groupVersion schema.GroupVersion
		obj          runtime.Object
		resource     string
		apiPath      string
	}{
		{v1.SchemeGroupVersion, &v1.ReplicationController{}, "replicationcontrollers", "/api"},
		{v1.SchemeGroupVersion, &v1.Pod{}, "pods", "/api"},
		{appsv1.SchemeGroupVersion, &appsv1.Deployment{}, "deployments", "/apis"},
		{appsv1.SchemeGroupVersion, &appsv1.DaemonSet{}, "daemonsets", "/apis"},
		{appsv1.SchemeGroupVersion, &appsv1.ReplicaSet{}, "replicasets", "/apis"},
		{batchv1.SchemeGroupVersion, &batchv1.Job{}, "jobs", "/apis"},
		{batchv1.SchemeGroupVersion, &batchv1.CronJob{}, "cronjobs", "/apis"},
		{appsv1.SchemeGroupVersion, &appsv1.StatefulSet{}, "statefulsets", "/apis"},
		{v1.SchemeGroupVersion, &v1.List{}, "lists", "/apis"},
		{openshiftv1.GroupVersion, &openshiftv1.DeploymentConfig{}, "deploymentconfigs", "/apis"},
	}

	// injectScheme is the runtime scheme used to decode raw manifests into
	// the typed objects above; populated in init().
	injectScheme = runtime.NewScheme()
)
// init registers every supported workload kind with injectScheme, both as a
// versioned and an unversioned type, so FromRawToObject can decode them.
func init() {
	for i := range kinds {
		k := kinds[i]
		injectScheme.AddKnownTypes(k.groupVersion, k.obj)
		injectScheme.AddUnversionedTypes(k.groupVersion, k.obj)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"text/template"
"github.com/Masterminds/sprig/v3"
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
jsonpatch "github.com/evanphx/json-patch/v5"
appsv1 "k8s.io/api/apps/v1"
batch "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
yamlDecoder "k8s.io/apimachinery/pkg/util/yaml"
"sigs.k8s.io/yaml"
"istio.io/api/annotation"
"istio.io/api/label"
meshconfig "istio.io/api/mesh/v1alpha1"
proxyConfig "istio.io/api/networking/v1beta1"
opconfig "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
"istio.io/istio/tools/istio-iptables/pkg/constants"
)
// InjectionPolicy determines the policy for injecting the
// sidecar proxy into the watched namespace(s).
// Valid values are InjectionPolicyDisabled and InjectionPolicyEnabled.
type InjectionPolicy string
const (
	// InjectionPolicyDisabled specifies that the sidecar injector
	// will not inject the sidecar into resources by default for the
	// namespace(s) being watched. Resources can enable injection
	// using the "sidecar.istio.io/inject" annotation with value of
	// true.
	InjectionPolicyDisabled InjectionPolicy = "disabled"

	// InjectionPolicyEnabled specifies that the sidecar injector will
	// inject the sidecar into resources by default for the
	// namespace(s) being watched. Resources can disable injection
	// using the "sidecar.istio.io/inject" annotation with value of
	// false.
	InjectionPolicyEnabled InjectionPolicy = "enabled"
)
// Well-known container names used by the injector.
const (
	// ProxyContainerName is used by e2e integration tests for fetching logs
	ProxyContainerName = "istio-proxy"

	// ValidationContainerName is the name of the init container that validates
	// if CNI has made the necessary changes to iptables
	ValidationContainerName = "istio-validation"

	// InitContainerName is the name of the init container that deploys iptables
	InitContainerName = "istio-init"

	// EnableCoreDumpName is the name of the init container that allows core dumps
	EnableCoreDumpName = "enable-core-dump"
)
// Image type suffixes recognized in proxy image tags.
const (
	// ImageTypeDebug is the suffix of the debug image.
	ImageTypeDebug = "debug"
	// ImageTypeDistroless is the suffix of the distroless image.
	ImageTypeDistroless = "distroless"
	// ImageTypeDefault is the type name of the default image, suffix is elided.
	ImageTypeDefault = "default"
)
// SidecarTemplateData is the data object to which the templated
// version of `SidecarInjectionSpec` is applied.
type SidecarTemplateData struct {
	TypeMeta       metav1.TypeMeta
	DeploymentMeta metav1.ObjectMeta
	ObjectMeta     metav1.ObjectMeta
	Spec           corev1.PodSpec
	ProxyConfig    *meshconfig.ProxyConfig
	MeshConfig     *meshconfig.MeshConfig
	// Values is the installation values map (values.yaml), exposed to templates.
	Values   map[string]any
	Revision string
	// ProxyImage is the fully resolved proxy image URL (see ProxyImage()).
	ProxyImage string
	ProxyUID   int64
	ProxyGID   int64
	// InboundTrafficPolicyMode is "localhost" or "passthrough" (see InboundTrafficPolicyMode()).
	InboundTrafficPolicyMode string
}
type (
	// Template is a rendered sidecar template in pod form.
	Template *corev1.Pod
	// RawTemplates maps a template name to its unparsed text.
	RawTemplates map[string]string
	// Templates maps a template name to its parsed text/template form.
	Templates map[string]*template.Template
)
// Injector produces the JSON patch that injects the sidecar into a pod
// within the given namespace.
type Injector interface {
	Inject(pod *corev1.Pod, namespace string) ([]byte, error)
}
// Config specifies the sidecar injection configuration This includes
// the sidecar template and cluster-side injection policy. It is used
// by kube-inject, sidecar injector, and http endpoint.
type Config struct {
	// Policy is the namespace-default injection policy (enabled/disabled).
	Policy InjectionPolicy `json:"policy"`

	// DefaultTemplates defines the default template to use for pods that do not explicitly specify a template
	DefaultTemplates []string `json:"defaultTemplates"`

	// RawTemplates defines a set of templates to be used. The specified template will be run, provided with
	// SidecarTemplateData, and merged with the original pod spec using a strategic merge patch.
	RawTemplates RawTemplates `json:"templates"`

	// Aliases defines a translation of a name to inject template. For example, `sidecar: [proxy,init]` could allow
	// referencing two templates, "proxy" and "init" by a single name, "sidecar".
	// Expansion is not recursive.
	Aliases map[string][]string `json:"aliases"`

	// NeverInjectSelector: Refuses the injection on pods whose labels match this selector.
	// It's an array of label selectors, that will be OR'ed, meaning we will iterate
	// over it and stop at the first match
	// Takes precedence over AlwaysInjectSelector.
	NeverInjectSelector []metav1.LabelSelector `json:"neverInjectSelector"`

	// AlwaysInjectSelector: Forces the injection on pods whose labels match this selector.
	// It's an array of label selectors, that will be OR'ed, meaning we will iterate
	// over it and stop at the first match
	AlwaysInjectSelector []metav1.LabelSelector `json:"alwaysInjectSelector"`

	// InjectedAnnotations are additional annotations that will be added to the pod spec after injection
	// This is primarily to support PSP annotations.
	InjectedAnnotations map[string]string `json:"injectedAnnotations"`

	// Templates is a pre-parsed copy of RawTemplates
	Templates Templates `json:"-"`
}
const (
	// SidecarTemplateName is the name of the built-in default injection template.
	SidecarTemplateName = "sidecar"
)
// UnmarshalConfig unmarshals the provided YAML configuration, while normalizing the resulting configuration:
// a nil RawTemplates map becomes empty, DefaultTemplates falls back to the
// "sidecar" template, and RawTemplates are pre-parsed into Templates.
func UnmarshalConfig(yml []byte) (Config, error) {
	var cfg Config
	if err := yaml.Unmarshal(yml, &cfg); err != nil {
		return cfg, fmt.Errorf("failed to unmarshal injection template: %v", err)
	}
	if cfg.RawTemplates == nil {
		cfg.RawTemplates = map[string]string{}
	}
	if len(cfg.DefaultTemplates) == 0 {
		cfg.DefaultTemplates = []string{SidecarTemplateName}
	}
	if len(cfg.RawTemplates) == 0 {
		// Likely an old-style config; warn rather than fail.
		log.Warnf("injection templates are empty." +
			" This may be caused by using an injection template from an older version of Istio." +
			" Please ensure the template is correct; mismatch template versions can lead to unexpected results, including pods not being injected.")
	}
	parsed, err := ParseTemplates(cfg.RawTemplates)
	if err != nil {
		return cfg, err
	}
	cfg.Templates = parsed
	return cfg, nil
}
// injectRequired reports whether the sidecar should be injected for the pod
// described by podSpec/metadata. Decision order:
//  1. host-network pods and ignored (system) namespaces: never inject;
//  2. an explicit inject label (preferred) or annotation on the object wins;
//  3. otherwise NeverInjectSelector, then AlwaysInjectSelector, are consulted;
//  4. finally config.Policy supplies the namespace default.
func injectRequired(ignored []string, config *Config, podSpec *corev1.PodSpec, metadata metav1.ObjectMeta) bool { // nolint: lll
	// Skip injection when host networking is enabled. The problem is
	// that the iptables changes are assumed to be within the pod when,
	// in fact, they are changing the routing at the host level. This
	// often results in routing failures within a node which can
	// affect the network provider within the cluster causing
	// additional pod failures.
	if podSpec.HostNetwork {
		return false
	}

	// skip special kubernetes system namespaces
	for _, namespace := range ignored {
		if metadata.Namespace == namespace {
			return false
		}
	}

	annos := metadata.GetAnnotations()

	var useDefault bool
	var inject bool

	objectSelector := annos[annotation.SidecarInject.Name]
	if lbl, labelPresent := metadata.GetLabels()[label.SidecarInject.Name]; labelPresent {
		// The label is the new API; if both are present we prefer the label
		objectSelector = lbl
	}
	switch strings.ToLower(objectSelector) {
	// http://yaml.org/type/bool.html
	case "y", "yes", "true", "on":
		inject = true
	case "":
		useDefault = true
	}
	// Any other value (e.g. "false", "no") leaves inject=false with
	// useDefault=false: an explicit opt-out that skips the selectors below.

	// If an annotation is not explicitly given, check the LabelSelectors, starting with NeverInject
	if useDefault {
		for _, neverSelector := range config.NeverInjectSelector {
			selector, err := metav1.LabelSelectorAsSelector(&neverSelector)
			if err != nil {
				log.Warnf("Invalid selector for NeverInjectSelector: %v (%v)", neverSelector, err)
			} else if !selector.Empty() && selector.Matches(labels.Set(metadata.Labels)) {
				log.Debugf("Explicitly disabling injection for pod %s/%s due to pod labels matching NeverInjectSelector config map entry.",
					metadata.Namespace, potentialPodName(metadata))
				inject = false
				useDefault = false
				break
			}
		}
	}

	// If there's no annotation nor a NeverInjectSelector, check the AlwaysInject one
	if useDefault {
		for _, alwaysSelector := range config.AlwaysInjectSelector {
			selector, err := metav1.LabelSelectorAsSelector(&alwaysSelector)
			if err != nil {
				log.Warnf("Invalid selector for AlwaysInjectSelector: %v (%v)", alwaysSelector, err)
			} else if !selector.Empty() && selector.Matches(labels.Set(metadata.Labels)) {
				log.Debugf("Explicitly enabling injection for pod %s/%s due to pod labels matching AlwaysInjectSelector config map entry.",
					metadata.Namespace, potentialPodName(metadata))
				inject = true
				useDefault = false
				break
			}
		}
	}

	// Apply the namespace policy: when still "default", enabled means inject.
	var required bool
	switch config.Policy {
	default: // InjectionPolicyOff
		log.Errorf("Illegal value for autoInject:%s, must be one of [%s,%s]. Auto injection disabled!",
			config.Policy, InjectionPolicyDisabled, InjectionPolicyEnabled)
		required = false
	case InjectionPolicyDisabled:
		if useDefault {
			required = false
		} else {
			required = inject
		}
	case InjectionPolicyEnabled:
		if useDefault {
			required = true
		} else {
			required = inject
		}
	}

	if log.DebugEnabled() {
		// Build a log message for the annotations.
		annotationStr := ""
		for name := range AnnotationValidation {
			value, ok := annos[name]
			if !ok {
				value = "(unset)"
			}
			annotationStr += fmt.Sprintf("%s:%s ", name, value)
		}

		log.Debugf("Sidecar injection policy for %v/%v: namespacePolicy:%v useDefault:%v inject:%v required:%v %s",
			metadata.Namespace,
			potentialPodName(metadata),
			config.Policy,
			useDefault,
			inject,
			required,
			annotationStr)
	}

	return required
}
// ProxyImage constructs image url in a backwards compatible way.
// values based name => {{ .Values.global.hub }}/{{ .Values.global.proxy.image }}:{{ .Values.global.tag }}
// The image type from the ProxyImage config may be overridden by the
// sidecar.istio.io/proxyImageType pod annotation.
func ProxyImage(values *opconfig.Values, image *proxyConfig.ProxyImage, annotations map[string]string) string {
	global := values.GetGlobal()

	name := "proxyv2"
	if proxy := global.GetProxy(); proxy != nil && proxy.GetImage() != "" {
		name = proxy.GetImage()
	}

	tag := ""
	if global.GetTag() != nil { // Tag is an interface but we need the string form.
		tag = fmt.Sprintf("%v", global.GetTag().AsInterface())
	}

	imageType := ""
	if image != nil {
		imageType = image.ImageType
	}
	if override, ok := annotations[annotation.SidecarProxyImageType.Name]; ok {
		imageType = override
	}

	return imageURL(global.GetHub(), name, tag, imageType)
}
// InboundTrafficPolicyMode returns the string form of the mesh's inbound
// traffic policy mode: "localhost" when explicitly configured, otherwise
// "passthrough" (the default).
func InboundTrafficPolicyMode(meshConfig *meshconfig.MeshConfig) string {
	if meshConfig.GetInboundTrafficPolicy().GetMode() == meshconfig.MeshConfig_InboundTrafficPolicy_LOCALHOST {
		return "localhost"
	}
	return "passthrough"
}
// imageURL creates url from parts.
// imageType is appended if not empty.
// If imageType is already present in the tag, then it is replaced.
// Examples:
//
//	docker.io/istio/proxyv2:1.12-distroless
//	gcr.io/gke-release/asm/proxyv2:1.11.2-asm.17-distroless
//	docker.io/istio/proxyv2:1.12
func imageURL(hub, imageName, tag, imageType string) string {
	return fmt.Sprintf("%s/%s:%s", hub, imageName, updateImageTypeIfPresent(tag, imageType))
}

// KnownImageTypes are image types that istio publishes.
var KnownImageTypes = []string{ImageTypeDistroless, ImageTypeDebug}

// updateImageTypeIfPresent strips any known image-type suffix from tag, then
// appends imageType (unless it is empty or the "default" type).
func updateImageTypeIfPresent(tag string, imageType string) string {
	if imageType == "" {
		return tag
	}

	base := tag
	for _, known := range KnownImageTypes {
		if trimmed := strings.TrimSuffix(base, "-"+known); trimmed != base {
			base = trimmed
			break
		}
	}

	if imageType == ImageTypeDefault {
		return base
	}
	return base + "-" + imageType
}
// RunTemplate renders the sidecar template
// Returns the raw string template, as well as the parse pod form
func RunTemplate(params InjectionParameters) (mergedPod *corev1.Pod, templatePod *corev1.Pod, err error) {
metadata := ¶ms.pod.ObjectMeta
meshConfig := params.meshConfig
if err := validateAnnotations(metadata.GetAnnotations()); err != nil {
log.Errorf("Injection failed due to invalid annotations: %v", err)
return nil, nil, err
}
cluster := params.valuesConfig.asStruct.GetGlobal().GetMultiCluster().GetClusterName()
// TODO allow overriding the values.global network in injection with the system namespace label
network := params.valuesConfig.asStruct.GetGlobal().GetNetwork()
// params may be set from webhook URL, take priority over values yaml
if params.proxyEnvs["ISTIO_META_CLUSTER_ID"] != "" {
cluster = params.proxyEnvs["ISTIO_META_CLUSTER_ID"]
}
if params.proxyEnvs["ISTIO_META_NETWORK"] != "" {
network = params.proxyEnvs["ISTIO_META_NETWORK"]
}
// explicit label takes highest precedence
if n, ok := metadata.Labels[label.TopologyNetwork.Name]; ok {
network = n
}
// use network in values for template, and proxy env variables
if cluster != "" {
params.proxyEnvs["ISTIO_META_CLUSTER_ID"] = cluster
}
if network != "" {
params.proxyEnvs["ISTIO_META_NETWORK"] = network
}
strippedPod, err := reinsertOverrides(stripPod(params))
if err != nil {
return nil, nil, err
}
proxyUID, proxyGID := GetProxyIDs(params.namespace)
data := SidecarTemplateData{
TypeMeta: params.typeMeta,
DeploymentMeta: params.deployMeta,
ObjectMeta: strippedPod.ObjectMeta,
Spec: strippedPod.Spec,
ProxyConfig: params.proxyConfig,
MeshConfig: meshConfig,
Values: params.valuesConfig.asMap,
Revision: params.revision,
ProxyImage: ProxyImage(params.valuesConfig.asStruct, params.proxyConfig.Image, strippedPod.Annotations),
ProxyUID: proxyUID,
ProxyGID: proxyGID,
InboundTrafficPolicyMode: InboundTrafficPolicyMode(meshConfig),
}
mergedPod = params.pod
templatePod = &corev1.Pod{}
for _, templateName := range selectTemplates(params) {
parsedTemplate, f := params.templates[templateName]
if !f {
return nil, nil, fmt.Errorf("requested template %q not found; have %v",
templateName, strings.Join(knownTemplates(params.templates), ", "))
}
bbuf, err := runTemplate(parsedTemplate, data)
if err != nil {
return nil, nil, err
}
templatePod, err = applyOverlayYAML(templatePod, customBytes.CheckLen(bbuf.Bytes(), "/src/istio/pkg/kube/inject/inject.go:442:52 (May be slightly inaccurate) NEW_LINEbbuf.Bytes()"))
if err != nil {
return nil, nil, fmt.Errorf("failed applying injection overlay: %v", err)
}
// This is a bit of a weird hack. With NativeSidecars, the container will be under initContainers in the template pod.
// But we may have injection customizations (https://istio.io/latest/docs/setup/additional-setup/sidecar-injection/#customizing-injection);
// these will be in the `containers` field.
// So if we see the proxy container in `containers` in the original pod, and in `initContainers` in the template pod,
// move the container.
if features.EnableNativeSidecars.Get() &&
FindContainer(ProxyContainerName, templatePod.Spec.InitContainers) != nil &&
FindContainer(ProxyContainerName, mergedPod.Spec.Containers) != nil {
mergedPod = mergedPod.DeepCopy()
mergedPod.Spec.Containers, mergedPod.Spec.InitContainers = moveContainer(mergedPod.Spec.Containers, mergedPod.Spec.InitContainers, ProxyContainerName)
}
mergedPod, err = applyOverlayYAML(mergedPod, customBytes.CheckLen(bbuf.Bytes(), "/src/istio/pkg/kube/inject/inject.go:457:48 (May be slightly inaccurate) NEW_LINEbbuf.Bytes()"))
if err != nil {
return nil, nil, fmt.Errorf("failed parsing generated injected YAML (check Istio sidecar injector configuration): %v", err)
}
}
return mergedPod, templatePod, nil
}
// knownTemplates returns the names of all configured templates, in map
// (i.e. unspecified) order.
func knownTemplates(t Templates) []string {
	names := make([]string, 0, len(t))
	for name := range t {
		names = append(names, name)
	}
	return names
}
// selectTemplates determines which templates to render for a pod: the
// comma-separated list from the inject.istio.io/templates annotation when
// present, otherwise the configured default; aliases are expanded either way.
func selectTemplates(params InjectionParameters) []string {
	annotated, found := params.pod.Annotations[annotation.InjectTemplates.Name]
	if !found {
		return resolveAliases(params, params.defaultTemplate)
	}

	parts := strings.Split(annotated, ",")
	names := make([]string, 0, len(parts))
	for _, raw := range parts {
		names = append(names, strings.TrimSpace(raw))
	}
	return resolveAliases(params, names)
}
// resolveAliases expands each template name through the configured alias map
// (non-recursively); names with no alias pass through unchanged.
func resolveAliases(params InjectionParameters, names []string) []string {
	resolved := []string{}
	for _, n := range names {
		if expansion, ok := params.aliases[n]; ok {
			resolved = append(resolved, expansion...)
			continue
		}
		resolved = append(resolved, n)
	}
	return resolved
}
// stripPod returns a copy of the request's pod with previously injected
// sidecar artifacts removed so that re-injection is idempotent. If the pod
// carries no injection status annotation, the original pod is returned as-is.
func stripPod(req InjectionParameters) *corev1.Pod {
	pod := req.pod.DeepCopy()
	prevStatus := injectionStatus(pod)
	if prevStatus == nil {
		// Never injected: nothing to strip.
		return req.pod
	}
	// We found a previous status annotation. Possibly we are re-injecting the pod
	// To ensure idempotency, remove our injected containers first
	for _, c := range prevStatus.Containers {
		pod.Spec.Containers = modifyContainers(pod.Spec.Containers, c, Remove)
	}
	for _, c := range prevStatus.InitContainers {
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, c, Remove)
	}

	targetPort := strconv.Itoa(int(req.meshConfig.GetDefaultConfig().GetStatusPort()))
	if cur, f := getPrometheusPort(pod); f {
		// We have already set the port, assume user is controlling this or, more likely, re-injected
		// the pod.
		if cur == targetPort {
			// Only clear the annotations we ourselves would have set (they
			// point at our status port); user-customized ports are kept.
			clearPrometheusAnnotations(pod)
		}
	}
	// Drop the status annotation last so injectionStatus above could read it.
	delete(pod.Annotations, annotation.SidecarStatus.Name)

	return pod
}
// injectionStatus parses the sidecar.istio.io/status annotation of a pod.
// It returns nil when the annotation is absent or cannot be parsed as JSON.
func injectionStatus(pod *corev1.Pod) *SidecarInjectionStatus {
	annotations := pod.ObjectMeta.Annotations
	if annotations == nil {
		return nil
	}
	raw, ok := annotations[annotation.SidecarStatus.Name]
	if !ok {
		return nil
	}
	// default case when injected pod has explicit status
	var parsed SidecarInjectionStatus
	if err := json.Unmarshal([]byte(raw), &parsed); err != nil {
		return nil
	}
	return &parsed
}
// parseDryTemplate parses an injection template string with the sprig
// function map plus the caller-supplied funcMap, returning the parsed
// template or the parse error (which is also logged with the template text).
func parseDryTemplate(tmplStr string, funcMap map[string]any) (*template.Template, error) {
	parsed, err := template.New("inject").Funcs(sprig.TxtFuncMap()).Funcs(funcMap).Parse(tmplStr)
	if err != nil {
		log.Infof("Failed to parse template: %v %v\n", err, tmplStr)
		return nil, err
	}
	return parsed, nil
}
// runTemplate executes tmpl against the sidecar template data, returning the
// rendered output. On failure the error is logged and a zero buffer returned.
func runTemplate(tmpl *template.Template, data SidecarTemplateData) (bytes.Buffer, error) {
	var rendered bytes.Buffer
	if execErr := tmpl.Execute(&rendered, &data); execErr != nil {
		log.Errorf("Invalid template: %v", execErr)
		return bytes.Buffer{}, execErr
	}
	return rendered, nil
}
// IntoResourceFile injects the istio proxy into the specified
// kubernetes YAML file.
// Documents that cannot be decoded into a registered type are passed through
// unchanged; each output document is terminated with a "---" separator.
// nolint: lll
func IntoResourceFile(injector Injector, sidecarTemplate Templates,
	valuesConfig ValuesConfig, revision string, meshconfig *meshconfig.MeshConfig, in io.Reader, out io.Writer, warningHandler func(string),
) error {
	reader := yamlDecoder.NewYAMLReader(bufio.NewReaderSize(in, 4096))
	for {
		raw, err := reader.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		// An unregistered kind is not an error: the document is echoed as-is.
		obj, err := FromRawToObject(raw)
		if err != nil && !runtime.IsNotRegisteredError(err) {
			return err
		}

		var updated []byte
		if err == nil {
			outObject, err := IntoObject(injector, sidecarTemplate, valuesConfig, revision, meshconfig, obj, warningHandler) // nolint: vetshadow
			if err != nil {
				return err
			}
			if updated, err = yaml.Marshal(outObject); err != nil {
				return err
			}
		} else {
			updated = raw // unchanged
		}

		if _, err = out.Write(updated); err != nil {
			return err
		}
		if _, err = fmt.Fprint(out, "---\n"); err != nil {
			return err
		}
	}
	return nil
}
// FromRawToObject is used to convert from raw to the runtime object:
// it peeks at the document's TypeMeta, instantiates the matching registered
// type from injectScheme, and unmarshals the full document into it.
func FromRawToObject(raw []byte) (runtime.Object, error) {
	var meta metav1.TypeMeta
	if err := yaml.Unmarshal(raw, &meta); err != nil {
		return nil, err
	}

	obj, err := injectScheme.New(schema.FromAPIVersionAndKind(meta.APIVersion, meta.Kind))
	if err != nil {
		return nil, err
	}
	if err := yaml.Unmarshal(raw, obj); err != nil {
		return nil, err
	}
	return obj, nil
}
// IntoObject convert the incoming resources into Injected resources.
// It handles Lists (recursing per item), CronJobs and Pods specially, and
// falls back to reflection over Spec.Template for every other workload kind.
// The injected pod template is written back into a deep copy of `in`, which
// is returned; skipped resources are reported via warningHandler.
// nolint: lll
func IntoObject(injector Injector, sidecarTemplate Templates, valuesConfig ValuesConfig,
	revision string, meshconfig *meshconfig.MeshConfig, in runtime.Object, warningHandler func(string),
) (any, error) {
	out := in.DeepCopyObject()

	var deploymentMetadata metav1.ObjectMeta
	var metadata *metav1.ObjectMeta
	var podSpec *corev1.PodSpec
	var typeMeta metav1.TypeMeta

	// Handle Lists
	if list, ok := out.(*corev1.List); ok {
		result := list
		for i, item := range list.Items {
			obj, err := FromRawToObject(item.Raw)
			if runtime.IsNotRegisteredError(err) {
				// Unknown kinds inside a List are left untouched.
				continue
			}
			if err != nil {
				return nil, err
			}

			r, err := IntoObject(injector, sidecarTemplate, valuesConfig, revision, meshconfig, obj, warningHandler) // nolint: vetshadow
			if err != nil {
				return nil, err
			}

			re := runtime.RawExtension{}
			re.Object = r.(runtime.Object)
			result.Items[i] = re
		}
		return result, nil
	}

	// CronJobs have JobTemplates in them, instead of Templates, so we
	// special case them.
	switch v := out.(type) {
	case *batch.CronJob:
		job := v
		typeMeta = job.TypeMeta
		metadata = &job.Spec.JobTemplate.ObjectMeta
		deploymentMetadata = job.ObjectMeta
		podSpec = &job.Spec.JobTemplate.Spec.Template.Spec
	case *corev1.Pod:
		pod := v
		metadata = &pod.ObjectMeta
		// sync from webhook inject
		deploymentMetadata, typeMeta = kube.GetDeployMetaFromPod(pod)
		podSpec = &pod.Spec
	case *appsv1.Deployment: // Added to be explicit about the most expected case
		deploy := v
		typeMeta = deploy.TypeMeta
		deploymentMetadata = deploy.ObjectMeta
		metadata = &deploy.Spec.Template.ObjectMeta
		podSpec = &deploy.Spec.Template.Spec
	default:
		// `in` is a pointer to an Object. Dereference it.
		outValue := reflect.ValueOf(out).Elem()

		typeMeta = outValue.FieldByName("TypeMeta").Interface().(metav1.TypeMeta)
		deploymentMetadata = outValue.FieldByName("ObjectMeta").Interface().(metav1.ObjectMeta)

		templateValue := outValue.FieldByName("Spec").FieldByName("Template")
		// `Template` is defined as a pointer in some older API
		// definitions, e.g. ReplicationController
		if templateValue.Kind() == reflect.Ptr {
			if templateValue.IsNil() {
				return out, fmt.Errorf("spec.template is required value")
			}
			templateValue = templateValue.Elem()
		}
		metadata = templateValue.FieldByName("ObjectMeta").Addr().Interface().(*metav1.ObjectMeta)
		podSpec = templateValue.FieldByName("Spec").Addr().Interface().(*corev1.PodSpec)
	}

	// Prefer the pod-template's name/namespace; fall back to the workload's.
	name := metadata.Name
	if name == "" {
		name = deploymentMetadata.Name
	}
	namespace := metadata.Namespace
	if namespace == "" {
		namespace = deploymentMetadata.Namespace
	}

	var fullName string
	if deploymentMetadata.Namespace != "" {
		fullName = fmt.Sprintf("%s/%s", deploymentMetadata.Namespace, name)
	} else {
		fullName = name
	}

	kind := typeMeta.Kind

	// Skip injection when host networking is enabled. The problem is
	// that the iptable changes are assumed to be within the pod when,
	// in fact, they are changing the routing at the host level. This
	// often results in routing failures within a node which can
	// affect the network provider within the cluster causing
	// additional pod failures.
	if podSpec.HostNetwork {
		warningStr := fmt.Sprintf("===> Skipping injection because %q has host networking enabled\n",
			fullName)
		if kind != "" {
			warningStr = fmt.Sprintf("===> Skipping injection because %s %q has host networking enabled\n",
				kind, fullName)
		}
		warningHandler(warningStr)
		return out, nil
	}

	// Build a synthetic pod from the extracted template for injection.
	pod := &corev1.Pod{
		ObjectMeta: *metadata,
		Spec:       *podSpec,
	}

	var patchBytes []byte
	var err error
	if injector != nil {
		patchBytes, err = injector.Inject(pod, namespace)
	}
	if err != nil {
		return nil, err
	}

	// TODO(Monkeyanator) istioctl injection still applies just the pod annotation since we don't have
	// the ProxyConfig CRs here.
	if pca, f := metadata.GetAnnotations()[annotation.ProxyConfig.Name]; f {
		var merr error
		meshconfig, merr = mesh.ApplyProxyConfig(pca, meshconfig)
		if merr != nil {
			return nil, merr
		}
	}
	if patchBytes == nil {
		// No external injector (or it produced nothing): inject locally.
		if !injectRequired(IgnoredNamespaces.UnsortedList(), &Config{Policy: InjectionPolicyEnabled}, &pod.Spec, pod.ObjectMeta) {
			warningStr := fmt.Sprintf("===> Skipping injection because %q has sidecar injection disabled\n", fullName)
			if kind != "" {
				warningStr = fmt.Sprintf("===> Skipping injection because %s %q has sidecar injection disabled\n",
					kind, fullName)
			}
			warningHandler(warningStr)
			return out, nil
		}
		params := InjectionParameters{
			pod:        pod,
			deployMeta: deploymentMetadata,
			typeMeta:   typeMeta,
			// Todo replace with some template resolver abstraction
			templates:           sidecarTemplate,
			defaultTemplate:     []string{SidecarTemplateName},
			meshConfig:          meshconfig,
			proxyConfig:         meshconfig.GetDefaultConfig(),
			valuesConfig:        valuesConfig,
			revision:            revision,
			proxyEnvs:           map[string]string{},
			injectedAnnotations: nil,
		}
		patchBytes, err = injectPod(params)
	}
	if err != nil {
		return nil, err
	}
	// Apply the JSON patch and write the result back into the original object.
	patched, err := applyJSONPatchToPod(pod, patchBytes)
	if err != nil {
		return nil, err
	}
	patchedObject, _, err := jsonSerializer.Decode(patched, nil, &corev1.Pod{})
	if err != nil {
		return nil, err
	}
	patchedPod := patchedObject.(*corev1.Pod)
	*metadata = patchedPod.ObjectMeta
	*podSpec = patchedPod.Spec
	return out, nil
}
// applyJSONPatchToPod encodes the pod to JSON, applies the RFC 6902 patch,
// and returns the patched JSON bytes.
func applyJSONPatchToPod(input *corev1.Pod, patch []byte) ([]byte, error) {
	podJSON, err := runtime.Encode(jsonSerializer, input)
	if err != nil {
		return nil, err
	}

	decoded, err := jsonpatch.DecodePatch(patch)
	if err != nil {
		return nil, err
	}

	return decoded.Apply(podJSON)
}
// SidecarInjectionStatus contains basic information about the
// injected sidecar. This includes the names of added containers and
// volumes. It is serialized into the sidecar.istio.io/status annotation
// and read back by injectionStatus when stripping for re-injection.
type SidecarInjectionStatus struct {
	InitContainers   []string `json:"initContainers"`
	Containers       []string `json:"containers"`
	Volumes          []string `json:"volumes"`
	ImagePullSecrets []string `json:"imagePullSecrets"`
	Revision         string   `json:"revision"`
}
// potentialPodName returns the best available display name for a pod:
// its actual name, its generateName with a placeholder suffix, or "".
func potentialPodName(metadata metav1.ObjectMeta) string {
	switch {
	case metadata.Name != "":
		return metadata.Name
	case metadata.GenerateName != "":
		return metadata.GenerateName + "***** (actual name not yet known)"
	default:
		return ""
	}
}
// overwriteClusterInfo updates cluster name and network from url path
// This is needed when webhook config runs on a different cluster than webhook.
// It is a no-op when the pod has no sidecar or no proxy envs were supplied.
func overwriteClusterInfo(pod *corev1.Pod, params InjectionParameters) {
	sidecar := FindSidecar(pod)
	if sidecar == nil {
		return
	}
	if len(params.proxyEnvs) == 0 {
		return
	}
	log.Debugf("Updating cluster envs based on inject url: %s\n", params.proxyEnvs)
	updateClusterEnvs(sidecar, params.proxyEnvs)
}
// updateClusterEnvs replaces any env vars in the container whose names appear
// in newKVs, appending the new values in sorted-key order for determinism.
func updateClusterEnvs(container *corev1.Container, newKVs map[string]string) {
	merged := make([]corev1.EnvVar, 0, len(container.Env)+len(newKVs))

	// Keep only the existing vars that are not being overridden.
	for _, existing := range container.Env {
		if _, overridden := newKVs[existing.Name]; !overridden {
			merged = append(merged, existing)
		}
	}

	names := make([]string, 0, len(newKVs))
	for name := range newKVs {
		names = append(names, name)
	}
	sort.Strings(names)

	for _, name := range names {
		merged = append(merged, corev1.EnvVar{Name: name, Value: newKVs[name], ValueFrom: nil})
	}
	container.Env = merged
}
// GetProxyIDs returns the UID and GID to be used in the RunAsUser and RunAsGroup fields in the template.
// Inspects the namespace metadata for hints and fallbacks to the usual value of 1337.
// On OpenShift, the max of the namespace's preallocated UID range and the
// first preallocated supplemental group override the defaults.
func GetProxyIDs(namespace *corev1.Namespace) (uid int64, gid int64) {
	uid, gid = constants.DefaultProxyUIDInt, constants.DefaultProxyUIDInt
	if namespace == nil {
		return uid, gid
	}

	// Check for OpenShift specifics and returns the max number in the range specified in the namespace annotation
	if _, uidMax, err := getPreallocatedUIDRange(namespace); err == nil {
		uid = *uidMax
	}
	if groups, err := getPreallocatedSupplementalGroups(namespace); err == nil && len(groups) > 0 {
		gid = groups[0].Max
	}
	return uid, gid
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Functions below were copied from
// https://github.com/openshift/apiserver-library-go/blob/c22aa58bb57416b9f9f190957d07c9e7669c26df/pkg/securitycontextconstraints/sccmatching/matcher.go
// These functions are not exported, and, if they were, when imported bring k8s.io/kubernetes as dependency, which is problematic
// License is Apache 2.0: https://github.com/openshift/apiserver-library-go/blob/c22aa58bb57416b9f9f190957d07c9e7669c26df/LICENSE
package inject
import (
"fmt"
"strings"
securityv1 "github.com/openshift/api/security/v1"
corev1 "k8s.io/api/core/v1"
"istio.io/istio/pkg/log"
)
// getPreallocatedUIDRange retrieves the annotated value from the namespace, splits it to make
// the min/max and formats the data into the necessary types for the strategy options.
func getPreallocatedUIDRange(ns *corev1.Namespace) (*int64, *int64, error) {
	annotationVal, ok := ns.Annotations[securityv1.UIDRangeAnnotation]
	if !ok {
		return nil, nil, fmt.Errorf("unable to find annotation %s", securityv1.UIDRangeAnnotation)
	}
	if annotationVal == "" {
		return nil, nil, fmt.Errorf("found annotation %s but it was empty", securityv1.UIDRangeAnnotation)
	}

	block, err := ParseBlock(annotationVal)
	if err != nil {
		return nil, nil, err
	}

	// Avoid shadowing the builtin min/max identifiers.
	lo, hi := int64(block.Start), int64(block.End)
	log.Debugf("got preallocated values for min: %d, max: %d for uid range in namespace %s", lo, hi, ns.Name)
	return &lo, &hi, nil
}
// getPreallocatedSupplementalGroups gets the annotated value from the namespace
// and converts each parsed block into a securityv1.IDRange.
func getPreallocatedSupplementalGroups(ns *corev1.Namespace) ([]securityv1.IDRange, error) {
	groups, err := getSupplementalGroupsAnnotation(ns)
	if err != nil {
		return nil, err
	}
	log.Debugf("got preallocated value for groups: %s in namespace %s", groups, ns.Name)

	blocks, err := parseSupplementalGroupAnnotation(groups)
	if err != nil {
		return nil, err
	}

	ranges := make([]securityv1.IDRange, 0, len(blocks))
	for _, b := range blocks {
		ranges = append(ranges, securityv1.IDRange{
			Min: int64(b.Start),
			Max: int64(b.End),
		})
	}
	return ranges, nil
}
// getSupplementalGroupsAnnotation provides a backwards compatible way to get supplemental groups
// annotations from a namespace by looking for SupplementalGroupsAnnotation and falling back to
// UIDRangeAnnotation if it is not found.
func getSupplementalGroupsAnnotation(ns *corev1.Namespace) (string, error) {
	groups, found := ns.Annotations[securityv1.SupplementalGroupsAnnotation]
	if !found {
		log.Debugf("unable to find supplemental group annotation %s falling back to %s", securityv1.SupplementalGroupsAnnotation, securityv1.UIDRangeAnnotation)
		if groups, found = ns.Annotations[securityv1.UIDRangeAnnotation]; !found {
			return "", fmt.Errorf("unable to find supplemental group or uid annotation for namespace %s", ns.Name)
		}
	}
	if groups == "" {
		return "", fmt.Errorf("unable to find groups using %s and %s annotations", securityv1.SupplementalGroupsAnnotation, securityv1.UIDRangeAnnotation)
	}
	return groups, nil
}
// parseSupplementalGroupAnnotation parses the group annotation into blocks.
// The annotation is a comma-separated list of blocks; any unparseable segment
// fails the whole annotation.
func parseSupplementalGroupAnnotation(groups string) ([]Block, error) {
	var blocks []Block
	for _, segment := range strings.Split(groups, ",") {
		b, err := ParseBlock(segment)
		if err != nil {
			return nil, err
		}
		blocks = append(blocks, b)
	}
	if len(blocks) == 0 {
		return nil, fmt.Errorf("no blocks parsed from annotation %s", groups)
	}
	return blocks, nil
}
// Functions below were copied from
// https://github.com/openshift/library-go/blob/561433066966536ac17f3c9852d7d85f7b7e1e36/pkg/security/uid/uid.go
// Copied here to avoid bringing tons of dependencies
// License is Apache 2.0: https://github.com/openshift/library-go/blob/561433066966536ac17f3c9852d7d85f7b7e1e36/LICENSE
// Block is an inclusive [Start, End] range of IDs (UIDs or GIDs).
type Block struct {
	Start uint32
	End   uint32
}
// Sentinel errors returned by ParseBlock when the scan matches fewer than two numbers.
var (
	ErrBlockSlashBadFormat = fmt.Errorf("block not in the format \"<start>/<size>\"")
	ErrBlockDashBadFormat  = fmt.Errorf("block not in the format \"<start>-<end>\"")
)
// ParseBlock parses a block string in either "<start>/<size>" or
// "<start>-<end>" form into a Block.
func ParseBlock(in string) (Block, error) {
	if strings.Contains(in, "/") {
		var start, size uint32
		switch n, err := fmt.Sscanf(in, "%d/%d", &start, &size); {
		case err != nil:
			return Block{}, err
		case n != 2:
			return Block{}, ErrBlockSlashBadFormat
		}
		// The size form is converted to an inclusive end bound.
		return Block{Start: start, End: start + size - 1}, nil
	}
	var start, end uint32
	switch n, err := fmt.Sscanf(in, "%d-%d", &start, &end); {
	case err != nil:
		return Block{}, err
	case n != 2:
		return Block{}, ErrBlockDashBadFormat
	}
	return Block{Start: start, End: end}, nil
}
// String renders the block in "<start>/<size>" form (the slash variant
// accepted by ParseBlock).
func (b Block) String() string {
	return fmt.Sprintf("%d/%d", b.Start, b.Size())
}
// RangeString renders the block in "<start>-<end>" form (the dash variant
// accepted by ParseBlock).
func (b Block) RangeString() string {
	return fmt.Sprintf("%d-%d", b.Start, b.End)
}
// Size returns the number of IDs in the block; the range is inclusive.
func (b Block) Size() uint32 {
	return b.End - b.Start + 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"encoding/json"
"fmt"
"os"
"path"
"strconv"
"strings"
"text/template"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/durationpb"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/protomarshal"
)
// InjectionFuncmap is the set of helper functions available to the sidecar
// injection templates.
var InjectionFuncmap = createInjectionFuncmap()

// createInjectionFuncmap builds the template.FuncMap mapping template helper
// names to their Go implementations.
func createInjectionFuncmap() template.FuncMap {
	return template.FuncMap{
		"formatDuration":      formatDuration,
		"isset":               isset,
		"excludeInboundPort":  excludeInboundPort,
		"includeInboundPorts": includeInboundPorts,
		"kubevirtInterfaces":  kubevirtInterfaces,
		"excludeInterfaces":   excludeInterfaces,
		"applicationPorts":    applicationPorts,
		"annotation":          getAnnotation,
		"valueOrDefault":      valueOrDefault,
		"toJSON":              toJSON,
		"fromJSON":            fromJSON,
		"structToJSON":        structToJSON,
		"protoToJSON":         protoToJSON,
		"toYaml":              toYaml,
		"indent":              indent,
		"directory":           directory,
		"contains":            flippedContains,
		"toLower":             strings.ToLower,
		"appendMultusNetwork": appendMultusNetwork,
		"env":                 env,
		"omit":                omit,
		"strdict":             strdict,
		"toJsonMap":           toJSONMap,
		"mergeMaps":           mergeMaps,
	}
}
// Allows the template to use env variables from istiod.
// Istiod will use a custom template, without 'values.yaml', and the pod will have
// an optional 'vendor' configmap where additional settings can be defined.
// An unset or empty variable yields def.
func env(key string, def string) string {
	if val, ok := os.LookupEnv(key); ok && val != "" {
		return val
	}
	return def
}
// formatDuration renders a protobuf Duration using Go's standard
// time.Duration string form (e.g. "1m30s").
func formatDuration(in *durationpb.Duration) string {
	return in.AsDuration().String()
}
// isset reports whether key is present in m (regardless of its value).
func isset(m map[string]string, key string) bool {
	if _, found := m[key]; found {
		return true
	}
	return false
}
// directory returns the directory portion of filepath, including the trailing
// slash, or "" when the path has no directory component.
func directory(filepath string) string {
	parent, _ := path.Split(filepath)
	return parent
}
// flippedContains reports whether haystack contains needle. The argument
// order is flipped relative to strings.Contains for template pipeline use.
func flippedContains(needle, haystack string) bool {
	return strings.Index(haystack, needle) >= 0
}
// excludeInboundPort appends port to the comma-separated excludedInboundPorts
// list unless it is already present. An empty or zero port leaves the list
// unchanged. Entries are trimmed, and empty entries are dropped.
func excludeInboundPort(port any, excludedInboundPorts string) string {
	portStr := strings.TrimSpace(fmt.Sprint(port))
	if len(portStr) == 0 || portStr == "0" {
		// Nothing to do.
		return excludedInboundPorts
	}
	// Exclude the readiness port if not already excluded.
	ports := splitPorts(excludedInboundPorts)
	outPorts := make([]string, 0, len(ports)+1)
	for _, port := range ports {
		// Trim BEFORE comparing: previously the untrimmed entry was compared,
		// so " 80" did not match "80" and a duplicate "80" was appended.
		port = strings.TrimSpace(port)
		if port == portStr {
			// The port is already excluded.
			return excludedInboundPorts
		}
		if len(port) > 0 {
			outPorts = append(outPorts, port)
		}
	}
	// The port was not already excluded - exclude it now.
	outPorts = append(outPorts, portStr)
	return strings.Join(outPorts, ",")
}
// valueOrDefault returns defaultValue when value is nil or the empty string,
// and value itself otherwise.
func valueOrDefault(value any, defaultValue any) any {
	switch value {
	case nil, "":
		return defaultValue
	}
	return value
}
// toJSON renders a string map as a JSON object, returning "{}" for a nil map
// or on a (logged) marshalling failure.
func toJSON(m map[string]string) string {
	if m != nil {
		if encoded, err := json.Marshal(m); err == nil {
			return string(encoded)
		}
		log.Warnf("Unable to marshal %v", m)
	}
	return "{}"
}
// fromJSON parses a JSON string into an arbitrary value, returning the
// literal string "{}" when the input cannot be parsed.
func fromJSON(j string) any {
	var parsed any
	if err := json.Unmarshal([]byte(j), &parsed); err != nil {
		log.Warnf("Unable to unmarshal %s", j)
		return "{}"
	}
	return parsed
}
// indent prefixes every line after the first with `spaces` spaces. The first
// line is left untouched so the result can be spliced mid-line into YAML.
func indent(spaces int, source string) string {
	// Build the padding once instead of re-formatting it per line.
	pad := fmt.Sprintf(fmt.Sprintf("%% %ds", spaces), "")
	lines := strings.Split(source, "\n")
	for i := 1; i < len(lines); i++ {
		lines[i] = pad + lines[i]
	}
	return strings.Join(lines, "\n")
}
// toYaml renders value as YAML, returning "" (and logging a warning) when
// marshalling fails.
func toYaml(value any) string {
	if out, err := yaml.Marshal(value); err == nil {
		return string(out)
	}
	log.Warnf("Unable to marshal %v", value)
	return ""
}
// getAnnotation returns the named annotation from meta, or the stringified
// defaultValue when the annotation is absent.
func getAnnotation(meta metav1.ObjectMeta, name string, defaultValue any) string {
	if value, found := meta.Annotations[name]; found {
		return value
	}
	return fmt.Sprint(defaultValue)
}
// appendMultusNetwork appends the Istio CNI network to an existing Multus
// networks annotation value, which may be either a comma-separated list of
// network names or a JSON array of network objects. The value is returned
// unchanged when the network is already listed or cannot be parsed.
func appendMultusNetwork(existingValue, istioCniNetwork string) string {
	if existingValue == "" {
		return istioCniNetwork
	}
	// A ']' anywhere in the value is treated as the JSON-array form.
	i := strings.LastIndex(existingValue, "]")
	isJSON := i != -1
	if isJSON {
		networks := []map[string]any{}
		err := json.Unmarshal([]byte(existingValue), &networks)
		if err != nil {
			// existingValue is not valid JSON; nothing we can do but skip injection
			log.Warnf("Unable to unmarshal Multus Network annotation JSON value: %v", err)
			return existingValue
		}
		for _, net := range networks {
			if net["name"] == istioCniNetwork {
				return existingValue
			}
		}
		// Splice a new {"name": ...} object in just before the closing bracket,
		// preserving the original formatting of the rest of the value.
		return existingValue[0:i] + fmt.Sprintf(`, {"name": "%s"}`, istioCniNetwork) + existingValue[i:]
	}
	// Comma-separated list form: entries are compared after trimming.
	for _, net := range strings.Split(existingValue, ",") {
		if strings.TrimSpace(net) == istioCniNetwork {
			return existingValue
		}
	}
	return existingValue + ", " + istioCniNetwork
}
// this function is no longer used by the template but kept around for backwards compatibility
// applicationPorts returns a comma-separated list of TCP ports exposed by all
// containers except the proxy container.
func applicationPorts(containers []corev1.Container) string {
	return getContainerPorts(containers, func(c corev1.Container) bool {
		return c.Name != ProxyContainerName
	})
}
// includeInboundPorts returns a comma-separated list of ports from every
// container in the pod spec (no container is filtered out).
func includeInboundPorts(containers []corev1.Container) string {
	// Include the ports from all containers in the deployment.
	return getContainerPorts(containers, func(corev1.Container) bool { return true })
}
// getPortsForContainer returns the container's ports as decimal strings,
// skipping UDP and SCTP ports.
func getPortsForContainer(container corev1.Container) []string {
	ports := make([]string, 0, len(container.Ports))
	for _, p := range container.Ports {
		switch p.Protocol {
		case corev1.ProtocolUDP, corev1.ProtocolSCTP:
			// Only non-UDP/SCTP ports are reported.
		default:
			ports = append(ports, strconv.Itoa(int(p.ContainerPort)))
		}
	}
	return ports
}
// getContainerPorts joins the ports of every container accepted by
// shouldIncludePorts into a single comma-separated string.
func getContainerPorts(containers []corev1.Container, shouldIncludePorts func(corev1.Container) bool) string {
	var parts []string
	for _, c := range containers {
		if !shouldIncludePorts(c) {
			continue
		}
		parts = append(parts, getPortsForContainer(c)...)
	}
	return strings.Join(parts, ",")
}
// kubevirtInterfaces returns its argument unchanged; presumably kept so that
// templates still referencing this helper keep working — TODO confirm.
func kubevirtInterfaces(s string) string {
	return s
}
// excludeInterfaces returns its argument unchanged; presumably kept so that
// templates still referencing this helper keep working — TODO confirm.
func excludeInterfaces(s string) string {
	return s
}
// structToJSON marshals an arbitrary value to JSON, returning "{}" for nil
// input or on a (logged) marshalling failure.
func structToJSON(v any) string {
	if v == nil {
		return "{}"
	}
	encoded, err := json.Marshal(v)
	if err == nil {
		return string(encoded)
	}
	log.Warnf("Unable to marshal %v", v)
	return "{}"
}
// protoToJSON strips default ProxyConfig fields (see cleanProxyConfig) and
// renders the remaining message as JSON, returning "{}" for nil input or on
// a (logged) marshalling failure.
func protoToJSON(v proto.Message) string {
	v = cleanProxyConfig(v)
	if v == nil {
		return "{}"
	}
	ba, err := protomarshal.ToJSON(v)
	if err != nil {
		log.Warnf("Unable to marshal %v: %v", v, err)
		return "{}"
	}
	return ba
}
// Rather than dump the entire proxy config, we remove fields that are default
// This makes the pod spec much smaller
// This is not comprehensive code, but nothing will break if this misses some fields
func cleanProxyConfig(msg proto.Message) proto.Message {
	// Non-ProxyConfig messages are passed through untouched.
	originalProxyConfig, ok := msg.(*meshconfig.ProxyConfig)
	if !ok || originalProxyConfig == nil {
		return msg
	}
	// Work on a clone so the caller's message is never mutated.
	pc := proto.Clone(originalProxyConfig).(*meshconfig.ProxyConfig)
	defaults := mesh.DefaultProxyConfig()
	// Scalar fields: reset to the zero value when they equal the default.
	if pc.ConfigPath == defaults.ConfigPath {
		pc.ConfigPath = ""
	}
	if pc.BinaryPath == defaults.BinaryPath {
		pc.BinaryPath = ""
	}
	if pc.ControlPlaneAuthPolicy == defaults.ControlPlaneAuthPolicy {
		pc.ControlPlaneAuthPolicy = 0
	}
	// ClusterName is a oneof; only the ServiceCluster variant is compared.
	if x, ok := pc.GetClusterName().(*meshconfig.ProxyConfig_ServiceCluster); ok {
		if x.ServiceCluster == defaults.GetClusterName().(*meshconfig.ProxyConfig_ServiceCluster).ServiceCluster {
			pc.ClusterName = nil
		}
	}
	// Message-valued fields: cleared via proto.Equal comparison with the default.
	if proto.Equal(pc.DrainDuration, defaults.DrainDuration) {
		pc.DrainDuration = nil
	}
	if proto.Equal(pc.TerminationDrainDuration, defaults.TerminationDrainDuration) {
		pc.TerminationDrainDuration = nil
	}
	if pc.DiscoveryAddress == defaults.DiscoveryAddress {
		pc.DiscoveryAddress = ""
	}
	if proto.Equal(pc.EnvoyMetricsService, defaults.EnvoyMetricsService) {
		pc.EnvoyMetricsService = nil
	}
	if proto.Equal(pc.EnvoyAccessLogService, defaults.EnvoyAccessLogService) {
		pc.EnvoyAccessLogService = nil
	}
	if proto.Equal(pc.Tracing, defaults.Tracing) {
		pc.Tracing = nil
	}
	if pc.ProxyAdminPort == defaults.ProxyAdminPort {
		pc.ProxyAdminPort = 0
	}
	if pc.StatNameLength == defaults.StatNameLength {
		pc.StatNameLength = 0
	}
	if pc.StatusPort == defaults.StatusPort {
		pc.StatusPort = 0
	}
	if proto.Equal(pc.Concurrency, defaults.Concurrency) {
		pc.Concurrency = nil
	}
	if len(pc.ProxyMetadata) == 0 {
		pc.ProxyMetadata = nil
	}
	return proto.Message(pc)
}
// toJSONMap merges the given maps (later maps win on overlapping keys) and
// renders the result as JSON, returning "" on marshalling failure.
func toJSONMap(mps ...map[string]string) string {
	merged := mergeMaps(mps...)
	encoded, err := json.Marshal(merged)
	if err != nil {
		return ""
	}
	return string(encoded)
}
// omit returns a copy of dict with the given keys removed; dict itself is
// never modified.
func omit(dict map[string]string, keys ...string) map[string]string {
	drop := make(map[string]struct{}, len(keys))
	for _, k := range keys {
		drop[k] = struct{}{}
	}
	res := map[string]string{}
	for k, v := range dict {
		if _, skip := drop[k]; !skip {
			res[k] = v
		}
	}
	return res
}
// strdict is the same as the "dict" function (http://masterminds.github.io/sprig/dicts.html)
// but returns a map[string]string instead of interface{} types. This allows it to be used
// in annotations/labels. A trailing key without a value maps to "".
func strdict(v ...string) map[string]string {
	dict := make(map[string]string, len(v)/2)
	for i := 0; i < len(v); i += 2 {
		if i+1 < len(v) {
			dict[v[i]] = v[i+1]
		} else {
			dict[v[i]] = ""
		}
	}
	return dict
}
// Merge maps merges multiple maps. Latter maps take precedence over previous maps on overlapping fields
// Calling with no maps returns nil.
func mergeMaps(maps ...map[string]string) map[string]string {
	if len(maps) == 0 {
		return nil
	}
	merged := make(map[string]string, len(maps[0]))
	for _, next := range maps {
		for key, value := range next {
			merged[key] = value
		}
	}
	return merged
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"fmt"
"net/netip"
"strconv"
"strings"
"github.com/hashicorp/go-multierror"
"istio.io/api/annotation"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/util/protomarshal"
)
// annotationValidationFunc validates a single sidecar annotation value,
// returning an error when the value is invalid.
type annotationValidationFunc func(value string) error

// per-sidecar policy and status
var (
	// AnnotationValidation maps each supported injection annotation name to
	// the validator applied to its value.
	AnnotationValidation = map[string]annotationValidationFunc{
		annotation.SidecarInterceptionMode.Name:                   validateInterceptionMode,
		annotation.SidecarEnableCoreDump.Name:                     validateBool,
		annotation.SidecarStatusPort.Name:                         validateStatusPort,
		annotation.SidecarStatusReadinessInitialDelaySeconds.Name: validateUInt32,
		annotation.SidecarStatusReadinessPeriodSeconds.Name:       validateUInt32,
		annotation.SidecarStatusReadinessFailureThreshold.Name:    validateUInt32,
		annotation.SidecarTrafficIncludeOutboundIPRanges.Name:     ValidateIncludeIPRanges,
		annotation.SidecarTrafficExcludeOutboundIPRanges.Name:     ValidateExcludeIPRanges,
		annotation.SidecarTrafficIncludeInboundPorts.Name:         ValidateIncludeInboundPorts,
		annotation.SidecarTrafficExcludeInboundPorts.Name:         ValidateExcludeInboundPorts,
		annotation.SidecarTrafficExcludeOutboundPorts.Name:        ValidateExcludeOutboundPorts,
		annotation.PrometheusMergeMetrics.Name:                    validateBool,
		annotation.ProxyConfig.Name:                               validateProxyConfig,
	}
)
// validateProxyConfig checks that value is valid proxy-config YAML by
// applying it on top of the default proxy config and running the mesh-config
// proxy validation over the result.
func validateProxyConfig(value string) error {
	config := mesh.DefaultProxyConfig()
	if err := protomarshal.ApplyYAML(value, config); err != nil {
		return fmt.Errorf("failed to convert to apply proxy config: %v", err)
	}
	return validation.ValidateMeshConfigProxyConfig(config)
}
// validateAnnotations runs the registered validator for every recognized
// injection annotation in annotations, collecting all failures into a single
// multierror. Unknown annotations are ignored.
func validateAnnotations(annotations map[string]string) (err error) {
	for name, value := range annotations {
		validator, known := AnnotationValidation[name]
		if !known {
			continue
		}
		if e := validator(value); e != nil {
			err = multierror.Append(err, fmt.Errorf("invalid value '%s' for annotation '%s': %v", value, name, e))
		}
	}
	return
}
// validatePortList checks that ports is a valid comma-separated port list,
// naming parameterName in the error on failure.
func validatePortList(parameterName, ports string) error {
	_, err := parsePorts(ports)
	if err != nil {
		return fmt.Errorf("%s invalid: %v", parameterName, err)
	}
	return nil
}
// validateInterceptionMode validates the interceptionMode annotation
func validateInterceptionMode(mode string) error {
	valid := mode == meshconfig.ProxyConfig_REDIRECT.String() ||
		mode == meshconfig.ProxyConfig_TPROXY.String() ||
		mode == string(model.InterceptionNone) // not a global mesh config - must be enabled for each sidecar
	if !valid {
		return fmt.Errorf("interceptionMode invalid, use REDIRECT,TPROXY,NONE: %v", mode)
	}
	return nil
}
// ValidateIncludeIPRanges validates the includeIPRanges parameter; "*" is
// accepted as a wildcard.
func ValidateIncludeIPRanges(ipRanges string) error {
	if ipRanges == "*" {
		return nil
	}
	if err := validateCIDRList(ipRanges); err != nil {
		return fmt.Errorf("includeIPRanges invalid: %v", err)
	}
	return nil
}
// ValidateExcludeIPRanges validates the excludeIPRanges parameter (no
// wildcard is accepted here, unlike the include variant).
func ValidateExcludeIPRanges(ipRanges string) error {
	if err := validateCIDRList(ipRanges); err != nil {
		return fmt.Errorf("excludeIPRanges invalid: %v", err)
	}
	return nil
}
// ValidateIncludeInboundPorts validates the includeInboundPorts parameter;
// "*" is accepted as a wildcard.
func ValidateIncludeInboundPorts(ports string) error {
	if ports == "*" {
		return nil
	}
	return validatePortList("includeInboundPorts", ports)
}
// ValidateExcludeInboundPorts validates the excludeInboundPorts parameter
// as a plain comma-separated port list (no wildcard).
func ValidateExcludeInboundPorts(ports string) error {
	return validatePortList("excludeInboundPorts", ports)
}
// ValidateExcludeOutboundPorts validates the excludeOutboundPorts parameter
// as a plain comma-separated port list (no wildcard).
func ValidateExcludeOutboundPorts(ports string) error {
	return validatePortList("excludeOutboundPorts", ports)
}
// validateStatusPort validates the statusPort parameter
func validateStatusPort(port string) error {
	if _, e := parsePort(port); e != nil {
		// Fix: the error previously said "excludeInboundPorts invalid" — a
		// copy-paste from the port-list validators — misattributing the failure.
		return fmt.Errorf("statusPort invalid: %v", e)
	}
	return nil
}
// validateUInt32 validates that the given annotation value is a positive integer.
func validateUInt32(value string) error {
	if _, err := strconv.ParseUint(value, 10, 32); err != nil {
		return err
	}
	return nil
}
// validateBool validates that the given annotation value is a boolean.
func validateBool(value string) error {
	if _, err := strconv.ParseBool(value); err != nil {
		return err
	}
	return nil
}
// validateCIDRList checks that cidrs is a comma-separated list of valid CIDR
// prefixes; the empty string is accepted as an empty list.
func validateCIDRList(cidrs string) error {
	if cidrs == "" {
		return nil
	}
	for _, cidr := range strings.Split(cidrs, ",") {
		if _, err := netip.ParsePrefix(cidr); err != nil {
			return fmt.Errorf("failed parsing cidr '%s': %v", cidr, err)
		}
	}
	return nil
}
// splitPorts splits a comma-separated port list into its raw (untrimmed)
// elements.
func splitPorts(portsString string) []string {
	return strings.Split(portsString, ",")
}
// parsePort parses a single, optionally space-padded, decimal port number
// constrained to the 16-bit range.
func parsePort(portStr string) (int, error) {
	trimmed := strings.TrimSpace(portStr)
	if port, err := strconv.ParseUint(trimmed, 10, 16); err == nil {
		return int(port), nil
	} else {
		return 0, fmt.Errorf("failed parsing port '%s': %v", portStr, err)
	}
}
// parsePorts parses a comma-separated list of ports; an empty or
// whitespace-only string yields an empty (non-nil) list.
func parsePorts(portsString string) ([]int, error) {
	trimmed := strings.TrimSpace(portsString)
	ports := make([]int, 0)
	if trimmed == "" {
		return ports, nil
	}
	for _, portStr := range splitPorts(trimmed) {
		port, err := parsePort(portStr)
		if err != nil {
			return nil, fmt.Errorf("failed parsing port '%s': %v", portStr, err)
		}
		ports = append(ports, port)
	}
	return ports, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"context"
"fmt"
"path/filepath"
"time"
"github.com/fsnotify/fsnotify"
"github.com/hashicorp/go-multierror"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/watcher/configmapwatcher"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/istiomultierror"
)
// Watcher watches for and reacts to injection config updates.
type Watcher interface {
	// SetHandler sets the handler that is run when the config changes.
	// Must call this before Run.
	SetHandler(func(*Config, string) error)

	// Run starts the Watcher. Must call this after SetHandler.
	Run(<-chan struct{})

	// Get returns the sidecar and values configuration.
	Get() (*Config, string, error)
}
// Compile-time interface conformance checks.
var _ Watcher = &fileWatcher{}

var _ Watcher = &configMapWatcher{}

// fileWatcher implements Watcher over local config/values files, reloading
// on fsnotify events for the config file's parent directory.
type fileWatcher struct {
	watcher    *fsnotify.Watcher
	configFile string
	valuesFile string
	handler    func(*Config, string) error
}

// configMapWatcher implements Watcher over a Kubernetes ConfigMap holding the
// config (configKey) and values (valuesKey) entries.
type configMapWatcher struct {
	c         *configmapwatcher.Controller
	client    kube.Client
	namespace string
	name      string
	configKey string
	valuesKey string
	handler   func(*Config, string) error
}
// NewFileWatcher creates a Watcher for local config and values files.
func NewFileWatcher(configFile, valuesFile string) (Watcher, error) {
	fsWatcher, err := fsnotify.NewWatcher()
	if err != nil {
		return nil, err
	}
	// watch the parent directory of the target files so we can catch
	// symlink updates of k8s ConfigMaps volumes.
	watchDir, _ := filepath.Split(configFile)
	if err := fsWatcher.Add(watchDir); err != nil {
		return nil, fmt.Errorf("could not watch %v: %v", watchDir, err)
	}
	fw := &fileWatcher{
		watcher:    fsWatcher,
		configFile: configFile,
		valuesFile: valuesFile,
	}
	return fw, nil
}
// Run processes filesystem events until stop is closed, debouncing bursts of
// Write/Create events (watchDebounceDelay) before reloading the config and
// invoking the handler.
func (w *fileWatcher) Run(stop <-chan struct{}) {
	defer w.watcher.Close()
	// timerC is non-nil only while a debounce window is open.
	var timerC <-chan time.Time
	for {
		select {
		case <-timerC:
			timerC = nil
			sidecarConfig, valuesConfig, err := w.Get()
			if err != nil {
				// Reload failed; keep running and wait for the next event.
				log.Errorf("update error: %v", err)
				break
			}
			if w.handler != nil {
				if err := w.handler(sidecarConfig, valuesConfig); err != nil {
					log.Errorf("update error: %v", err)
				}
			}
		case event, ok := <-w.watcher.Events:
			if !ok {
				// Event channel closed; the watcher is shutting down.
				return
			}
			log.Debugf("Injector watch update: %+v", event)
			// use a timer to debounce configuration updates
			if (event.Has(fsnotify.Write) || event.Has(fsnotify.Create)) && timerC == nil {
				timerC = time.After(watchDebounceDelay)
			}
		case err, ok := <-w.watcher.Errors:
			if !ok {
				return
			}
			log.Errorf("Watcher error: %v", err)
		case <-stop:
			return
		}
	}
}
// Get loads the current sidecar config and raw values from the watched files.
func (w *fileWatcher) Get() (*Config, string, error) {
	return loadConfig(w.configFile, w.valuesFile)
}
// SetHandler sets the callback invoked after each successful config reload.
func (w *fileWatcher) SetHandler(handler func(*Config, string) error) {
	w.handler = handler
}
// NewConfigMapWatcher creates a new Watcher for changes to the given ConfigMap.
func NewConfigMapWatcher(client kube.Client, namespace, name, configKey, valuesKey string) Watcher {
	w := &configMapWatcher{
		client:    client,
		namespace: namespace,
		name:      name,
		configKey: configKey,
		valuesKey: valuesKey,
	}
	// The controller invokes this callback for each observed ConfigMap change.
	w.c = configmapwatcher.NewController(client, namespace, name, func(cm *v1.ConfigMap) {
		sidecarConfig, valuesConfig, err := readConfigMap(cm, configKey, valuesKey)
		if err != nil {
			// A malformed update is logged and skipped; the previous config stays active.
			log.Warnf("failed to read injection config from ConfigMap: %v", err)
			return
		}
		if w.handler != nil {
			if err := w.handler(sidecarConfig, valuesConfig); err != nil {
				log.Errorf("update error: %v", err)
			}
		}
	})
	return w
}
// Run starts the underlying ConfigMap controller.
func (w *configMapWatcher) Run(stop <-chan struct{}) {
	w.c.Run(stop)
}
// Get fetches the ConfigMap directly from the API server and extracts the
// sidecar config and values from it.
func (w *configMapWatcher) Get() (*Config, string, error) {
	cm, err := w.client.Kube().CoreV1().ConfigMaps(w.namespace).Get(context.TODO(), w.name, metav1.GetOptions{})
	if err != nil {
		return nil, "", err
	}
	return readConfigMap(cm, w.configKey, w.valuesKey)
}
// SetHandler sets the callback invoked for each successfully-read ConfigMap update.
func (w *configMapWatcher) SetHandler(handler func(*Config, string) error) {
	w.handler = handler
}
// readConfigMap extracts and parses the injection config (configKey) and raw
// values string (valuesKey) from cm.
func readConfigMap(cm *v1.ConfigMap, configKey, valuesKey string) (*Config, string, error) {
	if cm == nil {
		return nil, "", fmt.Errorf("no ConfigMap found")
	}
	configYaml, found := cm.Data[configKey]
	if !found {
		return nil, "", fmt.Errorf("missing ConfigMap config key %q", configKey)
	}
	c, err := unmarshalConfig([]byte(configYaml))
	if err != nil {
		return nil, "", fmt.Errorf("failed reading config: %v. YAML:\n%s", err, configYaml)
	}
	valuesConfig, found := cm.Data[valuesKey]
	if !found {
		return nil, "", fmt.Errorf("missing ConfigMap values key %q", valuesKey)
	}
	return c, valuesConfig, nil
}
// WatcherMulticast allows multiple event handlers to register for the same watcher,
// simplifying injector based controllers.
type WatcherMulticast struct {
	handlers []func(*Config, string) error
	impl     Watcher
	// Get returns the current webhook configuration snapshot.
	Get func() WebhookConfig
}
// NewMulticast wraps impl so multiple handlers can react to its updates: it
// claims impl's single handler slot and fans each update out to every handler
// registered via AddHandler, combining their errors into one.
func NewMulticast(impl Watcher, getter func() WebhookConfig) *WatcherMulticast {
	res := &WatcherMulticast{
		impl: impl,
		Get:  getter,
	}
	impl.SetHandler(func(c *Config, s string) error {
		err := istiomultierror.New()
		for _, h := range res.handlers {
			err = multierror.Append(err, h(c, s))
		}
		return err.ErrorOrNil()
	})
	return res
}
// AddHandler registers an additional handler that is run when the config
// changes. Must call this before Run.
func (wm *WatcherMulticast) AddHandler(handler func(*Config, string) error) {
	wm.handlers = append(wm.handlers, handler)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package inject
import (
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/prometheus/prometheus/util/strutil"
"gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
kubeApiAdmissionv1beta1 "k8s.io/api/admission/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
kjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/mergepatch"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"sigs.k8s.io/yaml"
"istio.io/api/annotation"
"istio.io/api/label"
meshconfig "istio.io/api/mesh/v1alpha1"
opconfig "istio.io/istio/operator/pkg/apis/istio/v1alpha1"
"istio.io/istio/pilot/cmd/pilot-agent/status"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/platform"
"istio.io/istio/pkg/slices"
"istio.io/istio/pkg/util/protomarshal"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/tools/istio-iptables/pkg/constants"
)
var (
	// Scheme/codec machinery used to (de)serialize AdmissionReview payloads.
	runtimeScheme  = runtime.NewScheme()
	codecs         = serializer.NewCodecFactory(runtimeScheme)
	deserializer   = codecs.UniversalDeserializer()
	jsonSerializer = kjson.NewSerializerWithOptions(kjson.DefaultMetaFactory, runtimeScheme, runtimeScheme, kjson.SerializerOptions{})

	// URLParameterToEnv maps webhook URL query parameters to the proxy
	// environment variable names they populate.
	URLParameterToEnv = map[string]string{
		"cluster": "ISTIO_META_CLUSTER_ID",
		"net":     "ISTIO_META_NETWORK",
	}
)
// init registers core and admission (v1 and v1beta1) types on the local
// scheme so the deserializer can decode both AdmissionReview versions.
func init() {
	_ = corev1.AddToScheme(runtimeScheme)
	_ = admissionv1.AddToScheme(runtimeScheme)
	_ = kubeApiAdmissionv1beta1.AddToScheme(runtimeScheme)
}
const (
	// prometheus will convert annotation to this format
	// `prometheus.io/scrape` `prometheus.io.scrape` `prometheus-io/scrape` have the same meaning in Prometheus
	// for more details, please checkout [here](https://github.com/prometheus/prometheus/blob/71a0f42331566a8849863d77078083edbb0b3bc4/util/strutil/strconv.go#L40)
	prometheusScrapeAnnotation = "prometheus_io_scrape"
	prometheusPortAnnotation   = "prometheus_io_port"
	prometheusPathAnnotation   = "prometheus_io_path"

	// watchDebounceDelay coalesces rapid file-watch events into a single reload.
	watchDebounceDelay = 100 * time.Millisecond
)
const (
	// InitContainers is the name of the property in k8s pod spec
	InitContainers = "initContainers"

	// Containers is the name of the property in k8s pod spec
	Containers = "containers"
)
// WebhookConfig is the composite configuration the injection webhook operates
// on: the parsed templates, values, and mesh config.
type WebhookConfig struct {
	Templates  Templates
	Values     ValuesConfig
	MeshConfig *meshconfig.MeshConfig
}
// Webhook implements a mutating webhook for automatic proxy injection.
type Webhook struct {
	// mu guards Config, meshConfig and valuesConfig.
	mu           sync.RWMutex
	Config       *Config
	meshConfig   *meshconfig.MeshConfig
	valuesConfig ValuesConfig
	// namespaces is only created on OpenShift (see NewWebhook); nil otherwise.
	namespaces kclient.Client[*corev1.Namespace]

	// please do not call SetHandler() on this watcher, instead use MultiCast.AddHandler()
	watcher   Watcher
	MultiCast *WatcherMulticast
	env       *model.Environment
	revision  string
}
// GetConfig returns a consistent snapshot of the current templates, values,
// and mesh config, taken under the read lock.
func (wh *Webhook) GetConfig() WebhookConfig {
	wh.mu.RLock()
	defer wh.mu.RUnlock()
	return WebhookConfig{
		Templates:  wh.Config.Templates,
		Values:     wh.valuesConfig,
		MeshConfig: wh.meshConfig,
	}
}
// ParsedContainers holds the unmarshalled containers and initContainers
type ParsedContainers struct {
	Containers     []corev1.Container `json:"containers,omitempty"`
	InitContainers []corev1.Container `json:"initContainers,omitempty"`
}
// AllContainers returns containers followed by initContainers as a single
// new slice; the receiver's slices are not mutated (Containers is cloned).
func (p ParsedContainers) AllContainers() []corev1.Container {
	return append(slices.Clone(p.Containers), p.InitContainers...)
}
// nolint directives: interfacer
// loadConfig reads and parses the injection template config and the raw
// values file from disk.
func loadConfig(injectFile, valuesFile string) (*Config, string, error) {
	injectData, err := os.ReadFile(injectFile)
	if err != nil {
		return nil, "", err
	}
	c, err := unmarshalConfig(injectData)
	if err != nil {
		log.Warnf("Failed to parse injectFile %s", string(injectData))
		return nil, "", err
	}
	valuesData, err := os.ReadFile(valuesFile)
	if err != nil {
		return nil, "", err
	}
	return c, string(valuesData), nil
}
// unmarshalConfig parses raw injection config bytes and logs a debug-level
// summary (checksum, policy, selectors, templates) of the parsed result.
func unmarshalConfig(data []byte) (*Config, error) {
	c, err := UnmarshalConfig(data)
	if err != nil {
		return nil, err
	}
	log.Debugf("New inject configuration: sha256sum %x", sha256.Sum256(data))
	log.Debugf("Policy: %v", c.Policy)
	log.Debugf("AlwaysInjectSelector: %v", c.AlwaysInjectSelector)
	log.Debugf("NeverInjectSelector: %v", c.NeverInjectSelector)
	log.Debugf("Templates: %v", c.RawTemplates)
	return &c, nil
}
// WebhookParameters configures parameters for the sidecar injection
// webhook.
type WebhookParameters struct {
	// Watcher watches the sidecar injection configuration.
	Watcher Watcher

	// Port is the webhook port, e.g. typically 443 for https.
	// This is mainly used for tests. Webhook runs on the port started by Istiod.
	Port int

	// Env provides the mesh config and its change notifications.
	Env *model.Environment

	// Use an existing mux instead of creating our own.
	Mux *http.ServeMux

	// The istio.io/rev this injector is responsible for
	Revision string

	// KubeClient, when non-nil, allows the webhook to create a namespace
	// informer (only done on OpenShift; see NewWebhook).
	KubeClient kube.Client
}
// NewWebhook creates a new instance of a mutating webhook for automatic sidecar injection.
func NewWebhook(p WebhookParameters) (*Webhook, error) {
	if p.Mux == nil {
		return nil, errors.New("expected mux to be passed, but was not passed")
	}

	wh := &Webhook{
		watcher:    p.Watcher,
		meshConfig: p.Env.Mesh(),
		env:        p.Env,
		revision:   p.Revision,
	}
	if p.KubeClient != nil {
		if platform.IsOpenShift() {
			// Namespace informer is only created on OpenShift — presumably for
			// the SCC range lookups elsewhere in this package; TODO confirm.
			wh.namespaces = kclient.New[*corev1.Namespace](p.KubeClient)
		}
	}

	// Multicast wraps the watcher so additional handlers can be registered;
	// wh.updateConfig is the first handler.
	mc := NewMulticast(p.Watcher, wh.GetConfig)
	mc.AddHandler(wh.updateConfig)
	wh.MultiCast = mc

	// Prime the webhook with the current configuration. A parse failure here
	// is logged but not fatal.
	sidecarConfig, valuesConfig, err := p.Watcher.Get()
	if err != nil {
		return nil, err
	}
	if err := wh.updateConfig(sidecarConfig, valuesConfig); err != nil {
		log.Errorf("failed to process webhook config: %v", err)
	}

	p.Mux.HandleFunc("/inject", wh.serveInject)
	p.Mux.HandleFunc("/inject/", wh.serveInject)

	// Keep the cached mesh config in sync with mesh updates.
	p.Env.Watcher.AddMeshHandler(func() {
		wh.mu.Lock()
		wh.meshConfig = p.Env.Mesh()
		wh.mu.Unlock()
	})
	return wh, nil
}
// Run implements the webhook server
// It only starts the config watcher in the background; HTTP serving is done
// by the mux registered in NewWebhook.
func (wh *Webhook) Run(stop <-chan struct{}) {
	go wh.watcher.Run(stop)
}
// HasSynced reports whether the optional namespace informer has synced;
// always true when no informer was created (see NewWebhook).
func (wh *Webhook) HasSynced() bool {
	if wh.namespaces != nil {
		return wh.namespaces.HasSynced()
	}
	return true
}
// updateConfig installs the new sidecar config and parses/installs the new
// values under the write lock.
// NOTE(review): wh.Config is assigned before the values are parsed, so a
// values parse error leaves the new Config paired with the previous
// valuesConfig — confirm this is intended.
func (wh *Webhook) updateConfig(sidecarConfig *Config, valuesConfig string) error {
	wh.mu.Lock()
	defer wh.mu.Unlock()
	wh.Config = sidecarConfig
	vc, err := NewValuesConfig(valuesConfig)
	if err != nil {
		return err
	}
	wh.valuesConfig = vc
	return nil
}
// ContainerReorder selects how modifyContainers repositions a named container.
type ContainerReorder int

const (
	MoveFirst ContainerReorder = iota // move the container to the front of the list
	MoveLast                          // move the container to the end of the list
	Remove                            // drop the container from the list
)
// moveContainer removes the first container named `name` from `from` (if
// present) and appends it to `to`, returning both updated slices.
func moveContainer(from, to []corev1.Container, name string) ([]corev1.Container, []corev1.Container) {
	for i := range from {
		if from[i].Name != name {
			continue
		}
		// Copy the element before deleting it from the source slice.
		moved := from[i]
		from = slices.Delete(from, i)
		to = append(to, moved)
		break
	}
	return from, to
}
// modifyContainers repositions or removes the container named `name` within
// cl according to modifier, preserving the relative order of the others.
// When the container is absent, the remaining list is returned unchanged.
func modifyContainers(cl []corev1.Container, name string, modifier ContainerReorder) []corev1.Container {
	rest := []corev1.Container{}
	var found *corev1.Container
	for i := range cl {
		c := cl[i]
		if c.Name == name {
			found = &c
		} else {
			rest = append(rest, c)
		}
	}
	if found == nil {
		return rest
	}
	switch modifier {
	case MoveFirst:
		return append([]corev1.Container{*found}, rest...)
	case MoveLast:
		return append(rest, *found)
	case Remove:
		return rest
	default:
		return cl
	}
}
// hasContainer reports whether a container with the given name exists in cl.
func hasContainer(cl []corev1.Container, name string) bool {
	for i := range cl {
		if cl[i].Name == name {
			return true
		}
	}
	return false
}
// enablePrometheusMerge decides whether prometheus metrics merging is enabled
// for a pod. Precedence: pod annotation (if parseable) > mesh config setting >
// default (enabled).
func enablePrometheusMerge(mesh *meshconfig.MeshConfig, anno map[string]string) bool {
	// If annotation is present, we look there first
	if val, f := anno[annotation.PrometheusMergeMetrics.Name]; f {
		bval, err := strconv.ParseBool(val)
		if err != nil {
			// This shouldn't happen since we validate earlier in the code.
			// Bug fix: log the raw annotation value (val); the previous code
			// logged bval, which is always the zero value when parsing fails.
			log.Warnf("invalid annotation %v=%v", annotation.PrometheusMergeMetrics.Name, val)
		} else {
			return bval
		}
	}
	// If mesh config setting is present, use that
	if mesh.GetEnablePrometheusMerge() != nil {
		return mesh.GetEnablePrometheusMerge().Value
	}
	// Otherwise, we default to enable
	return true
}
// toAdmissionResponse wraps an error into an AdmissionResponse whose Status
// message carries the error text. Allowed is left false (the zero value).
func toAdmissionResponse(err error) *kube.AdmissionResponse {
	return &kube.AdmissionResponse{Result: &metav1.Status{Message: err.Error()}}
}
// ParseTemplates compiles each raw injection template. It fails fast on the
// first template that does not parse.
func ParseTemplates(tmpls RawTemplates) (Templates, error) {
	parsed := make(Templates, len(tmpls))
	for name, raw := range tmpls {
		tpl, err := parseDryTemplate(raw, InjectionFuncmap)
		if err != nil {
			return nil, err
		}
		parsed[name] = tpl
	}
	return parsed, nil
}
// ValuesConfig holds the injector's Helm-style values in three parallel
// representations: the raw YAML string, a typed proto struct, and a generic
// map (used by templates that index arbitrary paths).
type ValuesConfig struct {
	raw      string
	asStruct *opconfig.Values
	asMap    map[string]any
}

// Struct returns the typed (proto) representation of the values.
func (v ValuesConfig) Struct() *opconfig.Values {
	return v.asStruct
}

// Map returns the generic map representation of the values.
func (v ValuesConfig) Map() map[string]any {
	return v.asMap
}
// NewValuesConfig parses the raw values YAML into both the typed proto form
// and a generic map form. On error, a partially populated ValuesConfig is
// returned alongside the error.
func NewValuesConfig(v string) (ValuesConfig, error) {
	out := ValuesConfig{raw: v}
	pb := &opconfig.Values{}
	if err := protomarshal.ApplyYAML(v, pb); err != nil {
		return out, fmt.Errorf("could not parse configuration values: %v", err)
	}
	out.asStruct = pb
	asMap := map[string]any{}
	if err := yaml.Unmarshal([]byte(v), &asMap); err != nil {
		return out, fmt.Errorf("could not parse configuration values: %v", err)
	}
	out.asMap = asMap
	return out, nil
}
// InjectionParameters bundles all inputs needed to run the injection template
// for a single pod: the pod itself, its surrounding metadata, the templates
// to apply, and the mesh/proxy/values configuration in effect.
type InjectionParameters struct {
	pod        *corev1.Pod
	deployMeta metav1.ObjectMeta
	// namespace may be nil when no namespace informer is configured.
	namespace *corev1.Namespace
	typeMeta  metav1.TypeMeta
	// templates maps template name to its compiled form.
	templates map[string]*template.Template
	// defaultTemplate lists template names applied when none is requested.
	defaultTemplate []string
	// aliases maps an alias name to the list of templates it expands to.
	aliases     map[string][]string
	meshConfig  *meshconfig.MeshConfig
	proxyConfig *meshconfig.ProxyConfig
	valuesConfig ValuesConfig
	revision     string
	// proxyEnvs are extra env overrides parsed from the inject URL path.
	proxyEnvs map[string]string
	// injectedAnnotations are added to every injected pod (deprecated;
	// should be set in the template instead).
	injectedAnnotations map[string]string
}
// checkPreconditions warns (but does not fail) when the pod spec is
// configured in a way likely to break the injected sidecar.
func checkPreconditions(params InjectionParameters) {
	spec := params.pod.Spec
	meta := params.pod.ObjectMeta
	// A non-ClusterFirst DNSPolicy can prevent the Envoy sidecar from
	// resolving and connecting to Istio Pilot.
	if spec.DNSPolicy == "" || spec.DNSPolicy == corev1.DNSClusterFirst {
		return
	}
	log.Warnf("%q's DNSPolicy is not %q. The Envoy sidecar may not able to connect to Istio Pilot",
		meta.Namespace+"/"+potentialPodName(meta), corev1.DNSClusterFirst)
}
// getInjectionStatus serializes the names of every injected init container,
// container, volume, and image pull secret (plus the revision) into the JSON
// value stored in the sidecar status annotation. Returns "{}" if marshaling
// fails.
func getInjectionStatus(podSpec corev1.PodSpec, revision string) string {
	stat := &SidecarInjectionStatus{}
	for i := range podSpec.InitContainers {
		stat.InitContainers = append(stat.InitContainers, podSpec.InitContainers[i].Name)
	}
	for i := range podSpec.Containers {
		stat.Containers = append(stat.Containers, podSpec.Containers[i].Name)
	}
	for i := range podSpec.Volumes {
		stat.Volumes = append(stat.Volumes, podSpec.Volumes[i].Name)
	}
	for i := range podSpec.ImagePullSecrets {
		stat.ImagePullSecrets = append(stat.ImagePullSecrets, podSpec.ImagePullSecrets[i].Name)
	}
	// Rather than setting istio.io/rev label on injected pods include them here in status annotation.
	// This keeps us from overwriting the istio.io/rev label when using revision tags (i.e. istio.io/rev=<tag>).
	if revision == "" {
		revision = "default"
	}
	stat.Revision = revision
	raw, err := json.Marshal(stat)
	if err != nil {
		return "{}"
	}
	return string(raw)
}
// injectPod is the core of the injection logic. This takes a pod and injection
// template, as well as some inputs to the injection template, and produces a
// JSON patch.
//
// In the webhook, we will receive a Pod directly from Kubernetes, and return the
// patch directly; Kubernetes will take care of applying the patch.
//
// For kube-inject, we will parse out a Pod from YAML (which may involve
// extraction from higher level types like Deployment), then apply the patch
// locally.
//
// The injection logic works by first applying the rendered injection template on
// top of the input pod This is done using a Strategic Patch Merge
// (https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md)
// Currently only a single template is supported, although in the future the template to use will be configurable
// and multiple templates will be supported by applying them in successive order.
//
// In addition to the plain templating, there is some post processing done to
// handle cases that cannot feasibly be covered in the template, such as
// re-ordering pods, rewriting readiness probes, etc.
//
// Pipeline: snapshot original -> render+merge template -> re-apply user
// container overrides -> post-process -> diff against snapshot into a patch.
func injectPod(req InjectionParameters) ([]byte, error) {
	checkPreconditions(req)

	// The patch will be built relative to the initial pod, capture its current state
	originalPodSpec, err := json.Marshal(req.pod)
	if err != nil {
		return nil, err
	}

	// Run the injection template, giving us a partial pod spec
	mergedPod, injectedPodData, err := RunTemplate(req)
	if err != nil {
		return nil, fmt.Errorf("failed to run injection template: %v", err)
	}

	// Re-apply any container-level settings the user defined on top of the
	// freshly templated containers (e.g. a user-declared istio-proxy).
	mergedPod, err = reapplyOverwrittenContainers(mergedPod, req.pod, injectedPodData)
	if err != nil {
		return nil, fmt.Errorf("failed to re apply container: %v", err)
	}

	// Apply some additional transformations to the pod
	if err := postProcessPod(mergedPod, *injectedPodData, req); err != nil {
		return nil, fmt.Errorf("failed to process pod: %v", err)
	}

	patch, err := createPatch(mergedPod, originalPodSpec)
	if err != nil {
		return nil, fmt.Errorf("failed to create patch: %v", err)
	}

	log.Debugf("AdmissionResponse: patch=%v\n", string(patch))
	return patch, nil
}
// reapplyOverwrittenContainers enables users to provide container level overrides for settings in the injection template
// * originalPod: the pod before injection. If needed, we will apply some configurations from this pod on top of the final pod
// * templatePod: the rendered injection template. This is needed only to see what containers we injected
// * finalPod: the current result of injection, roughly equivalent to the merging of originalPod and templatePod
// There are essentially three cases we cover here:
//  1. There is no overlap in containers in original and template pod. We will do nothing.
//  2. There is an overlap (ie, both define istio-proxy), but that is because the pod is being re-injected.
//     In this case we do nothing, since we want to apply the new settings
//  3. There is an overlap. We will re-apply the original container.
//
// Where "overlap" is a container defined in both the original and template pod. Typically, this would mean
// the user has defined an `istio-proxy` container in their own pod spec.
func reapplyOverwrittenContainers(finalPod *corev1.Pod, originalPod *corev1.Pod, templatePod *corev1.Pod) (*corev1.Pod, error) {
	overrides := ParsedContainers{}
	existingOverrides := ParsedContainers{}
	// Overrides recorded on a previous injection pass are stored in the
	// ProxyOverrides annotation; restore them so re-injection is idempotent.
	if annotationOverrides, f := originalPod.Annotations[annotation.ProxyOverrides.Name]; f {
		if err := json.Unmarshal([]byte(annotationOverrides), &existingOverrides); err != nil {
			return nil, err
		}
	}
	parsedInjectedStatus := ParsedContainers{}
	status, alreadyInjected := originalPod.Annotations[annotation.SidecarStatus.Name]
	if alreadyInjected {
		parsedInjectedStatus = parseStatus(status)
	}
	for _, c := range templatePod.Spec.Containers {
		// sidecarStatus annotation is added on the pod by webhook. We should use new container template
		// instead of restoring what may be previously injected. Doing this ensures we are correctly calculating
		// env variables like ISTIO_META_APP_CONTAINERS and ISTIO_META_POD_PORTS.
		if match := FindContainer(c.Name, parsedInjectedStatus.Containers); match != nil {
			continue
		}
		// Prefer the override recorded from a previous pass; otherwise fall
		// back to the user's own container definition.
		match := FindContainer(c.Name, existingOverrides.Containers)
		if match == nil {
			match = FindContainer(c.Name, originalPod.Spec.Containers)
		}
		if match == nil {
			continue
		}
		overlay := *match.DeepCopy()
		// Strip fields (image: auto, OpenShift-assigned UIDs) that must come
		// from the template, not the user's container.
		resetFieldsInAutoImageContainer(&overlay, &c)
		overrides.Containers = append(overrides.Containers, overlay)
		newMergedPod, err := applyContainer(finalPod, overlay)
		if err != nil {
			return nil, fmt.Errorf("failed to apply sidecar container: %v", err)
		}
		finalPod = newMergedPod
	}
	// Same logic as above, applied to init containers.
	for _, c := range templatePod.Spec.InitContainers {
		if match := FindContainer(c.Name, parsedInjectedStatus.InitContainers); match != nil {
			continue
		}
		match := FindContainer(c.Name, existingOverrides.InitContainers)
		if match == nil {
			match = FindContainerFromPod(c.Name, originalPod)
		}
		if match == nil {
			continue
		}
		overlay := *match.DeepCopy()
		resetFieldsInAutoImageContainer(&overlay, &c)
		overrides.InitContainers = append(overrides.InitContainers, overlay)
		newMergedPod, err := applyInitContainer(finalPod, overlay)
		if err != nil {
			return nil, fmt.Errorf("failed to apply sidecar init container: %v", err)
		}
		finalPod = newMergedPod
	}

	if !alreadyInjected && (len(overrides.Containers) > 0 || len(overrides.InitContainers) > 0) {
		// We found any overrides. Put them in the pod annotation so we can re-apply them on re-injection
		js, err := json.Marshal(overrides)
		if err != nil {
			return nil, err
		}
		if finalPod.Annotations == nil {
			finalPod.Annotations = map[string]string{}
		}
		finalPod.Annotations[annotation.ProxyOverrides.Name] = string(js)
	}

	return finalPod, nil
}
// resetFieldsInAutoImageContainer clears fields on a user-supplied container
// override that must be sourced from the injection template instead: the
// "auto" placeholder image, and (when the template pins a non-default proxy
// UID) the RunAsUser/RunAsGroup that OpenShift may have auto-assigned.
func resetFieldsInAutoImageContainer(original *corev1.Container, template *corev1.Container) {
	if original.Image == AutoImage {
		original.Image = ""
	}
	// If the original pod comes with SecurityContext.RunAsUser and the template defines a value different than the default (1337),
	// then ignore the original value and stick with the final (merged one)
	// This is likely a scenario in OpenShift when the istio-proxy container with image: auto is parsed, if SecurityContext.RunAsUser
	// does not exist, OpenShift automatically assigns a value which is based on an annotation in the namespace. Regardless if the user
	// provided that value or if it was assigned by OpenShift, the correct value is the one in the template, as set by the `.ProxyUID` field.
	templateSetsUser := template.SecurityContext != nil && template.SecurityContext.RunAsUser != nil
	if original.SecurityContext != nil && templateSetsUser &&
		*template.SecurityContext.RunAsUser != constants.DefaultProxyUIDInt {
		original.SecurityContext.RunAsUser = nil
		original.SecurityContext.RunAsGroup = nil
	}
}
// parseStatus extracts containers from injected SidecarStatus annotation.
// Malformed JSON yields an empty result (logged, not returned as an error).
func parseStatus(status string) ParsedContainers {
	parsed := ParsedContainers{}
	var raw map[string]interface{}
	if err := json.Unmarshal([]byte(status), &raw); err != nil {
		log.Errorf("Failed to unmarshal %s : %v", annotation.SidecarStatus.Name, err)
		return parsed
	}
	// extract converts a JSON string array under key into named Containers.
	extract := func(key string) []corev1.Container {
		out := make([]corev1.Container, 0)
		value, exist := raw[key]
		if !exist || value == nil {
			return out
		}
		for _, v := range value.([]interface{}) {
			out = append(out, corev1.Container{Name: v.(string)})
		}
		return out
	}
	parsed.Containers = extract(Containers)
	parsed.InitContainers = extract(InitContainers)
	return parsed
}
// reinsertOverrides applies the containers listed in OverrideAnnotation to a pod. This is to achieve
// idempotency by handling an edge case where an injection template is modifying a container already
// present in the pod spec. In these cases, the logic to strip injected containers would remove the
// original injected parts as well, leading to the templating logic being different (for example,
// reading the .Spec.Containers field would be empty).
func reinsertOverrides(pod *corev1.Pod) (*corev1.Pod, error) {
	type podOverrides struct {
		Containers     []corev1.Container `json:"containers,omitempty"`
		InitContainers []corev1.Container `json:"initContainers,omitempty"`
	}

	overrides := podOverrides{}
	if raw, ok := pod.Annotations[annotation.ProxyOverrides.Name]; ok {
		if err := json.Unmarshal([]byte(raw), &overrides); err != nil {
			return nil, err
		}
	}

	// Work on a copy; the caller's pod is left untouched.
	pod = pod.DeepCopy()
	for _, c := range overrides.Containers {
		if FindContainer(c.Name, pod.Spec.Containers) == nil {
			pod.Spec.Containers = append(pod.Spec.Containers, c)
		}
	}
	for _, c := range overrides.InitContainers {
		if FindContainer(c.Name, pod.Spec.InitContainers) == nil {
			pod.Spec.InitContainers = append(pod.Spec.InitContainers, c)
		}
	}

	return pod, nil
}
// createPatch computes a JSON patch that transforms the original pod
// (provided as marshaled JSON) into the given (injected) pod.
func createPatch(pod *corev1.Pod, original []byte) ([]byte, error) {
	modified, err := json.Marshal(pod)
	if err != nil {
		return nil, err
	}
	ops, err := jsonpatch.CreatePatch(original, modified)
	if err != nil {
		return nil, err
	}
	return json.Marshal(ops)
}
// postProcessPod applies additionally transformations to the pod after merging with the injected template
// This is generally things that cannot reasonably be added to the template
// Steps (in order): cluster info overwrite, prometheus merge annotations,
// app probe rewriting, status/network metadata, and container reordering.
func postProcessPod(pod *corev1.Pod, injectedPod corev1.Pod, req InjectionParameters) error {
	// Later steps write into Annotations/Labels unconditionally; ensure the
	// maps exist.
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}

	overwriteClusterInfo(pod, req)

	if err := applyPrometheusMerge(pod, req.meshConfig); err != nil {
		return err
	}

	if err := applyRewrite(pod, req); err != nil {
		return err
	}

	applyMetadata(pod, injectedPod, req)

	if err := reorderPod(pod, req); err != nil {
		return err
	}

	return nil
}
// applyMetadata stamps injection-related labels and annotations onto the pod:
// the topology network label (when ISTIO_META_NETWORK is set), the sidecar
// status annotation, and any configured extra injected annotations.
func applyMetadata(pod *corev1.Pod, injectedPodData corev1.Pod, req InjectionParameters) {
	if network, ok := req.proxyEnvs["ISTIO_META_NETWORK"]; ok {
		pod.Labels[label.TopologyNetwork.Name] = network
	}
	// Add all additional injected annotations. These are overridden if needed
	pod.Annotations[annotation.SidecarStatus.Name] = getInjectionStatus(injectedPodData.Spec, req.revision)
	// Deprecated; should be set directly in the template instead
	for key, value := range req.injectedAnnotations {
		pod.Annotations[key] = value
	}
}
// reorderPod ensures containers are properly ordered after merging
func reorderPod(pod *corev1.Pod, req InjectionParameters) error {
	var merr error
	mc := req.meshConfig
	// Get copy of pod proxyconfig, to determine container ordering
	if pca, f := req.pod.ObjectMeta.GetAnnotations()[annotation.ProxyConfig.Name]; f {
		mc, merr = mesh.ApplyProxyConfig(pca, req.meshConfig)
		if merr != nil {
			return merr
		}
	}

	// HoldApplicationUntilProxyStarts can come from either the (possibly
	// pod-overridden) mesh config or the injector values.
	// nolint: staticcheck
	holdPod := mc.GetDefaultConfig().GetHoldApplicationUntilProxyStarts().GetValue() ||
		req.valuesConfig.asStruct.GetGlobal().GetProxy().GetHoldApplicationUntilProxyStarts().GetValue()

	proxyLocation := MoveLast
	// If HoldApplicationUntilProxyStarts is set, reorder the proxy location
	if holdPod {
		proxyLocation = MoveFirst
	}

	// Proxy container should be last, unless HoldApplicationUntilProxyStarts is set
	// This is to ensure `kubectl exec` and similar commands continue to default to the user's container
	pod.Spec.Containers = modifyContainers(pod.Spec.Containers, ProxyContainerName, proxyLocation)

	if hasContainer(pod.Spec.InitContainers, ProxyContainerName) {
		// This is using native sidecar support in K8s.
		// We want istio to be first in this case, so init containers are part of the mesh
		// This is {istio-init/istio-validation} => proxy => rest.
		// Note: successive MoveFirst calls stack in reverse, so the last call
		// below (InitContainerName) ends up first.
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, EnableCoreDumpName, MoveFirst)
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, ProxyContainerName, MoveFirst)
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, ValidationContainerName, MoveFirst)
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, InitContainerName, MoveFirst)
	} else {
		// Else, we want iptables setup last so we do not blackhole init containers
		// This is istio-validation => rest => istio-init (note: only one of istio-init or istio-validation should be present)
		// Validation container must be first to block any user containers
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, ValidationContainerName, MoveFirst)
		// Init container must be last to allow any traffic to pass before iptables is setup
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, InitContainerName, MoveLast)
		pod.Spec.InitContainers = modifyContainers(pod.Spec.InitContainers, EnableCoreDumpName, MoveLast)
	}

	return nil
}
// applyRewrite rewrites the application's HTTP probes to go through the
// sidecar's status port, when probe rewriting is enabled by annotation or
// injector values. No-op when the pod has no sidecar.
func applyRewrite(pod *corev1.Pod, req InjectionParameters) error {
	sidecar := FindSidecar(pod)
	if sidecar == nil {
		return nil
	}

	rewrite := ShouldRewriteAppHTTPProbers(pod.Annotations, req.valuesConfig.asStruct.GetSidecarInjectorWebhook().GetRewriteAppHTTPProbe().GetValue())
	// We don't have to escape json encoding here when using golang libraries.
	if rewrite {
		if prober := DumpAppProbers(pod, req.meshConfig.GetDefaultConfig().GetStatusPort()); prober != "" {
			// If sidecar.istio.io/status is not present then append instead of merge.
			_, previouslyInjected := pod.Annotations[annotation.SidecarStatus.Name]
			sidecar.Env = mergeOrAppendProbers(previouslyInjected, sidecar.Env, prober)
		}
		patchRewriteProbe(pod.Annotations, pod, req.meshConfig.GetDefaultConfig().GetStatusPort())
	}
	return nil
}
// mergeOrAppendProbers ensures that if sidecar has existing ISTIO_KUBE_APP_PROBERS,
// then probers should be merged. On a first injection the new probers are
// simply appended; on re-injection any existing prober entries take
// precedence over newly generated ones. Any JSON error leaves envVars as-is.
func mergeOrAppendProbers(previouslyInjected bool, envVars []corev1.EnvVar, newProbers string) []corev1.EnvVar {
	if !previouslyInjected {
		return append(envVars, corev1.EnvVar{Name: status.KubeAppProberEnvName, Value: newProbers})
	}
	for idx := range envVars {
		if envVars[idx].Name != status.KubeAppProberEnvName {
			continue
		}
		var existing KubeAppProbers
		if err := json.Unmarshal([]byte(envVars[idx].Value), &existing); err != nil {
			log.Errorf("failed to unmarshal existing kubeAppProbers %v", err)
			return envVars
		}
		var merged KubeAppProbers
		if err := json.Unmarshal([]byte(newProbers), &merged); err != nil {
			log.Errorf("failed to unmarshal new kubeAppProbers %v", err)
			return envVars
		}
		// merge old and new probers: existing entries override new ones.
		for k, v := range existing {
			merged[k] = v
		}
		out, err := json.Marshal(merged)
		if err != nil {
			log.Errorf("failed to serialize the merged app prober config %v", err)
			return envVars
		}
		// replace old env var with new value.
		envVars[idx] = corev1.EnvVar{Name: status.KubeAppProberEnvName, Value: string(out)}
		return envVars
	}
	return envVars
}
// emptyScrape is the zero value used to detect "no scrape config present".
var emptyScrape = status.PrometheusScrapeConfiguration{}

// applyPrometheusMerge configures prometheus scraping annotations for the "metrics merge" feature.
// This moves the current prometheus.io annotations into an environment variable and replaces them
// pointing to the agent.
func applyPrometheusMerge(pod *corev1.Pod, mesh *meshconfig.MeshConfig) error {
	if getPrometheusScrape(pod) &&
		enablePrometheusMerge(mesh, pod.ObjectMeta.Annotations) {
		targetPort := strconv.Itoa(int(mesh.GetDefaultConfig().GetStatusPort()))
		if cur, f := getPrometheusPort(pod); f {
			// We have already set the port, assume user is controlling this or, more likely, re-injected
			// the pod.
			if cur == targetPort {
				return nil
			}
		}
		// Preserve the original scrape settings for the agent by stashing
		// them in the sidecar's environment.
		scrape := getPrometheusScrapeConfiguration(pod)
		sidecar := FindSidecar(pod)
		if sidecar != nil && scrape != emptyScrape {
			by, err := json.Marshal(scrape)
			if err != nil {
				return err
			}
			sidecar.Env = append(sidecar.Env, corev1.EnvVar{Name: status.PrometheusScrapingConfig.Name, Value: string(by)})
		}
		if pod.Annotations == nil {
			pod.Annotations = map[string]string{}
		}
		// if a user sets `prometheus/io/path: foo`, then we add `prometheus.io/path: /stats/prometheus`
		// prometheus will pick a random one
		// need to clear out all variants and then set ours
		clearPrometheusAnnotations(pod)
		pod.Annotations["prometheus.io/port"] = targetPort
		pod.Annotations["prometheus.io/path"] = "/stats/prometheus"
		pod.Annotations["prometheus.io/scrape"] = "true"
		return nil
	}
	return nil
}
// getPrometheusScrape respect prometheus scrape config
// not to doing prometheusMerge if this return false.
// Annotation keys are matched after label-name sanitization, so variants like
// "prometheus.io/scrape" and "prometheus.io.scrape" are both recognized.
// Unparseable values are ignored; default is true.
func getPrometheusScrape(pod *corev1.Pod) bool {
	for key, val := range pod.Annotations {
		if strutil.SanitizeLabelName(key) != prometheusScrapeAnnotation {
			continue
		}
		scrape, err := strconv.ParseBool(val)
		if err == nil {
			return scrape
		}
	}
	return true
}
// prometheusAnnotations is the set of (sanitized) prometheus annotation keys
// that clearPrometheusAnnotations strips before the injector sets its own.
var prometheusAnnotations = sets.New(
	prometheusPathAnnotation,
	prometheusPortAnnotation,
	prometheusScrapeAnnotation,
)
// clearPrometheusAnnotations removes every annotation whose sanitized key is
// one of the known prometheus scrape/port/path annotations. Keys are
// collected first to avoid mutating the map during iteration.
func clearPrometheusAnnotations(pod *corev1.Pod) {
	toDelete := make([]string, 0, 2)
	for key := range pod.Annotations {
		if prometheusAnnotations.Contains(strutil.SanitizeLabelName(key)) {
			toDelete = append(toDelete, key)
		}
	}
	for _, key := range toDelete {
		delete(pod.Annotations, key)
	}
}
// getPrometheusScrapeConfiguration collects the pod's prometheus port, scrape,
// and path annotations (matched after label-name sanitization) into a single
// scrape configuration struct. Missing annotations leave zero values.
func getPrometheusScrapeConfiguration(pod *corev1.Pod) status.PrometheusScrapeConfiguration {
	cfg := status.PrometheusScrapeConfiguration{}
	for key, val := range pod.Annotations {
		switch strutil.SanitizeLabelName(key) {
		case prometheusPortAnnotation:
			cfg.Port = val
		case prometheusScrapeAnnotation:
			cfg.Scrape = val
		case prometheusPathAnnotation:
			cfg.Path = val
		}
	}
	return cfg
}
// getPrometheusPort returns the value of the pod's prometheus port annotation
// (matched after label-name sanitization) and whether it was present.
func getPrometheusPort(pod *corev1.Pod) (string, bool) {
	for key, val := range pod.Annotations {
		if strutil.SanitizeLabelName(key) == prometheusPortAnnotation {
			return val, true
		}
	}
	return "", false
}
const (
	// AutoImage is the special image name to indicate to the injector that we should use the injected image, and NOT override it
	// This is necessary because image is a required field on container, so if a user defines an istio-proxy container
	// with customizations they must set an image.
	AutoImage = "auto"
)
// applyContainer merges a container spec on top of the provided pod using a
// strategic merge patch, returning the patched pod.
func applyContainer(target *corev1.Pod, container corev1.Container) (*corev1.Pod, error) {
	patch := &corev1.Pod{Spec: corev1.PodSpec{Containers: []corev1.Container{container}}}
	raw, err := json.Marshal(patch)
	if err != nil {
		return nil, err
	}
	return applyOverlay(target, raw)
}
// applyInitContainer merges a container spec on top of the provided pod as an
// init container, using a strategic merge patch.
func applyInitContainer(target *corev1.Pod, container corev1.Container) (*corev1.Pod, error) {
	patch := &corev1.Pod{Spec: corev1.PodSpec{
		// We need to set containers to empty, otherwise it will marshal as "null" and delete all containers
		Containers:     []corev1.Container{},
		InitContainers: []corev1.Container{container},
	}}
	raw, err := json.Marshal(patch)
	if err != nil {
		return nil, err
	}
	return applyOverlay(target, raw)
}
// patchHandleUnmarshal decodes a patch document into a generic map using the
// supplied unmarshal function. A nil input is treated as an empty document;
// any decode failure is normalized to mergepatch.ErrBadJSONDoc.
func patchHandleUnmarshal(j []byte, unmarshal func(data []byte, v any) error) (map[string]any, error) {
	if j == nil {
		j = []byte("{}")
	}
	out := map[string]any{}
	if err := unmarshal(j, &out); err != nil {
		return nil, mergepatch.ErrBadJSONDoc
	}
	return out, nil
}
// StrategicMergePatchYAML is a small fork of strategicpatch.StrategicMergePatch to allow YAML patches
// This avoids expensive conversion from YAML to JSON
// dataStruct provides the schema (patch-merge keys) used to drive the merge;
// the result is returned as JSON.
func StrategicMergePatchYAML(originalJSON []byte, patchYAML []byte, dataStruct any) ([]byte, error) {
	schema, err := strategicpatch.NewPatchMetaFromStruct(dataStruct)
	if err != nil {
		return nil, err
	}

	originalMap, err := patchHandleUnmarshal(originalJSON, json.Unmarshal)
	if err != nil {
		return nil, err
	}
	// The patch side is YAML; decode it with the YAML unmarshaler instead.
	patchMap, err := patchHandleUnmarshal(patchYAML, func(data []byte, v any) error {
		return yaml.Unmarshal(data, v)
	})
	if err != nil {
		return nil, err
	}

	result, err := strategicpatch.StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema)
	if err != nil {
		return nil, err
	}

	return json.Marshal(result)
}
// applyOverlayYAML merges a pod spec, provided as YAML, on top of the provided
// pod via a strategic merge patch. (Comment fixed: previously mis-named
// "applyContainer".)
func applyOverlayYAML(target *corev1.Pod, overlayYAML []byte) (*corev1.Pod, error) {
	currentJSON, err := json.Marshal(target)
	if err != nil {
		return nil, err
	}

	pod := corev1.Pod{}
	// Overlay the injected template onto the original podSpec
	patched, err := StrategicMergePatchYAML(currentJSON, overlayYAML, pod)
	if err != nil {
		return nil, fmt.Errorf("strategic merge: %v", err)
	}

	if err := json.Unmarshal(patched, &pod); err != nil {
		return nil, fmt.Errorf("unmarshal patched pod: %v", err)
	}
	return &pod, nil
}
// applyOverlay merges a pod spec, provided as JSON, on top of the provided
// pod via a strategic merge patch. (Comment fixed: previously mis-named
// "applyContainer".)
func applyOverlay(target *corev1.Pod, overlayJSON []byte) (*corev1.Pod, error) {
	currentJSON, err := json.Marshal(target)
	if err != nil {
		return nil, err
	}

	pod := corev1.Pod{}
	// Overlay the injected template onto the original podSpec
	patched, err := strategicpatch.StrategicMergePatch(currentJSON, overlayJSON, pod)
	if err != nil {
		return nil, fmt.Errorf("strategic merge: %v", err)
	}

	if err := json.Unmarshal(patched, &pod); err != nil {
		return nil, fmt.Errorf("unmarshal patched pod: %v", err)
	}
	return &pod, nil
}
// inject handles a single admission request: it decodes the pod, decides
// whether injection applies, runs the injection pipeline, and returns a
// JSONPatch admission response. Errors are reported via an allowed=false
// response carrying the error message.
func (wh *Webhook) inject(ar *kube.AdmissionReview, path string) *kube.AdmissionResponse {
	req := ar.Request
	var pod corev1.Pod
	if err := json.Unmarshal(req.Object.Raw, &pod); err != nil {
		handleError(fmt.Sprintf("Could not unmarshal raw object: %v %s", err,
			string(req.Object.Raw)))
		return toAdmissionResponse(err)
	}
	// Managed fields is sometimes extremely large, leading to excessive CPU time on patch generation
	// It does not impact the injection output at all, so we can just remove it.
	pod.ManagedFields = nil

	// Deal with potential empty fields, e.g., when the pod is created by a deployment
	podName := potentialPodName(pod.ObjectMeta)
	if pod.ObjectMeta.Namespace == "" {
		pod.ObjectMeta.Namespace = req.Namespace
	}
	log.Infof("Sidecar injection request for %v/%v", req.Namespace, podName)
	log.Debugf("Object: %v", string(req.Object.Raw))
	log.Debugf("OldObject: %v", string(req.OldObject.Raw))

	// Hold the read lock while consulting webhook config; it is released
	// before running the (potentially slow) injection itself.
	wh.mu.RLock()
	if !injectRequired(IgnoredNamespaces.UnsortedList(), wh.Config, &pod.Spec, pod.ObjectMeta) {
		log.Infof("Skipping %s/%s due to policy check", pod.ObjectMeta.Namespace, podName)
		totalSkippedInjections.Increment()
		wh.mu.RUnlock()
		return &kube.AdmissionResponse{
			Allowed: true,
		}
	}
	proxyConfig := wh.env.GetProxyConfigOrDefault(pod.Namespace, pod.Labels, pod.Annotations, wh.meshConfig)
	deploy, typeMeta := kube.GetDeployMetaFromPod(&pod)
	var podNamespace *corev1.Namespace
	if wh.namespaces != nil {
		podNamespace = wh.namespaces.Get(pod.Namespace, "")
	}
	// Snapshot everything the injection needs while still under the lock.
	params := InjectionParameters{
		pod:                 &pod,
		deployMeta:          deploy,
		namespace:           podNamespace,
		typeMeta:            typeMeta,
		templates:           wh.Config.Templates,
		defaultTemplate:     wh.Config.DefaultTemplates,
		aliases:             wh.Config.Aliases,
		meshConfig:          wh.meshConfig,
		proxyConfig:         proxyConfig,
		valuesConfig:        wh.valuesConfig,
		revision:            wh.revision,
		injectedAnnotations: wh.Config.InjectedAnnotations,
		proxyEnvs:           parseInjectEnvs(path),
	}
	wh.mu.RUnlock()

	patchBytes, err := injectPod(params)
	if err != nil {
		handleError(fmt.Sprintf("Pod injection failed: %v", err))
		return toAdmissionResponse(err)
	}

	reviewResponse := kube.AdmissionResponse{
		Allowed: true,
		Patch:   patchBytes,
		PatchType: func() *string {
			pt := "JSONPatch"
			return &pt
		}(),
	}
	totalSuccessfulInjections.Increment()
	return &reviewResponse
}
// serveInject is the HTTP handler for /inject. It validates the request body
// and content type, decodes the AdmissionReview, delegates to inject(), and
// writes back an AdmissionReview response in the same API version.
func (wh *Webhook) serveInject(w http.ResponseWriter, r *http.Request) {
	totalInjections.Increment()
	t0 := time.Now()
	defer func() { injectionTime.Record(time.Since(t0).Seconds()) }()
	var body []byte
	if r.Body != nil {
		if data, err := kube.HTTPConfigReader(r); err == nil {
			body = data
		} else {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	if len(body) == 0 {
		handleError("no body found")
		http.Error(w, "no body found", http.StatusBadRequest)
		return
	}

	// verify the content type is accurate
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		handleError(fmt.Sprintf("contentType=%s, expect application/json", contentType))
		http.Error(w, "invalid Content-Type, want `application/json`", http.StatusUnsupportedMediaType)
		return
	}

	path := ""
	if r.URL != nil {
		path = r.URL.Path
	}

	var reviewResponse *kube.AdmissionResponse
	var obj runtime.Object
	var ar *kube.AdmissionReview
	// Decode failures still produce a response (with the error message)
	// rather than an HTTP error, per admission webhook conventions.
	if out, _, err := deserializer.Decode(body, nil, obj); err != nil {
		handleError(fmt.Sprintf("Could not decode body: %v", err))
		reviewResponse = toAdmissionResponse(err)
	} else {
		log.Debugf("AdmissionRequest for path=%s\n", path)
		ar, err = kube.AdmissionReviewKubeToAdapter(out)
		if err != nil {
			handleError(fmt.Sprintf("Could not decode object: %v", err))
			reviewResponse = toAdmissionResponse(err)
		} else {
			reviewResponse = wh.inject(ar, path)
		}
	}

	// Echo the request's TypeMeta/UID so the API server can correlate the
	// response; fall back to empty apiVersion when decoding failed entirely.
	response := kube.AdmissionReview{}
	response.Response = reviewResponse
	var responseKube runtime.Object
	var apiVersion string
	if ar != nil {
		apiVersion = ar.APIVersion
		response.TypeMeta = ar.TypeMeta
		if response.Response != nil {
			if ar.Request != nil {
				response.Response.UID = ar.Request.UID
			}
		}
	}
	responseKube = kube.AdmissionReviewAdapterToKube(&response, apiVersion)
	resp, err := json.Marshal(responseKube)
	if err != nil {
		log.Errorf("Could not encode response: %v", err)
		http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
		return
	}
	if _, err := w.Write(resp); err != nil {
		log.Errorf("Could not write response: %v", err)
		http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
	}
}
// parseInjectEnvs parse new envs from inject url path. format: /inject/k1/v1/k2/v2
// slash characters in values must be replaced by --slash-- (e.g. /inject/k1/abc--slash--def/k2/v2).
// Two syntaxes are supported: the path-segment form above, and the deprecated
// ":ENV:k=v:ENV:k2=v2" form.
func parseInjectEnvs(path string) map[string]string {
	path = strings.TrimSuffix(path, "/")
	res := func(path string) []string {
		parts := strings.SplitN(path, "/", 3)
		var newRes []string
		if len(parts) == 3 { // If length is less than 3, then the path is simply "/inject".
			if strings.HasPrefix(parts[2], ":ENV:") {
				// Deprecated, not recommended.
				// Note that this syntax fails validation when used to set injectionPath (i.e., service.path in mwh).
				// It doesn't fail validation when used to set injectionURL, however. K8s bug maybe?
				pairs := strings.Split(parts[2], ":ENV:")
				for i := 1; i < len(pairs); i++ { // skip the first part, it is a nil
					pair := strings.SplitN(pairs[i], "=", 2)
					// The first part is the variable name which can not be empty
					// the second part is the variable value which can be empty but has to exist
					// for example, aaa=bbb, aaa= are valid, but =aaa or = are not valid, the
					// invalid ones will be ignored.
					// Fix: check len(pair) == 2 BEFORE indexing pair[0]; the
					// original order only worked because SplitN never returns
					// an empty slice, which made the code fragile.
					if len(pair) == 2 && len(pair[0]) > 0 {
						newRes = append(newRes, pair...)
					}
				}
				return newRes
			}
			newRes = strings.Split(parts[2], "/")
		}
		for i, value := range newRes {
			if i%2 != 0 {
				// Replace --slash-- with / in values.
				newRes[i] = strings.ReplaceAll(value, "--slash--", "/")
			}
		}
		return newRes
	}(path)
	newEnvs := make(map[string]string)

	for i := 0; i < len(res); i += 2 {
		k := res[i]
		if i == len(res)-1 { // ignore the last key without value
			log.Warnf("Odd number of inject env entries, ignore the last key %s\n", k)
			break
		}

		// Map well-known URL parameters to their env names; unknown keys are
		// upper-cased and used directly.
		env, found := URLParameterToEnv[k]
		if !found {
			env = strings.ToUpper(k) // if not found, use the custom env directly
		}
		if env != "" {
			newEnvs[env] = res[i+1]
		}
	}

	return newEnvs
}
// handleError logs an injection failure and bumps the failure counter.
func handleError(message string) {
	log.Errorf(message)
	totalFailedInjections.Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
"context"
"fmt"
"sync"
"sync/atomic"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/gvk"
istiogvr "istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/config/schema/kubeclient"
types "istio.io/istio/pkg/config/schema/kubetypes"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/informerfactory"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/ptr"
)
// fullClient combines cached reads (Informer) with direct writes (writeClient)
// into a single read/write client.
type fullClient[T controllers.Object] struct {
	writeClient[T]
	Informer[T]
}

// writeClient performs writes directly against the API server; it has no cache.
type writeClient[T controllers.Object] struct {
	client kube.Client
}

// informerClient is a read-only client backed by a shared informer cache.
type informerClient[T controllers.Object] struct {
	informer      cache.SharedIndexInformer
	startInformer func(stopCh <-chan struct{})
	// filter, when non-nil, is applied to all reads; objects failing it are
	// treated as absent.
	filter func(t any) bool

	// handlerMu guards registeredHandlers.
	handlerMu          sync.RWMutex
	registeredHandlers []cache.ResourceEventHandlerRegistration
}
// Get looks up an object by name/namespace in the informer cache.
// The zero value is returned when the object is missing, a cache error occurs,
// or the object is excluded by the client's filter.
func (n *informerClient[T]) Get(name, namespace string) T {
	obj, exists, err := n.informer.GetIndexer().GetByKey(keyFunc(name, namespace))
	if err != nil || !exists {
		return ptr.Empty[T]()
	}
	if res := obj.(T); n.applyFilter(res) {
		return res
	}
	return ptr.Empty[T]()
}
// applyFilter reports whether t passes the client's filter; a nil filter
// admits everything.
func (n *informerClient[T]) applyFilter(t T) bool {
	return n.filter == nil || n.filter(t)
}
// Start runs the underlying informer until stopCh is closed.
func (n *informerClient[T]) Start(stopCh <-chan struct{}) {
	n.startInformer(stopCh)
}

// Create writes the object to the API server in the object's own namespace.
func (n *writeClient[T]) Create(object T) (T, error) {
	api := kubeclient.GetWriteClient[T](n.client, object.GetNamespace())
	return api.Create(context.Background(), object, metav1.CreateOptions{})
}

// Update overwrites the object on the API server.
func (n *writeClient[T]) Update(object T) (T, error) {
	api := kubeclient.GetWriteClient[T](n.client, object.GetNamespace())
	return api.Update(context.Background(), object, metav1.UpdateOptions{})
}

// UpdateStatus writes the object's status subresource. It returns an error if
// the type's client does not implement the status-writing API.
func (n *writeClient[T]) UpdateStatus(object T) (T, error) {
	api, ok := kubeclient.GetWriteClient[T](n.client, object.GetNamespace()).(kubetypes.WriteStatusAPI[T])
	if !ok {
		return ptr.Empty[T](), fmt.Errorf("%T does not support UpdateStatus", object)
	}
	return api.UpdateStatus(context.Background(), object, metav1.UpdateOptions{})
}

// Delete removes the named object from the given namespace.
func (n *writeClient[T]) Delete(name, namespace string) error {
	api := kubeclient.GetWriteClient[T](n.client, namespace)
	return api.Delete(context.Background(), name, metav1.DeleteOptions{})
}
// ShutdownHandlers removes every handler previously registered via AddEventHandler.
func (n *informerClient[T]) ShutdownHandlers() {
	n.handlerMu.Lock()
	defer n.handlerMu.Unlock()
	for _, c := range n.registeredHandlers {
		// Best-effort removal; error intentionally discarded.
		_ = n.informer.RemoveEventHandler(c)
	}
}

// AddEventHandler registers h, wrapped so that the client's filter (if any) is
// applied before events are delivered. The registration is recorded so it can
// later be removed by ShutdownHandlers and polled by HasSynced.
func (n *informerClient[T]) AddEventHandler(h cache.ResourceEventHandler) {
	fh := cache.FilteringResourceEventHandler{
		FilterFunc: func(obj interface{}) bool {
			if n.filter == nil {
				return true
			}
			return n.filter(obj)
		},
		Handler: h,
	}
	// NOTE(review): the error from AddEventHandler is discarded; registration
	// can fail if the informer has already stopped — confirm that is acceptable here.
	reg, _ := n.informer.AddEventHandler(fh)
	n.handlerMu.Lock()
	defer n.handlerMu.Unlock()
	n.registeredHandlers = append(n.registeredHandlers, reg)
}
// HasSynced reports whether the informer cache and every registered event
// handler have completed their initial sync.
func (n *informerClient[T]) HasSynced() bool {
	if !n.informer.HasSynced() {
		return false
	}
	n.handlerMu.RLock()
	defer n.handlerMu.RUnlock()
	// Per-registration HasSynced is cheap, so checking under the lock is fine.
	for _, reg := range n.registeredHandlers {
		if !reg.HasSynced() {
			return false
		}
	}
	return true
}
// List returns all cached objects in namespace matching selector, with the
// client's filter applied.
func (n *informerClient[T]) List(namespace string, selector klabels.Selector) []T {
	var res []T
	err := cache.ListAllByNamespace(n.informer.GetIndexer(), namespace, selector, func(i any) {
		cast := i.(T)
		if n.applyFilter(cast) {
			res = append(res, cast)
		}
	})

	// Should never happen
	if err != nil && features.EnableUnsafeAssertions {
		log.Fatalf("lister returned err for %v: %v", namespace, err)
	}
	return res
}

// ListUnfiltered is List without applying the client's filter.
func (n *informerClient[T]) ListUnfiltered(namespace string, selector klabels.Selector) []T {
	var res []T
	err := cache.ListAllByNamespace(n.informer.GetIndexer(), namespace, selector, func(i any) {
		cast := i.(T)
		res = append(res, cast)
	})

	// Should never happen
	if err != nil && features.EnableUnsafeAssertions {
		log.Fatalf("lister returned err for %v: %v", namespace, err)
	}
	return res
}
// Filter allows filtering read operations.
// This is aliased to allow easier access when constructing clients.
type Filter = kubetypes.Filter

// New returns a Client for the given type.
// Internally, this uses a shared informer, so calling this multiple times will share the same internals.
func New[T controllers.ComparableObject](c kube.Client) Client[T] {
	return NewFiltered[T](c, Filter{})
}

// NewFiltered returns a Client with some filter applied.
// Internally, this uses a shared informer, so calling this multiple times will share the same internals. This is keyed on
// unique {Type,LabelSelector,FieldSelector}.
//
// Warning: if conflicting filter.ObjectTransform are used for the same key, the first one registered wins.
// This means there must only be one filter configuration for a given type using the same kube.Client.
// Use with caution.
func NewFiltered[T controllers.ComparableObject](c kube.Client, filter Filter) Client[T] {
	// Resolve the GVR from the static type, then fetch (or build) the shared informer for it.
	gvr := gvk.MustToGVR(types.GetGVK[T]())
	inf := kubeclient.GetInformerFiltered[T](c, ToOpts(c, gvr, filter))
	return &fullClient[T]{
		writeClient: writeClient[T]{client: c},
		Informer:    newInformerClient[T](inf, filter),
	}
}
// NewDelayedInformer returns a "delayed" client for the given GVR. This is read-only.
// A delayed client is used for CRD watches when the CRD may or may not exist. When the CRD is not present, the client will return
// empty results for all operations and watch for the CRD creation. Once created, watchers will be started and read operations will
// begin returning results.
// HasSynced will only return true if the CRD was not present upon creation OR the watch is fully synced. This ensures the creation
// is fully consistent if the CRD was present during creation; otherwise it is eventually consistent.
func NewDelayedInformer[T controllers.ComparableObject](
	c kube.Client,
	gvr schema.GroupVersionResource,
	informerType kubetypes.InformerType,
	filter Filter,
) Informer[T] {
	watcher := c.CrdWatcher()
	if watcher == nil {
		// Programmer error: delayed informers require a client built with a CrdWatcher.
		log.Fatalf("NewDelayedInformer called without a CrdWatcher enabled")
	}
	delay := newDelayedFilter(gvr, watcher)
	// Informer construction is deferred until the CRD is known to exist.
	inf := func() informerfactory.StartableInformer {
		opts := ToOpts(c, gvr, filter)
		opts.InformerType = informerType
		return kubeclient.GetInformerFilteredFromGVR(c, opts, gvr)
	}
	return newDelayedInformer[T](gvr, inf, delay, filter)
}

// NewUntypedInformer returns an untyped client for a given GVR. This is read-only.
func NewUntypedInformer(c kube.Client, gvr schema.GroupVersionResource, filter Filter) Untyped {
	inf := kubeclient.GetInformerFilteredFromGVR(c, ToOpts(c, gvr, filter), gvr)
	return newInformerClient[controllers.Object](inf, filter)
}

// NewDynamic returns a dynamic client for a given GVR. This is read-only.
func NewDynamic(c kube.Client, gvr schema.GroupVersionResource, filter Filter) Untyped {
	opts := ToOpts(c, gvr, filter)
	opts.InformerType = kubetypes.DynamicInformer
	inf := kubeclient.GetInformerFilteredFromGVR(c, opts, gvr)
	return newInformerClient[controllers.Object](inf, filter)
}

// NewMetadata returns a metadata client for a given GVR. This is read-only.
func NewMetadata(c kube.Client, gvr schema.GroupVersionResource, filter Filter) Informer[*metav1.PartialObjectMetadata] {
	opts := ToOpts(c, gvr, filter)
	opts.InformerType = kubetypes.MetadataInformer
	inf := kubeclient.GetInformerFilteredFromGVR(c, opts, gvr)
	return newInformerClient[*metav1.PartialObjectMetadata](inf, filter)
}

// NewWriteClient is exposed for testing.
func NewWriteClient[T controllers.ComparableObject](c kube.Client) Writer[T] {
	return &writeClient[T]{client: c}
}
// newDelayedInformer builds an Informer that acts as an empty stub until the CRD
// for gvr is present, then swaps itself for a real informer-backed client.
func newDelayedInformer[T controllers.ComparableObject](
	gvr schema.GroupVersionResource,
	getInf func() informerfactory.StartableInformer,
	delay kubetypes.DelayedFilter,
	filter Filter,
) Informer[T] {
	delayedClient := &delayedClient[T]{
		inf:     new(atomic.Pointer[Informer[T]]),
		delayed: delay,
	}

	// If resource is not yet known, we will use the delayedClient.
	// When the resource is later loaded, the callback will trigger and swap our dummy delayedClient
	// with a full client
	readyNow := delay.KnownOrCallback(func(stop <-chan struct{}) {
		// The inf() call is responsible for starting the informer
		inf := getInf()
		fc := &informerClient[T]{
			informer:      inf.Informer,
			startInformer: inf.Start,
			filter:        filter.ObjectFilter,
		}
		inf.Start(stop)
		log.Infof("%v is now ready, building client", gvr.GroupResource())
		// Swap out the dummy client with the full one
		delayedClient.set(fc)
	})
	if !readyNow {
		log.Debugf("%v is not ready now, building delayed client", gvr.GroupResource())
		return delayedClient
	}
	log.Debugf("%v ready now, building client", gvr.GroupResource())
	// CRD already exists: skip the stub entirely and return a direct client.
	return newInformerClient[T](getInf(), filter)
}

// newInformerClient wraps a startable informer in the read-only client interface.
func newInformerClient[T controllers.ComparableObject](inf informerfactory.StartableInformer, filter Filter) Informer[T] {
	return &informerClient[T]{
		informer:      inf.Informer,
		startInformer: inf.Start,
		filter:        filter.ObjectFilter,
	}
}
// keyFunc is the internal API key function that returns "namespace"/"name" or
// "name" if "namespace" is empty.
func keyFunc(name, namespace string) string {
	if namespace == "" {
		return name
	}
	return namespace + "/" + name
}
// ToOpts converts a kclient Filter into the lower-level InformerOptions.
// For namespaced resources with no explicit namespace filter, the namespace
// defaults to features.InformerWatchNamespace.
func ToOpts(c kube.Client, gvr schema.GroupVersionResource, filter Filter) kubetypes.InformerOptions {
	ns := filter.Namespace
	if !istiogvr.IsClusterScoped(gvr) && ns == "" {
		ns = features.InformerWatchNamespace
	}
	return kubetypes.InformerOptions{
		LabelSelector:   filter.LabelSelector,
		FieldSelector:   filter.FieldSelector,
		Namespace:       ns,
		ObjectTransform: filter.ObjectTransform,
		Cluster:         c.ClusterID(),
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clienttest
import (
"fmt"
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
metadatafake "k8s.io/client-go/metadata/fake"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/test"
)
// MakeCRD builds a CustomResourceDefinition named <resource>.<group> and, when
// the client's metadata client is a fake, writes the CRD's partial metadata
// into it so metadata-based CRD watches in tests can observe it. For non-fake
// metadata clients this is a no-op.
func MakeCRD(t test.Failer, c kube.Client, g schema.GroupVersionResource) {
	t.Helper()
	crd := &v1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s.%s", g.Resource, g.Group),
		},
	}

	// Metadata client fake is not kept in sync, so if using a fake client update that as well
	fmc, ok := c.Metadata().(*metadatafake.FakeMetadataClient)
	if !ok {
		return
	}
	fmg := fmc.Resource(gvr.CustomResourceDefinition)
	fmd, ok := fmg.(metadatafake.MetadataClient)
	if !ok {
		return
	}
	if _, err := fmd.CreateFake(&metav1.PartialObjectMetadata{
		TypeMeta:   crd.TypeMeta,
		ObjectMeta: crd.ObjectMeta,
	}, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clienttest
import (
"context"
"reflect"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"istio.io/istio/pkg/config/schema/kubeclient"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/test"
)
// directClient reads directly from the API server, bypassing any informer cache.
// Per NewDirectClient's usage (e.g. NewDirectClient[*Pod, Pod, PodList]):
// T is the pointer type, PT the value type, and TL the list type.
type directClient[T controllers.Object, PT any, TL runtime.Object] struct {
	kclient.Writer[T]
	t      test.Failer
	client kube.Client
}

// Get fetches the object from the API server. NotFound yields the zero value;
// any other error fails the test.
func (d *directClient[T, PT, TL]) Get(name, namespace string) T {
	api := kubeclient.GetClient[T, TL](d.client, namespace)
	res, err := api.Get(context.Background(), name, metav1.GetOptions{})
	if err != nil && !kerrors.IsNotFound(err) {
		d.t.Fatalf("get: %v", err)
	}
	return res
}

// List fetches matching objects from the API server. Reflection extracts the
// "Items" field from the typed list, since TL exposes no common accessor for it.
func (d *directClient[T, PT, TL]) List(namespace string, selector klabels.Selector) []T {
	api := kubeclient.GetClient[T, TL](d.client, namespace)
	res, err := api.List(context.Background(), metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	if err != nil {
		d.t.Fatalf("list: %v", err)
	}
	items := reflect.ValueOf(res).Elem().FieldByName("Items")
	ret := make([]T, 0, items.Len())
	for i := 0; i < items.Len(); i++ {
		itm := items.Index(i).Interface().(PT)
		// Take the element's address so the value satisfies the pointer-typed T.
		ret = append(ret, any(&itm).(T))
	}
	return ret
}
// Compile-time check that directClient satisfies kclient.ReadWriter.
var _ kclient.ReadWriter[controllers.Object] = &directClient[controllers.Object, any, controllers.Object]{}

// NewWriter returns a new client for the given type.
// Any errors will call t.Fatal.
func NewWriter[T controllers.ComparableObject](t test.Failer, c kube.Client) TestWriter[T] {
	return TestWriter[T]{t: t, c: kclient.NewWriteClient[T](c)}
}

// NewDirectClient returns a new client for the given type. Reads are directly to the API server.
// Any errors will call t.Fatal.
// Typically, clienttest.WrapReadWriter should be used to simply wrap an existing client when testing an informer.
// However, NewDirectClient can be useful if we do not need/want an informer and need direct reads.
// Generic parameters represent the type with and without a pointer, and the list type.
// Example: NewDirectClient[*Pod, Pod, PodList]
func NewDirectClient[T controllers.ComparableObject, PT any, TL runtime.Object](t test.Failer, c kube.Client) TestClient[T] {
	return WrapReadWriter[T](t, &directClient[T, PT, TL]{
		t:      t,
		client: c,
		Writer: kclient.NewWriteClient[T](c),
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clienttest
import (
klabels "k8s.io/apimachinery/pkg/labels"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/assert"
)
// TestClient is a read/write client wrapper for tests; failed operations fail
// the test instead of returning errors.
type TestClient[T controllers.Object] struct {
	c kclient.ReadWriter[T]
	t test.Failer

	TestWriter[T]
}

// TestWriter is the write-only counterpart of TestClient.
type TestWriter[T controllers.Object] struct {
	c kclient.Writer[T]
	t test.Failer
}

// Get reads the object through the wrapped client (cached or direct, depending
// on how the client was constructed).
func (t TestClient[T]) Get(name, namespace string) T {
	return t.c.Get(name, namespace)
}

// List reads all matching objects through the wrapped client.
func (t TestClient[T]) List(namespace string, selector klabels.Selector) []T {
	return t.c.List(namespace, selector)
}
// Create creates the object, failing the test on error.
func (t TestWriter[T]) Create(object T) T {
	t.t.Helper()
	res, err := t.c.Create(object)
	if err != nil {
		t.t.Fatalf("create %v/%v: %v", object.GetNamespace(), object.GetName(), err)
	}
	return res
}

// Update updates the object, failing the test on error.
func (t TestWriter[T]) Update(object T) T {
	t.t.Helper()
	res, err := t.c.Update(object)
	if err != nil {
		t.t.Fatalf("update %v/%v: %v", object.GetNamespace(), object.GetName(), err)
	}
	return res
}

// UpdateStatus updates the object's status subresource, failing the test on error.
func (t TestWriter[T]) UpdateStatus(object T) T {
	t.t.Helper()
	res, err := t.c.UpdateStatus(object)
	if err != nil {
		t.t.Fatalf("update status %v/%v: %v", object.GetNamespace(), object.GetName(), err)
	}
	return res
}
// CreateOrUpdate creates the object, or updates it if it already exists,
// failing the test on error.
func (t TestWriter[T]) CreateOrUpdate(object T) T {
	t.t.Helper()
	res, err := kclient.CreateOrUpdate[T](t.c, object)
	if err != nil {
		t.t.Fatalf("createOrUpdate %v/%v: %v", object.GetNamespace(), object.GetName(), err)
	}
	return res
}
// CreateOrUpdateStatus creates the object (or updates it if it already exists)
// and then writes its status subresource, failing the test on any error.
func (t TestWriter[T]) CreateOrUpdateStatus(object T) T {
	t.t.Helper()
	_, err := kclient.CreateOrUpdate(t.c, object)
	if err != nil {
		// Label the failure with this method's name; previously this reused the
		// "createOrUpdate" message, making failures indistinguishable from CreateOrUpdate.
		t.t.Fatalf("createOrUpdateStatus %v/%v: %v", object.GetNamespace(), object.GetName(), err)
	}
	return t.UpdateStatus(object)
}
// Delete removes the named object, failing the test on error.
func (t TestWriter[T]) Delete(name, namespace string) {
	t.t.Helper()
	err := t.c.Delete(name, namespace)
	if err != nil {
		t.t.Fatalf("delete %v/%v: %v", namespace, name, err)
	}
}

// WrapReadWriter returns a client that calls t.Fatal on errors.
// Reads may be cached or uncached, depending on the input client.
func WrapReadWriter[T controllers.Object](t test.Failer, c kclient.ReadWriter[T]) TestClient[T] {
	return TestClient[T]{
		c: c,
		t: t,
		TestWriter: TestWriter[T]{
			c: c,
			t: t,
		},
	}
}

// Wrap returns a client that calls t.Fatal on errors.
// Reads may be cached or uncached, depending on the input client.
// Note: this is identical to WrapReadWriter but works around Go limitations, allowing calling w/o specifying
// generic parameters in the common case.
func Wrap[T controllers.Object](t test.Failer, c kclient.Client[T]) TestClient[T] {
	return WrapReadWriter[T](t, c)
}
// TrackerHandler returns an object handler that records each event into the
// tracker as "add/<name>", "update/<name>", or "delete/<name>".
func TrackerHandler(tracker *assert.Tracker[string]) controllers.EventHandler[controllers.Object] {
	return controllers.EventHandler[controllers.Object]{
		AddFunc: func(obj controllers.Object) {
			tracker.Record("add/" + obj.GetName())
		},
		UpdateFunc: func(oldObj, newObj controllers.Object) {
			tracker.Record("update/" + newObj.GetName())
		},
		DeleteFunc: func(obj controllers.Object) {
			tracker.Record("delete/" + obj.GetName())
		},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
"fmt"
"sync"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/config/schema/gvr"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/log"
)
// crdWatcher watches the CustomResourceDefinition collection (via metadata only)
// and invokes registered callbacks when awaited CRDs appear.
type crdWatcher struct {
	crds  Informer[*metav1.PartialObjectMetadata]
	queue controllers.Queue

	// mutex guards callbacks and stop.
	mutex sync.RWMutex
	// callbacks is keyed by CRD name ("<resource>.<group>"); entries are removed once run.
	callbacks map[string][]func()

	running chan struct{}
	stop    <-chan struct{}
}

func init() {
	// Unfortunate hack needed to avoid circular imports
	kube.NewCrdWatcher = newCrdWatcher
}

// newCrdWatcher returns a new CRD watcher controller.
func newCrdWatcher(client kube.Client) kubetypes.CrdWatcher {
	c := &crdWatcher{
		running:   make(chan struct{}),
		callbacks: map[string][]func(){},
	}

	c.queue = controllers.NewQueue("crd watcher",
		controllers.WithReconciler(c.Reconcile))
	// Only CRD metadata is needed to detect presence, so a metadata informer suffices.
	c.crds = NewMetadata(client, gvr.CustomResourceDefinition, Filter{})
	c.crds.AddEventHandler(controllers.ObjectHandler(c.queue.AddObject))
	return c
}
// HasSynced returns whether the underlying cache has synced and the callback has been called at least once.
func (c *crdWatcher) HasSynced() bool {
	return c.queue.HasSynced()
}

// Run starts the controller. This must be called. Calling Run more than once
// is a no-op (only the first stop channel is used).
func (c *crdWatcher) Run(stop <-chan struct{}) {
	c.mutex.Lock()
	if c.stop != nil {
		// Run already called. Because we call this from client.RunAndWait this isn't uncommon
		c.mutex.Unlock()
		return
	}
	c.stop = stop
	c.mutex.Unlock()
	kube.WaitForCacheSync("crd watcher", stop, c.crds.HasSynced)
	// Blocks until stop closes; event handlers are removed on the way out.
	c.queue.Run(stop)
	c.crds.ShutdownHandlers()
}
// WaitForCRD waits until the request CRD exists, and returns true on success. A false return value
// indicates the CRD does not exist but the wait failed or was canceled.
// This is useful to conditionally enable controllers based on CRDs being created.
func (c *crdWatcher) WaitForCRD(s schema.GroupVersionResource, stop <-chan struct{}) bool {
	done := make(chan struct{})
	if c.KnownOrCallback(s, func(stop <-chan struct{}) {
		// Signal the waiter below once the CRD shows up.
		close(done)
	}) {
		// Already known
		return true
	}
	select {
	case <-stop:
		return false
	case <-done:
		return true
	}
}
// KnownOrCallback returns `true` immediately if the resource is known.
// If it is not known, `false` is returned. If the resource is later added, the callback will be triggered.
func (c *crdWatcher) KnownOrCallback(s schema.GroupVersionResource, f func(stop <-chan struct{})) bool {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	// If we are already synced, return immediately if the CRD is present.
	if c.crds.HasSynced() && c.known(s) {
		// Already known, return early
		return true
	}
	name := fmt.Sprintf("%s.%s", s.Resource, s.Group)
	// The registered wrapper runs from Reconcile once the CRD is observed.
	c.callbacks[name] = append(c.callbacks[name], func() {
		if features.EnableUnsafeAssertions && c.stop == nil {
			log.Fatalf("CRD Watcher callback called without stop set")
		}
		// Call the callback
		f(c.stop)
	})
	return false
}

// known reports whether the CRD for s is currently present in the cache.
func (c *crdWatcher) known(s schema.GroupVersionResource) bool {
	// From the spec: "Its name MUST be in the format <.spec.name>.<.spec.group>."
	name := fmt.Sprintf("%s.%s", s.Resource, s.Group)
	return c.crds.Get(name, "") != nil
}
// Reconcile runs all callbacks registered for the named CRD, then removes them
// so each fires at most once. The callbacks execute outside the mutex so they
// may safely call back into the watcher.
func (c *crdWatcher) Reconcile(key types.NamespacedName) error {
	c.mutex.Lock()
	callbacks, f := c.callbacks[key.Name]
	if !f {
		c.mutex.Unlock()
		return nil
	}
	// Delete them so we do not run again
	delete(c.callbacks, key.Name)
	c.mutex.Unlock()
	for _, cb := range callbacks {
		cb()
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
"sync"
"sync/atomic"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/ptr"
)
// delayedClient is a client wrapper that initially starts with an "empty client",
// but can later be swapped with a real client.
// The "empty client" returns empty responses for all reads, and fails all writes.
type delayedClient[T controllers.ComparableObject] struct {
	// inf holds the real informer once set; nil while still delayed.
	inf     *atomic.Pointer[Informer[T]]
	delayed kubetypes.DelayedFilter

	// hm guards handlers and started, which buffer state until inf is set.
	hm       sync.Mutex
	handlers []cache.ResourceEventHandler
	started  <-chan struct{}
}
// Get delegates to the real informer once loaded; until then it returns the zero value.
func (s *delayedClient[T]) Get(name, namespace string) T {
	if c := s.inf.Load(); c != nil {
		return (*c).Get(name, namespace)
	}
	return ptr.Empty[T]()
}

// List delegates to the real informer once loaded; until then it returns nil.
func (s *delayedClient[T]) List(namespace string, selector klabels.Selector) []T {
	if c := s.inf.Load(); c != nil {
		return (*c).List(namespace, selector)
	}
	return nil
}

// ListUnfiltered delegates to the real informer once loaded; until then it returns nil.
func (s *delayedClient[T]) ListUnfiltered(namespace string, selector klabels.Selector) []T {
	if c := s.inf.Load(); c != nil {
		return (*c).ListUnfiltered(namespace, selector)
	}
	return nil
}

// AddEventHandler registers h on the real informer if available; otherwise the
// handler is buffered and replayed by set() once the informer arrives.
func (s *delayedClient[T]) AddEventHandler(h cache.ResourceEventHandler) {
	if c := s.inf.Load(); c != nil {
		(*c).AddEventHandler(h)
	} else {
		s.hm.Lock()
		defer s.hm.Unlock()
		s.handlers = append(s.handlers, h)
	}
}

func (s *delayedClient[T]) HasSynced() bool {
	if c := s.inf.Load(); c != nil {
		return (*c).HasSynced()
	}
	// If we haven't loaded the informer yet, we want to check if the delayed filter is synced.
	// This ensures that at startup, we only return HasSynced=true if we are sure the CRD is not ready.
	hs := s.delayed.HasSynced()
	return hs
}

// ShutdownHandlers removes handlers from the real informer, or discards any
// still-buffered handlers if the informer never materialized.
func (s *delayedClient[T]) ShutdownHandlers() {
	if c := s.inf.Load(); c != nil {
		(*c).ShutdownHandlers()
	} else {
		s.hm.Lock()
		defer s.hm.Unlock()
		s.handlers = nil
	}
}

// Start starts the real informer if present; the stop channel is always
// recorded so set() can start a later-arriving informer.
func (s *delayedClient[T]) Start(stop <-chan struct{}) {
	if c := s.inf.Load(); c != nil {
		(*c).Start(stop)
	}
	s.hm.Lock()
	defer s.hm.Unlock()
	s.started = stop
}
// Compile-time check that delayedClient implements Informer.
var _ Informer[controllers.Object] = &delayedClient[controllers.Object]{}

// set swaps in the real informer and replays state accumulated while delayed:
// buffered handlers are registered, and if Start was already called, the
// informer is started with the recorded stop channel.
func (s *delayedClient[T]) set(inf Informer[T]) {
	if inf != nil {
		s.inf.Swap(&inf)
		s.hm.Lock()
		defer s.hm.Unlock()
		for _, h := range s.handlers {
			inf.AddEventHandler(h)
		}
		s.handlers = nil
		if s.started != nil {
			inf.Start(s.started)
		}
	}
}
// delayedFilter adapts a CrdWatcher to the kubetypes.DelayedFilter interface
// for one specific resource.
type delayedFilter struct {
	Watcher  kubetypes.CrdWatcher
	Resource schema.GroupVersionResource
}

// HasSynced reports whether the underlying CRD watcher has synced.
func (d *delayedFilter) HasSynced() bool {
	return d.Watcher.HasSynced()
}

// KnownOrCallback delegates to the watcher for this filter's resource.
func (d *delayedFilter) KnownOrCallback(f func(stop <-chan struct{})) bool {
	return d.Watcher.KnownOrCallback(d.Resource, f)
}

// newDelayedFilter binds a CrdWatcher to a single resource.
func newDelayedFilter(resource schema.GroupVersionResource, watcher kubetypes.CrdWatcher) *delayedFilter {
	return &delayedFilter{
		Watcher:  watcher,
		Resource: resource,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"istio.io/istio/pkg/kube"
)
// EventRecorder writes Kubernetes Events via a broadcaster/recorder pair.
type EventRecorder struct {
	eventRecorder    record.EventRecorder
	eventBroadcaster record.EventBroadcaster
}

// NewEventRecorder creates a new EventRecorder.
// This should be shutdown after usage.
func NewEventRecorder(client kube.Client, component string) EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.V(5).Infof) // Will log at kube:debug level
	// Events are sunk to the API server across all namespaces.
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.Kube().CoreV1().Events("")})
	eventRecorder := eventBroadcaster.NewRecorder(kube.IstioScheme, corev1.EventSource{
		Component: component,
	})
	return EventRecorder{
		eventRecorder:    eventRecorder,
		eventBroadcaster: eventBroadcaster,
	}
}

// Write creates a single event.
func (e *EventRecorder) Write(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
	e.eventRecorder.Eventf(object, eventtype, reason, messageFmt, args...)
}

// Shutdown terminates the event recorder. This must be called upon completion of writing events, and events should not be
// written once terminated.
func (e *EventRecorder) Shutdown() {
	e.eventBroadcaster.Shutdown()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
kerrors "k8s.io/apimachinery/pkg/api/errors"
"istio.io/istio/pkg/kube/controllers"
)
// CreateOrUpdate attempts to create the object, falling back to an Update when
// the server reports it already exists. Any other create error is returned as-is.
func CreateOrUpdate[T controllers.Object](c Writer[T], object T) (T, error) {
	res, err := c.Create(object)
	if kerrors.IsAlreadyExists(err) {
		// Already exist, update
		return c.Update(object)
	}
	return res, err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kclient
import (
"sync"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/util/sets"
)
// Index maintains a simple index over an informer
type Index[K comparable, O controllers.ComparableObject] struct {
	// mu guards objects, which maps each index key to the set of object names
	// currently carrying it.
	mu      sync.RWMutex
	objects map[K]sets.Set[types.NamespacedName]
	client  Informer[O]
}

// Lookup finds all objects matching a given key
func (i *Index[K, O]) Lookup(k K) []O {
	i.mu.RLock()
	defer i.mu.RUnlock()
	res := make([]O, 0)
	for obj := range i.objects[k] {
		item := i.client.Get(obj.Name, obj.Namespace)
		if controllers.IsNil(item) {
			// This should be extremely rare, maybe impossible due to the mutex.
			continue
		}
		res = append(res, item)
	}
	return res
}
// CreateIndexWithDelegate creates a simple index, keyed by key K, over an informer for O. This is similar to
// Informer.AddIndex, but is easier to use and can be added after an informer has already started.
// An additional ResourceEventHandler can be passed in that is guaranteed to happen *after* the index is updated.
// This allows the delegate to depend on the contents of the index.
// TODO(https://github.com/kubernetes/kubernetes/pull/117046) remove this.
func CreateIndexWithDelegate[K comparable, O controllers.ComparableObject](
	client Informer[O],
	extract func(o O) []K,
	delegate cache.ResourceEventHandler,
) *Index[K, O] {
	idx := Index[K, O]{
		objects: make(map[K]sets.Set[types.NamespacedName]),
		client:  client,
		mu:      sync.RWMutex{},
	}
	// addObj records the object's name under every key extract produces for it.
	addObj := func(obj any) {
		ro := controllers.ExtractObject(obj)
		o := ro.(O)
		objectKey := config.NamespacedName(o)
		for _, indexKey := range extract(o) {
			sets.InsertOrNew(idx.objects, indexKey, objectKey)
		}
	}
	// deleteObj removes the object's name from every key extract produces for it.
	deleteObj := func(obj any) {
		ro := controllers.ExtractObject(obj)
		o := ro.(O)
		objectKey := config.NamespacedName(o)
		for _, indexKey := range extract(o) {
			sets.DeleteCleanupLast(idx.objects, indexKey, objectKey)
		}
	}
	// The delegate is always invoked after the index mutation, outside the lock.
	handler := cache.ResourceEventHandlerDetailedFuncs{
		AddFunc: func(obj any, initialList bool) {
			idx.mu.Lock()
			addObj(obj)
			idx.mu.Unlock()
			if delegate != nil {
				delegate.OnAdd(obj, initialList)
			}
		},
		UpdateFunc: func(oldObj, newObj any) {
			idx.mu.Lock()
			// Re-key: drop entries derived from the old object before adding the new ones.
			deleteObj(oldObj)
			addObj(newObj)
			idx.mu.Unlock()
			if delegate != nil {
				delegate.OnUpdate(oldObj, newObj)
			}
		},
		DeleteFunc: func(obj any) {
			idx.mu.Lock()
			deleteObj(obj)
			idx.mu.Unlock()
			if delegate != nil {
				delegate.OnDelete(obj)
			}
		},
	}
	client.AddEventHandler(handler)
	return &idx
}

// CreateIndex creates a simple index, keyed by key K, over an informer for O. This is similar to
// Informer.AddIndex, but is easier to use and can be added after an informer has already started.
func CreateIndex[K comparable, O controllers.ComparableObject](
	client Informer[O],
	extract func(o O) []K,
) *Index[K, O] {
	return CreateIndexWithDelegate(client, extract, nil)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package labels provides utility methods for retrieving Istio-specific labels
// from Kubernetes resources.
package labels
import "istio.io/istio/pilot/pkg/model"
var (
	// These are the labels that are checked for canonical service name and revision.
	// Note: the order of these labels is important.
	// nameLabels are checked in order for the canonical service name.
	nameLabels = []string{
		model.IstioCanonicalServiceLabelName,
		"app.kubernetes.io/name",
		"app",
	}
	// revisionLabels are checked in order for the canonical service revision.
	revisionLabels = []string{
		model.IstioCanonicalServiceRevisionLabelName,
		"app.kubernetes.io/version",
		"version",
	}
)
// CanonicalService returns the values of the following labels from the supplied map:
// - service.istio.io/canonical-name
// - service.istio.io/canonical-revision
//
// If the labels are not in the map, a set of fallbacks are checked. For canonical name,
// `app.kubernetes.io/name` is checked, then `app`, eventually falling back to the
// supplied `workloadName`. For canonical revision, `app.kubernetes.io/version` is checked,
// followed by `version` and finally defaulting to the literal value of `"latest"`.
func CanonicalService(labels map[string]string, workloadName string) (string, string) {
	return canonicalServiceName(labels, workloadName), canonicalServiceRevision(labels)
}
// lookupLabelValue returns the value of the first label in the supplied map that matches
// one of the supplied keys, along with whether any key was found at all.
func lookupLabelValue(labels map[string]string, keys ...string) (string, bool) {
	for _, k := range keys {
		if v, present := labels[k]; present {
			return v, true
		}
	}
	return "", false
}
// HasCanonicalServiceName reports whether any of the canonical-service-name
// labels (see nameLabels) is present in the supplied map.
func HasCanonicalServiceName(labels map[string]string) bool {
	if _, found := lookupLabelValue(labels, nameLabels...); found {
		return true
	}
	return false
}
// HasCanonicalServiceRevision reports whether any of the canonical-service-revision
// labels (see revisionLabels) is present in the supplied map.
func HasCanonicalServiceRevision(labels map[string]string) bool {
	if _, found := lookupLabelValue(labels, revisionLabels...); found {
		return true
	}
	return false
}
// canonicalServiceRevision returns the canonical service revision derived from
// the labels, defaulting to "latest" when no revision label is present.
func canonicalServiceRevision(labels map[string]string) string {
	if value, ok := lookupLabelValue(labels, revisionLabels...); ok {
		return value
	}
	return "latest"
}
// canonicalServiceName returns the canonical service name derived from the
// labels, falling back to the supplied workload name when no name label is present.
func canonicalServiceName(labels map[string]string, workloadName string) string {
	if value, ok := lookupLabelValue(labels, nameLabels...); ok {
		return value
	}
	return workloadName
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mcs
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
mcs "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
"istio.io/istio/pilot/pkg/features"
)
var (
schemeBuilder = &runtime.SchemeBuilder{}
// AddToScheme is used to register MCS CRDs to a runtime.Scheme
AddToScheme = schemeBuilder.AddToScheme
// MCSSchemeGroupVersion is group version used to register Kubernetes Multi-Cluster Services (MCS) objects
MCSSchemeGroupVersion = schema.GroupVersion{Group: features.MCSAPIGroup, Version: features.MCSAPIVersion}
// ServiceExportGVR identifies the MCS ServiceExport resource.
ServiceExportGVR = MCSSchemeGroupVersion.WithResource("serviceexports")
// ServiceImportGVR identifies the MCS ServiceImport resource.
ServiceImportGVR = MCSSchemeGroupVersion.WithResource("serviceimports")
)
// init wires addKnownTypes into the scheme builder so that AddToScheme
// registers the MCS types on any scheme it is applied to.
func init() {
schemeBuilder.Register(addKnownTypes)
}
// addKnownTypes registers the Kubernetes Multi-Cluster Services (MCS) object
// kinds and the group's meta types with the provided scheme.
// It always returns nil; the error return satisfies runtime.SchemeBuilder.
func addKnownTypes(scheme *runtime.Scheme) error {
	mcsTypes := []runtime.Object{
		&mcs.ServiceExport{},
		&mcs.ServiceExportList{},
		&mcs.ServiceImport{},
		&mcs.ServiceImportList{},
	}
	scheme.AddKnownTypes(MCSSchemeGroupVersion, mcsTypes...)
	metav1.AddToGroupVersion(scheme, MCSSchemeGroupVersion)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multicluster
import (
"crypto/sha256"
"time"
"go.uber.org/atomic"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
)
// Cluster defines cluster struct
type Cluster struct {
// ID of the cluster.
ID cluster.ID
// Client for accessing the cluster.
Client kube.Client
// kubeConfigSha is the sha256 of the kubeconfig this cluster was built from,
// used to skip no-op updates when a secret is rewritten with identical contents.
kubeConfigSha [sha256.Size]byte
// stop is closed (via Stop) to shut the cluster down; see Closed().
stop chan struct{}
// initialSync is marked when RunAndWait completes
initialSync *atomic.Bool
// initialSyncTimeout is set when RunAndWait timed out
initialSyncTimeout *atomic.Bool
}
// Run starts the cluster's informers and waits for caches to sync. Once caches are synced, we mark the cluster synced.
// This should be called after each of the handlers have registered informers, and should be run in a goroutine.
func (r *Cluster) Run() {
	if timeout := features.RemoteClusterTimeout; timeout > 0 {
		time.AfterFunc(timeout, func() {
			// If we are still not synced when the timer fires, record the timeout so
			// HasSynced stops blocking istiod startup on this cluster.
			if !r.initialSync.Load() {
				log.Errorf("remote cluster %s failed to sync after %v", r.ID, timeout)
				timeouts.With(clusterLabel.Value(string(r.ID))).Increment()
			}
			r.initialSyncTimeout.Store(true)
		})
	}
	// Blocks until informer caches are synced (or the stop channel closes).
	r.Client.RunAndWait(r.stop)
	r.initialSync.Store(true)
}
// Stop closes the stop channel. It is safe to call multiple times: once the
// channel is already closed, subsequent calls take the receive branch and return
// instead of closing again (which would panic).
func (r *Cluster) Stop() {
select {
case <-r.stop:
return
default:
close(r.stop)
}
}
// HasSynced reports whether this cluster should be considered synced: either the
// initial sync completed, the sync timed out, or the cluster was closed.
func (r *Cluster) HasSynced() bool {
	// When wrong credentials are provided, this cluster never gets a chance to run,
	// so `initialSync`/`initialSyncTimeout` would never be set. To avoid blocking
	// istiod startup, a closed cluster also counts as synced.
	switch {
	case r.Closed():
		return true
	case r.initialSync.Load():
		return true
	default:
		return r.initialSyncTimeout.Load()
	}
}
// Closed reports whether Stop has been called, using a non-blocking receive on
// the stop channel (a closed channel is always ready to receive).
func (r *Cluster) Closed() bool {
select {
case <-r.stop:
return true
default:
return false
}
}
// SyncDidTimeout reports whether the cluster gave up waiting for its initial
// sync: the timeout fired before the sync ever completed.
func (r *Cluster) SyncDidTimeout() bool {
	if r.initialSync.Load() {
		return false
	}
	return r.initialSyncTimeout.Load()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multicluster
import (
"sync"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// ClusterStore is a collection of clusters
type ClusterStore struct {
sync.RWMutex
// keyed by secret key(ns/name)->clusterID
remoteClusters map[string]map[cluster.ID]*Cluster
// clusters is the flat set of all known cluster IDs, across all secrets,
// used for fast duplicate-registration checks (see Contains).
clusters sets.String
}
// newClustersStore initializes an empty ClusterStore.
func newClustersStore() *ClusterStore {
	store := &ClusterStore{
		remoteClusters: map[string]map[cluster.ID]*Cluster{},
		clusters:       sets.New[string](),
	}
	return store
}
// Store registers the cluster under the given secret key and cluster ID,
// creating the per-secret map on first use.
func (c *ClusterStore) Store(secretKey string, clusterID cluster.ID, value *Cluster) {
	c.Lock()
	defer c.Unlock()
	byID, ok := c.remoteClusters[secretKey]
	if !ok {
		byID = make(map[cluster.ID]*Cluster)
		c.remoteClusters[secretKey] = byID
	}
	byID[clusterID] = value
	c.clusters.Insert(string(clusterID))
}
// Delete removes the cluster registered under the secret key, pruning the
// secret's entry entirely once it holds no more clusters.
func (c *ClusterStore) Delete(secretKey string, clusterID cluster.ID) {
	c.Lock()
	defer c.Unlock()
	byID := c.remoteClusters[secretKey]
	delete(byID, clusterID)
	c.clusters.Delete(string(clusterID))
	if len(byID) == 0 {
		delete(c.remoteClusters, secretKey)
	}
}
// Get returns the cluster stored for the secret key / cluster ID pair, or nil
// if either the secret or the cluster is unknown.
func (c *ClusterStore) Get(secretKey string, clusterID cluster.ID) *Cluster {
	c.RLock()
	defer c.RUnlock()
	if byID, ok := c.remoteClusters[secretKey]; ok {
		return byID[clusterID]
	}
	return nil
}
// Contains reports whether a cluster with the given ID is registered under any secret.
func (c *ClusterStore) Contains(clusterID cluster.ID) bool {
	c.RLock()
	known := c.clusters.Contains(string(clusterID))
	c.RUnlock()
	return known
}
// GetByID returns the cluster with the given ID, searching across all secrets,
// or nil if no secret registered it.
func (c *ClusterStore) GetByID(clusterID cluster.ID) *Cluster {
	c.RLock()
	defer c.RUnlock()
	for _, byID := range c.remoteClusters {
		if found, ok := byID[clusterID]; ok {
			return found
		}
	}
	return nil
}
// All returns a copy of the current remote clusters. Cluster structs are
// shallow-copied, so the caller can iterate without holding the store's lock.
// Safe to call on a nil store (returns nil).
func (c *ClusterStore) All() map[string]map[cluster.ID]*Cluster {
	if c == nil {
		return nil
	}
	c.RLock()
	defer c.RUnlock()
	out := make(map[string]map[cluster.ID]*Cluster, len(c.remoteClusters))
	for secret, byID := range c.remoteClusters {
		copied := make(map[cluster.ID]*Cluster, len(byID))
		for cid, cl := range byID {
			clusterCopy := *cl
			copied[cid] = &clusterCopy
		}
		out[secret] = copied
	}
	return out
}
// GetExistingClustersFor returns the clusters currently registered for the given secret.
func (c *ClusterStore) GetExistingClustersFor(secretKey string) []*Cluster {
	c.RLock()
	defer c.RUnlock()
	byID := c.remoteClusters[secretKey]
	out := make([]*Cluster, 0, len(byID))
	for _, cl := range byID {
		out = append(out, cl)
	}
	return out
}
// Len returns the total number of clusters registered across all secrets.
//
// Fix: this is a read-only operation, so take the read lock like every other
// reader (Get, Contains, All, ...). The previous version took the write lock,
// needlessly serializing against concurrent readers.
func (c *ClusterStore) Len() int {
	c.RLock()
	defer c.RUnlock()
	out := 0
	for _, clusterMap := range c.remoteClusters {
		out += len(clusterMap)
	}
	return out
}
// HasSynced returns false if any registered remote cluster has not yet synced
// (per Cluster.HasSynced, which also accepts timed-out or closed clusters).
func (c *ClusterStore) HasSynced() bool {
	c.RLock()
	defer c.RUnlock()
	for _, byID := range c.remoteClusters {
		for _, cl := range byID {
			if cl.HasSynced() {
				continue
			}
			log.Debugf("remote cluster %s registered informers have not been synced up yet", cl.ID)
			return false
		}
	}
	return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package multicluster
import (
"bytes"
"crypto/sha256"
"fmt"
"time"
"github.com/hashicorp/go-multierror"
"go.uber.org/atomic"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
filter "istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/monitoring"
)
const (
// MultiClusterSecretLabel marks a Secret as containing remote-cluster kubeconfigs;
// the controller only watches secrets carrying this label set to "true".
MultiClusterSecretLabel = "istio/multiCluster"
)
var (
// clusterLabel tags timeout metrics with the affected cluster ID.
clusterLabel = monitoring.CreateLabel("cluster")
// timeouts counts remote clusters that failed to sync within the deadline.
timeouts = monitoring.NewSum(
"remote_cluster_sync_timeouts_total",
"Number of times remote clusters took too long to sync, causing slow startup that excludes remote clusters.",
)
// clusterType distinguishes the local (config) cluster from remote clusters.
clusterType = monitoring.CreateLabel("cluster_type")
clustersCount = monitoring.NewGauge(
"istiod_managed_clusters",
"Number of clusters managed by istiod",
)
// localClusters/remoteClusters are the per-type views of clustersCount.
localClusters = clustersCount.With(clusterType.Value("local"))
remoteClusters = clustersCount.With(clusterType.Value("remote"))
)
// ClusterHandler is notified of lifecycle events for clusters managed by the Controller.
type ClusterHandler interface {
// ClusterAdded is invoked when a cluster is registered; stop is closed when the cluster is removed.
ClusterAdded(cluster *Cluster, stop <-chan struct{})
// ClusterUpdated is invoked when an existing cluster's kubeconfig changes.
ClusterUpdated(cluster *Cluster, stop <-chan struct{})
// ClusterDeleted is invoked after a cluster is stopped and removed.
ClusterDeleted(clusterID cluster.ID)
}
// Controller is the controller implementation for Secret resources
type Controller struct {
// namespace is the namespace in which multicluster secrets are watched.
namespace string
// configClusterID/configClusterClient identify the local (config) cluster.
configClusterID cluster.ID
configClusterClient kube.Client
// queue serializes secret reconciliation (see processItem).
queue controllers.Queue
// secrets watches Secrets labeled with MultiClusterSecretLabel.
secrets kclient.Client[*corev1.Secret]
// configOverrides are applied to every remote cluster's rest.Config.
configOverrides []func(*rest.Config)
namespaces kclient.Client[*corev1.Namespace]
DiscoveryNamespacesFilter filter.DiscoveryNamespacesFilter
// cs stores the remote clusters built from secrets.
cs *ClusterStore
// handlers receive cluster add/update/delete notifications.
handlers []ClusterHandler
}
// NewController returns a new secret controller
//
// The controller watches multicluster secrets in `namespace` and builds remote
// clusters from the kubeconfigs they contain. Returns nil if an in-cluster
// client cannot be constructed when LocalClusterSecretWatcher+ExternalIstiod
// are enabled.
func NewController(kubeclientset kube.Client, namespace string, clusterID cluster.ID,
meshWatcher mesh.Watcher, configOverrides ...func(*rest.Config),
) *Controller {
informerClient := kubeclientset
// When these two are set to true, Istiod will be watching the namespace in which
// Istiod is running on the external cluster. Use the inCluster credentials to
// create a kubeclientset
if features.LocalClusterSecretWatcher && features.ExternalIstiod {
config, err := kube.InClusterConfig(configOverrides...)
if err != nil {
log.Errorf("Could not get istiod incluster configuration: %v", err)
return nil
}
log.Info("Successfully retrieved incluster config.")
localKubeClient, err := kube.NewClient(kube.NewClientConfigForRestConfig(config), clusterID)
if err != nil {
log.Errorf("Could not create a client to access local cluster API server: %v", err)
return nil
}
log.Infof("Successfully created in cluster kubeclient at %s", localKubeClient.RESTConfig().Host)
informerClient = localKubeClient
}
// Only secrets labeled istio/multiCluster=true in the given namespace are watched.
secrets := kclient.NewFiltered[*corev1.Secret](informerClient, kclient.Filter{
Namespace: namespace,
LabelSelector: MultiClusterSecretLabel + "=true",
})
// init gauges
localClusters.Record(1.0)
remoteClusters.Record(0.0)
controller := &Controller{
namespace: namespace,
configClusterID: clusterID,
configClusterClient: kubeclientset,
cs: newClustersStore(),
secrets: secrets,
configOverrides: configOverrides,
}
// Namespace informer drives the discovery-namespaces filter; note it always uses
// the config cluster client, even when secrets are watched via the local client.
namespaces := kclient.New[*corev1.Namespace](kubeclientset)
controller.namespaces = namespaces
controller.DiscoveryNamespacesFilter = filter.NewDiscoveryNamespacesFilter(namespaces, meshWatcher.Mesh().GetDiscoverySelectors())
// Queue does NOT retry. The only error that can occur is if the kubeconfig is
// malformed. This is a static analysis that cannot be resolved by retry. Actual
// connectivity issues would result in HasSynced returning false rather than an
// error. In this case, things will be retried automatically (via informers or
// others), and the time is capped by RemoteClusterTimeout).
controller.queue = controllers.NewQueue("multicluster secret",
controllers.WithReconciler(controller.processItem))
secrets.AddEventHandler(controllers.ObjectHandler(controller.queue.AddObject))
return controller
}
// AddHandler registers a ClusterHandler to be notified of cluster lifecycle events.
// NOTE(review): handlers is not lock-protected; presumably all AddHandler calls
// happen before Run starts processing events — confirm with callers.
func (c *Controller) AddHandler(h ClusterHandler) {
c.handlers = append(c.handlers, h)
}
// Run starts the controller until it receives a message over stopCh
//
// Startup ordering is deliberate: namespaces must sync first (for the discovery
// filter), then the config cluster's handlers are registered synchronously, and
// only then does secret processing start in the background.
func (c *Controller) Run(stopCh <-chan struct{}) error {
// Normally, we let informers start after all controllers. However, in this case we need namespaces to start and sync
// first, so we have DiscoveryNamespacesFilter ready to go. This avoids processing objects that would be filtered during startup.
c.namespaces.Start(stopCh)
// Wait for namespace informer synced, which implies discovery filter is synced as well
if !kube.WaitForCacheSync("namespace", stopCh, c.namespaces.HasSynced) {
return fmt.Errorf("failed to sync namespaces")
}
// run handlers for the config cluster; do not store this *Cluster in the ClusterStore or give it a SyncTimeout
// this is done outside the goroutine, we should block other Run/startFuncs until this is registered
configCluster := &Cluster{Client: c.configClusterClient, ID: c.configClusterID}
c.handleAdd(configCluster, stopCh)
go func() {
t0 := time.Now()
log.Info("Starting multicluster remote secrets controller")
// we need to start here when local cluster secret watcher enabled
if features.LocalClusterSecretWatcher && features.ExternalIstiod {
c.secrets.Start(stopCh)
}
if !kube.WaitForCacheSync("multicluster remote secrets", stopCh, c.secrets.HasSynced) {
return
}
log.Infof("multicluster remote secrets controller cache synced in %v", time.Since(t0))
c.queue.Run(stopCh)
}()
return nil
}
// HasSynced reports whether the secrets present at startup have been processed
// and all remote clusters built from them have synced (or timed out / closed).
func (c *Controller) HasSynced() bool {
	if c.queue.HasSynced() {
		return c.cs.HasSynced()
	}
	// we haven't finished processing the secrets that were present at startup
	log.Debug("secret controller did not sync secrets presented at startup")
	return false
}
// processItem reconciles a single secret key: a secret present in the informer
// cache is (re)added; a missing one is treated as deleted. Also refreshes the
// remote-clusters gauge. Errors are only returned for malformed kubeconfigs.
func (c *Controller) processItem(key types.NamespacedName) error {
	log.Infof("processing secret event for secret %s", key)
	scrt := c.secrets.Get(key.Name, key.Namespace)
	if scrt == nil {
		log.Debugf("secret %s does not exist in informer cache, deleting it", key)
		c.deleteSecret(key.String())
	} else {
		log.Debugf("secret %s exists in informer cache, processing it", key)
		if err := c.addSecret(key, scrt); err != nil {
			return fmt.Errorf("error adding secret %s: %v", key, err)
		}
	}
	remoteClusters.Record(float64(c.cs.Len()))
	return nil
}
// BuildClientsFromConfig creates kube.Clients from the provided kubeconfig. This is overridden for testing only
//
// The kubeconfig bytes are treated as untrusted input (see NewUntrustedRestConfig).
// When WorkloadEntryCrossCluster is enabled, the client is wrapped with a CRD watcher.
var BuildClientsFromConfig = func(kubeConfig []byte, clusterId cluster.ID, configOverrides ...func(*rest.Config)) (kube.Client, error) {
restConfig, err := kube.NewUntrustedRestConfig(kubeConfig, configOverrides...)
if err != nil {
return nil, err
}
clients, err := kube.NewClient(kube.NewClientConfigForRestConfig(restConfig), clusterId)
if err != nil {
return nil, fmt.Errorf("failed to create kube clients: %v", err)
}
if features.WorkloadEntryCrossCluster {
clients = kube.EnableCrdWatcher(clients)
}
return clients, nil
}
// createRemoteCluster builds a Cluster from raw kubeconfig bytes, recording the
// kubeconfig's sha256 so identical re-writes of the secret can be skipped.
func (c *Controller) createRemoteCluster(kubeConfig []byte, clusterID string) (*Cluster, error) {
	clients, err := BuildClientsFromConfig(kubeConfig, cluster.ID(clusterID), c.configOverrides...)
	if err != nil {
		return nil, err
	}
	remote := &Cluster{
		ID:     cluster.ID(clusterID),
		Client: clients,
		// stop is for use inside the package, to close on cleanup
		stop:               make(chan struct{}),
		initialSync:        atomic.NewBool(false),
		initialSyncTimeout: atomic.NewBool(false),
		kubeConfigSha:      sha256.Sum256(kubeConfig),
	}
	return remote, nil
}
// addSecret reconciles the clusters derived from a single secret: clusters no
// longer present in the secret's data are deleted, changed kubeconfigs trigger
// an update (stop old, start new), and new entries are added. Each data key is
// a cluster ID and each value a kubeconfig. Per-cluster failures are collected
// and returned as a multierror; other clusters are still processed.
func (c *Controller) addSecret(name types.NamespacedName, s *corev1.Secret) error {
secretKey := name.String()
// First delete clusters
existingClusters := c.cs.GetExistingClustersFor(secretKey)
for _, existingCluster := range existingClusters {
if _, ok := s.Data[string(existingCluster.ID)]; !ok {
c.deleteCluster(secretKey, existingCluster)
}
}
var errs *multierror.Error
for clusterID, kubeConfig := range s.Data {
logger := log.WithLabels("cluster", clusterID, "secret", secretKey)
if cluster.ID(clusterID) == c.configClusterID {
logger.Infof("ignoring cluster as it would overwrite the config cluster")
continue
}
action, callback := "Adding", c.handleAdd
if prev := c.cs.Get(secretKey, cluster.ID(clusterID)); prev != nil {
action, callback = "Updating", c.handleUpdate
// clusterID must be unique even across multiple secrets
// Identical kubeconfig bytes mean nothing changed; skip the expensive rebuild.
kubeConfigSha := sha256.Sum256(kubeConfig)
if bytes.Equal(kubeConfigSha[:], prev.kubeConfigSha[:]) {
logger.Infof("skipping update (kubeconfig are identical)")
continue
}
// stop previous remote cluster
prev.Stop()
} else if c.cs.Contains(cluster.ID(clusterID)) {
// if the cluster has been registered before by another secret, ignore the new one.
logger.Warnf("cluster has already been registered")
continue
}
logger.Infof("%s cluster", action)
remoteCluster, err := c.createRemoteCluster(kubeConfig, clusterID)
if err != nil {
logger.Errorf("%s cluster: create remote cluster failed: %v", action, err)
errs = multierror.Append(errs, err)
continue
}
// Notify handlers before storing/starting so informers are registered before Run.
callback(remoteCluster, remoteCluster.stop)
logger.Infof("finished callback for cluster and starting to sync")
c.cs.Store(secretKey, remoteCluster.ID, remoteCluster)
go remoteCluster.Run()
}
log.Infof("Number of remote clusters: %d", c.cs.Len())
return errs.ErrorOrNil()
}
// deleteSecret stops and deregisters every remote cluster that was created from
// the given secret, leaving the config cluster untouched.
func (c *Controller) deleteSecret(secretKey string) {
	for _, cl := range c.cs.GetExistingClustersFor(secretKey) {
		if cl.ID == c.configClusterID {
			log.Infof("ignoring delete cluster %v from secret %v as it would overwrite the config cluster", c.configClusterID, secretKey)
			continue
		}
		c.deleteCluster(secretKey, cl)
	}
	log.Infof("Number of remote clusters: %d", c.cs.Len())
}
// deleteCluster stops the cluster, notifies handlers of the deletion, and
// removes it from the store.
func (c *Controller) deleteCluster(secretKey string, cl *Cluster) {
	log.Infof("Deleting cluster_id=%v configured by secret=%v", cl.ID, secretKey)
	cl.Stop()
	c.handleDelete(cl.ID)
	c.cs.Delete(secretKey, cl.ID)
	log.Infof("Number of remote clusters: %d", c.cs.Len())
}
// handleAdd notifies every registered handler that a cluster was added.
func (c *Controller) handleAdd(cluster *Cluster, stop <-chan struct{}) {
	for i := range c.handlers {
		c.handlers[i].ClusterAdded(cluster, stop)
	}
}
// handleUpdate notifies every registered handler that a cluster was updated.
func (c *Controller) handleUpdate(cluster *Cluster, stop <-chan struct{}) {
	for i := range c.handlers {
		c.handlers[i].ClusterUpdated(cluster, stop)
	}
}
// handleDelete notifies every registered handler that a cluster was deleted.
func (c *Controller) handleDelete(key cluster.ID) {
	for i := range c.handlers {
		c.handlers[i].ClusterDeleted(key)
	}
}
// ListRemoteClusters provides debug info about connected remote clusters.
func (c *Controller) ListRemoteClusters() []cluster.DebugInfo {
	var out []cluster.DebugInfo
	for secretName, clusters := range c.cs.All() {
		for clusterID, cl := range clusters {
			// Report the most specific state: closed > timed out > synced > syncing.
			syncStatus := "syncing"
			switch {
			case cl.Closed():
				syncStatus = "closed"
			case cl.SyncDidTimeout():
				syncStatus = "timeout"
			case cl.HasSynced():
				syncStatus = "synced"
			}
			out = append(out, cluster.DebugInfo{
				ID:         clusterID,
				SecretName: secretName,
				SyncStatus: syncStatus,
			})
		}
	}
	return out
}
// GetRemoteKubeClient returns the raw Kubernetes clientset for the given
// cluster ID, or nil when the cluster is not registered.
func (c *Controller) GetRemoteKubeClient(clusterID cluster.ID) kubernetes.Interface {
	remoteCluster := c.cs.GetByID(clusterID)
	if remoteCluster == nil {
		return nil
	}
	return remoteCluster.Client.Kube()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package namespace
import (
"sync"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// DiscoveryFilter returns true if the given object should be included in discovery.
type DiscoveryFilter func(obj any) bool
// DiscoveryNamespacesFilter tracks the set of namespaces selected for discovery, which are updated by the discovery namespace controller.
// It exposes a filter function used for filtering out objects that don't reside in namespaces selected for discovery.
type DiscoveryNamespacesFilter interface {
// Filter returns true if the input object or namespace string resides in a namespace selected for discovery
Filter(obj any) bool
// SelectorsChanged is invoked when meshConfig's discoverySelectors change
SelectorsChanged(discoverySelectors []*metav1.LabelSelector)
// GetMembers returns the namespaces selected for discovery
GetMembers() sets.String
// AddHandler registers a handler on namespace, which will be triggered when namespace selected or deselected.
// If the namespaces have been synced, it will trigger the new added handler.
AddHandler(func(ns string, event model.Event))
}
// discoveryNamespacesFilter is the default DiscoveryNamespacesFilter implementation,
// backed by a namespace informer and guarded by an RWMutex.
type discoveryNamespacesFilter struct {
lock sync.RWMutex
// namespaces is the informer used to (re)list namespaces on selector changes.
namespaces kclient.Client[*corev1.Namespace]
// discoveryNamespaces is the current set of namespace names selected for discovery.
discoveryNamespaces sets.String
discoverySelectors []labels.Selector // nil if discovery selectors are not specified, permits all namespaces for discovery
// handlers are invoked when a namespace is selected or deselected.
handlers []func(ns string, event model.Event)
}
// NewDiscoveryNamespacesFilter creates a DiscoveryNamespacesFilter over the given
// namespace informer and discovery selectors, wiring namespace add/update/delete
// events to keep the membership set current and to notify registered handlers.
func NewDiscoveryNamespacesFilter(
	namespaces kclient.Client[*corev1.Namespace],
	discoverySelectors []*metav1.LabelSelector,
) DiscoveryNamespacesFilter {
	// convert LabelSelectors to Selectors
	selectors := make([]labels.Selector, 0, len(discoverySelectors))
	for _, selector := range discoverySelectors {
		ls, err := metav1.LabelSelectorAsSelector(selector)
		if err != nil {
			log.Errorf("error initializing discovery namespaces filter, invalid discovery selector: %v", err)
			// Fix: skip the invalid selector. LabelSelectorAsSelector returns a nil
			// Selector on error; appending it would panic later when isSelected or
			// Filter calls Matches on the nil interface.
			continue
		}
		selectors = append(selectors, ls)
	}
	f := &discoveryNamespacesFilter{
		namespaces:          namespaces,
		discoveryNamespaces: sets.New[string](),
		discoverySelectors:  selectors,
	}
	namespaces.AddEventHandler(controllers.EventHandler[*corev1.Namespace]{
		AddFunc: func(ns *corev1.Namespace) {
			// Notify handlers only when the namespace is newly selected.
			if f.namespaceCreated(ns.ObjectMeta) {
				f.lock.RLock()
				defer f.lock.RUnlock()
				f.notifyNamespaceHandlers(ns.Name, model.EventAdd)
			}
		},
		UpdateFunc: func(old, new *corev1.Namespace) {
			// A label change may select or deselect the namespace.
			membershipChanged, namespaceAdded := f.namespaceUpdated(old.ObjectMeta, new.ObjectMeta)
			if membershipChanged {
				if namespaceAdded {
					f.lock.RLock()
					defer f.lock.RUnlock()
					f.notifyNamespaceHandlers(new.Name, model.EventAdd)
				} else {
					f.lock.RLock()
					defer f.lock.RUnlock()
					f.notifyNamespaceHandlers(new.Name, model.EventDelete)
				}
			}
		},
		DeleteFunc: func(ns *corev1.Namespace) {
			f.namespaceDeleted(ns.ObjectMeta)
			// no need to invoke object handlers since objects within the namespace will trigger delete events
		},
	})
	return f
}
// Filter returns true if the input object (or bare namespace-name string)
// resides in a namespace selected for discovery. With no selectors configured,
// everything passes.
func (d *discoveryNamespacesFilter) Filter(obj any) bool {
	d.lock.RLock()
	defer d.lock.RUnlock()
	// permit all objects if discovery selectors are not specified
	if len(d.discoverySelectors) == 0 {
		return true
	}
	// Callers may pass a bare namespace name.
	if name, isString := obj.(string); isString {
		return d.discoveryNamespaces.Contains(name)
	}
	// When an object is deleted, obj could be a DeletionFinalStateUnknown marker item.
	object := controllers.ExtractObject(obj)
	if object == nil {
		return false
	}
	// Namespaces themselves are matched by their own name; everything else by
	// the namespace it resides in.
	targetNs := object.GetNamespace()
	if _, isNamespace := object.(*corev1.Namespace); isNamespace {
		targetNs = object.GetName()
	}
	return d.discoveryNamespaces.Contains(targetNs)
}
// SelectorsChanged recomputes the discovery filter state from the new discovery
// selectors, then notifies handlers for each namespace that was newly selected
// or deselected. Invoked when meshConfig's discoverySelectors change. If any
// selector is invalid, the whole update is abandoned.
//
// Fix: the previous version nested a full `for _, ns := range namespaceList`
// loop (shadowing `ns`) inside the outer per-namespace loop for the
// "no selectors" case, re-inserting every namespace once per namespace —
// accidentally O(n^2) for the same result. Each namespace is now handled once.
func (d *discoveryNamespacesFilter) SelectorsChanged(
	discoverySelectors []*metav1.LabelSelector,
) {
	d.lock.Lock()
	defer d.lock.Unlock()
	var selectors []labels.Selector
	newDiscoveryNamespaces := sets.New[string]()
	namespaceList := d.namespaces.List("", labels.Everything())
	// convert LabelSelectors to Selectors
	for _, selector := range discoverySelectors {
		ls, err := metav1.LabelSelectorAsSelector(selector)
		if err != nil {
			log.Errorf("error initializing discovery namespaces filter, invalid discovery selector: %v", err)
			return
		}
		selectors = append(selectors, ls)
	}
	// range over all namespaces to get discovery namespaces
	for _, ns := range namespaceList {
		// omitting discoverySelectors indicates discovering all namespaces
		if len(selectors) == 0 {
			newDiscoveryNamespaces.Insert(ns.Name)
			continue
		}
		for _, selector := range selectors {
			if selector.Matches(labels.Set(ns.Labels)) {
				newDiscoveryNamespaces.Insert(ns.Name)
				break
			}
		}
	}
	// Notify handlers of namespaces entering/leaving the discovery set.
	oldDiscoveryNamespaces := d.discoveryNamespaces
	selectedNamespaces := sets.SortedList(newDiscoveryNamespaces.Difference(oldDiscoveryNamespaces))
	deselectedNamespaces := sets.SortedList(oldDiscoveryNamespaces.Difference(newDiscoveryNamespaces))
	for _, ns := range selectedNamespaces {
		d.notifyNamespaceHandlers(ns, model.EventAdd)
	}
	for _, ns := range deselectedNamespaces {
		d.notifyNamespaceHandlers(ns, model.EventDelete)
	}
	// update filter state
	d.discoveryNamespaces = newDiscoveryNamespaces
	d.discoverySelectors = selectors
}
// notifyNamespaceHandlers invokes every registered handler for the namespace
// event. Callers hold d.lock (read or write) when invoking this.
func (d *discoveryNamespacesFilter) notifyNamespaceHandlers(ns string, event model.Event) {
	for i := range d.handlers {
		d.handlers[i](ns, event)
	}
}
// namespaceCreated adds a newly created namespace to the membership set when it
// is selected for discovery, reporting whether membership changed.
func (d *discoveryNamespacesFilter) namespaceCreated(ns metav1.ObjectMeta) (membershipChanged bool) {
	if !d.isSelected(ns.Labels) {
		return false
	}
	d.addNamespace(ns.Name)
	return true
}
// namespaceUpdated reconciles membership after a namespace update: a member that
// is no longer selected is removed; a non-member that is now selected is added.
// Reports whether membership changed and, if so, whether the namespace was added.
func (d *discoveryNamespacesFilter) namespaceUpdated(oldNs, newNs metav1.ObjectMeta) (membershipChanged bool, namespaceAdded bool) {
	wasMember := d.hasNamespace(oldNs.Name)
	nowSelected := d.isSelected(newNs.Labels)
	switch {
	case wasMember && !nowSelected:
		d.removeNamespace(oldNs.Name)
		return true, false
	case !wasMember && nowSelected:
		d.addNamespace(oldNs.Name)
		return true, true
	default:
		return false, false
	}
}
// namespaceDeleted removes a deleted namespace from the membership set when it
// was selected for discovery, reporting whether membership changed.
func (d *discoveryNamespacesFilter) namespaceDeleted(ns metav1.ObjectMeta) (membershipChanged bool) {
	if !d.isSelected(ns.Labels) {
		return false
	}
	d.removeNamespace(ns.Name)
	return true
}
// GetMembers returns a copy of the namespaces currently selected for discovery.
func (d *discoveryNamespacesFilter) GetMembers() sets.String {
	d.lock.RLock()
	members := d.discoveryNamespaces.Copy()
	d.lock.RUnlock()
	return members
}
// AddHandler registers a handler on namespace, which will be triggered when a
// namespace is selected or deselected. Namespaces already selected are replayed
// to the new handler as Add events before it is registered.
func (d *discoveryNamespacesFilter) AddHandler(f func(ns string, event model.Event)) {
	d.lock.Lock()
	defer d.lock.Unlock()
	for ns := range d.discoveryNamespaces {
		f(ns, model.EventAdd)
	}
	d.handlers = append(d.handlers, f)
}
// addNamespace records ns as selected for discovery.
func (d *discoveryNamespacesFilter) addNamespace(ns string) {
	d.lock.Lock()
	d.discoveryNamespaces.Insert(ns)
	d.lock.Unlock()
}
// hasNamespace reports whether ns is currently selected for discovery.
func (d *discoveryNamespacesFilter) hasNamespace(ns string) bool {
	d.lock.RLock()
	present := d.discoveryNamespaces.Contains(ns)
	d.lock.RUnlock()
	return present
}
// removeNamespace drops ns from the discovery set.
func (d *discoveryNamespacesFilter) removeNamespace(ns string) {
	d.lock.Lock()
	d.discoveryNamespaces.Delete(ns)
	d.lock.Unlock()
}
// isSelected reports whether the label set matches any discovery selector.
// With no selectors configured, every namespace is selected.
func (d *discoveryNamespacesFilter) isSelected(labels labels.Set) bool {
	d.lock.RLock()
	defer d.lock.RUnlock()
	if len(d.discoverySelectors) == 0 {
		return true
	}
	for i := range d.discoverySelectors {
		if d.discoverySelectors[i].Matches(labels) {
			return true
		}
	}
	return false
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"fmt"
"io"
"net"
"net/http"
"os"
"strconv"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/portforward"
"k8s.io/client-go/transport/spdy"
"istio.io/istio/pkg/log"
)
// PortForwarder manages the forwarding of a single port.
type PortForwarder interface {
// Start runs this forwarder.
Start() error
// Address returns the local forwarded address. Only valid while the forwarder is running.
Address() string
// Close this forwarder and release any resources.
Close()
// ErrChan returns a channel that returns an error when one is encountered. While Start() may return an initial error,
// the port-forward connection may be lost at anytime. The ErrChan can be read to determine if/when the port-forwarding terminates.
// This can return nil if the port forwarding stops gracefully.
ErrChan() <-chan error
// WaitForStop blocks until connection closed (e.g. control-C interrupt)
WaitForStop()
}
// Compile-time check that forwarder satisfies PortForwarder.
var _ PortForwarder = &forwarder{}
// forwarder implements PortForwarder on top of client-go's portforward package.
type forwarder struct {
// stopCh is closed by Close to terminate the forward.
stopCh chan struct{}
restConfig *rest.Config
podName string
ns string
localAddress string
// localPort may be 0 initially; Start replaces it with the assigned port.
localPort int
podPort int
// errCh carries forwarding errors (or nil on graceful stop); see ErrChan.
errCh chan error
}
// Start runs the forwarder in the background and blocks until it is either
// ready or has failed. After a successful return, Address reports the bound
// local address and ErrChan reports any later termination.
//
// Fixes: the readiness debug log passed f.podName twice, rendering
// "pod.pod:port" instead of "pod.namespace:port"; the build-failure error
// message read "port forwarded" instead of "port forwarder".
func (f *forwarder) Start() error {
	f.errCh = make(chan error, 1)
	readyCh := make(chan struct{}, 1)
	var fw *portforward.PortForwarder
	go func() {
		for {
			// Stop re-establishing the forward once Close has been called.
			select {
			case <-f.stopCh:
				return
			default:
			}
			var err error
			// Build a new port forwarder.
			fw, err = f.buildK8sPortForwarder(readyCh)
			if err != nil {
				f.errCh <- fmt.Errorf("building port forwarder: %v", err)
				return
			}
			// ForwardPorts blocks until the connection breaks or stopCh closes.
			if err = fw.ForwardPorts(); err != nil {
				log.Errorf("port forward failed: %v", err)
				f.errCh <- fmt.Errorf("port forward: %v", err)
				return
			}
			log.Infof("port forward completed without error")
			f.errCh <- nil
			// At this point, either the stopCh has been closed, or port forwarder connection is broken.
			// the port forwarder should have already been ready before.
			// No need to notify the ready channel anymore when forwarding again.
			readyCh = nil
		}
	}()
	// We want to block Start() until we have either gotten an error or have started
	// We may later get an error, but that is handled async.
	select {
	case err := <-f.errCh:
		return fmt.Errorf("failure running port forward process: %v", err)
	case <-readyCh:
		p, err := fw.GetPorts()
		if err != nil {
			return fmt.Errorf("failed to get ports: %v", err)
		}
		if len(p) == 0 {
			return fmt.Errorf("got no ports")
		}
		// Set local port now, as it may have been 0 as input
		f.localPort = int(p[0].Local)
		log.Debugf("Port forward established %v -> %v.%v:%v", f.Address(), f.podName, f.ns, f.podPort)
		// The forwarder is now ready.
		return nil
	}
}
// Address returns the local forwarded address as "host:port". Only meaningful
// while the forwarder is running (localPort is finalized by Start).
func (f *forwarder) Address() string {
	port := strconv.Itoa(f.localPort)
	return net.JoinHostPort(f.localAddress, port)
}
// Close terminates the forwarder by closing stopCh, which the underlying
// k8s port forwarder watches. NOTE(review): calling Close twice would panic
// on the double close — confirm callers invoke it at most once.
func (f *forwarder) Close() {
	close(f.stopCh)
	// Closing the stop channel should close anything
	// opened by f.forwarder.ForwardPorts()
}
// ErrChan returns the channel carrying asynchronous forwarding errors
// (nil is sent on graceful completion). Only valid after Start().
func (f *forwarder) ErrChan() <-chan error {
	return f.errCh
}
// WaitForStop blocks until stopCh is closed, i.e. until Close() is called.
func (f *forwarder) WaitForStop() {
	<-f.stopCh
}
// newPortForwarder constructs a PortForwarder targeting podName in namespace ns,
// forwarding localAddress:localPort -> podPort. An empty localAddress falls back
// to defaultLocalAddress; a zero localPort is resolved to a real port in Start().
func newPortForwarder(c *client, podName, ns, localAddress string, localPort, podPort int) (PortForwarder, error) {
	addr := localAddress
	if addr == "" {
		addr = defaultLocalAddress
	}
	fw := &forwarder{
		stopCh:       make(chan struct{}),
		restConfig:   c.config,
		podName:      podName,
		ns:           ns,
		localAddress: addr,
		localPort:    localPort,
		podPort:      podPort,
	}
	return fw, nil
}
// buildK8sPortForwarder creates (but does not start) a SPDY-based Kubernetes
// port forwarder for the target pod. readyCh is signaled once forwarding is
// established; it may be nil on reconnection attempts.
// Before returning, the pod is fetched and checked to be Running so that
// obvious failures surface early, mirroring kubectl's port-forward command.
func (f *forwarder) buildK8sPortForwarder(readyCh chan struct{}) (*portforward.PortForwarder, error) {
	restClient, err := rest.RESTClientFor(f.restConfig)
	if err != nil {
		return nil, err
	}
	req := restClient.Post().Resource("pods").Namespace(f.ns).Name(f.podName).SubResource("portforward")
	serverURL := req.URL()
	roundTripper, upgrader, err := roundTripperFor(f.restConfig)
	if err != nil {
		return nil, fmt.Errorf("failure creating roundtripper: %v", err)
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, serverURL)
	fw, err := portforward.NewOnAddresses(dialer,
		[]string{f.localAddress},
		// "local:pod"; a local port of 0 asks the kernel for any free port.
		[]string{fmt.Sprintf("%d:%d", f.localPort, f.podPort)},
		f.stopCh,
		readyCh,
		io.Discard,
		os.Stderr)
	if err != nil {
		return nil, fmt.Errorf("failed establishing port-forward: %v", err)
	}
	// Run the same check as k8s.io/kubectl/pkg/cmd/portforward/portforward.go
	// so that we will fail early if there is a problem contacting API server.
	podGet := restClient.Get().Resource("pods").Namespace(f.ns).Name(f.podName)
	obj, err := podGet.Do(context.TODO()).Get()
	if err != nil {
		// Fix: include the pod name; the previous message omitted it and read
		// awkwardly ("failed retrieving: <err> in the ... namespace").
		return nil, fmt.Errorf("failed retrieving pod %q in the %q namespace: %v", f.podName, f.ns, err)
	}
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return nil, fmt.Errorf("failed getting pod, object type is %T", obj)
	}
	if pod.Status.Phase != v1.PodRunning {
		return nil, fmt.Errorf("pod is not running. Status=%v", pod.Status.Phase)
	}
	return fw, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"context"
"sync"
"time"
"google.golang.org/grpc/credentials"
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pkg/log"
)
// tokenSupplier provides gRPC per-RPC credentials backed by a Kubernetes
// service-account token, recreating the token as it nears expiration.
type tokenSupplier struct {
	// The token itself. (These are public in case we need to serialize)
	Token   string
	Expires time.Time

	// mu guards Token/Expires; GetRequestMetadata takes the read lock on the
	// fast path and the write lock only when renewal is needed.
	mu                  sync.RWMutex
	// Parameters used to regenerate tokens via createServiceAccountToken.
	tokenNamespace      string
	tokenServiceAccount string
	audiences           []string
	expirationSeconds   int64
	kubeClient          Client
	// sunsetPeriod is how long before expiration that we start trying to renew
	// (configured via NewRPCCredentials' sunsetPeriodSeconds).
	sunsetPeriod time.Duration
}

// Compile-time check that tokenSupplier satisfies PerRPCCredentials.
var _ credentials.PerRPCCredentials = &tokenSupplier{}
// NewRPCCredentials creates a PerRPCCredentials capable of getting tokens from Istio and tracking their expiration
func NewRPCCredentials(kubeClient Client, tokenNamespace, tokenSA string,
	tokenAudiences []string, expirationSeconds, sunsetPeriodSeconds int64,
) (credentials.PerRPCCredentials, error) {
	// Eagerly request an initial token so failures surface at construction time.
	tr, err := createServiceAccountToken(context.TODO(), kubeClient, tokenNamespace, tokenSA, tokenAudiences, expirationSeconds)
	if err != nil {
		return nil, err
	}
	supplier := &tokenSupplier{
		Token:   tr.Status.Token,
		Expires: tr.Status.ExpirationTimestamp.Time,
		// Retain the request parameters so the token can be recreated if a
		// long-lived gRPC connection outlives it.
		tokenNamespace:      tokenNamespace,
		tokenServiceAccount: tokenSA,
		audiences:           tokenAudiences,
		expirationSeconds:   expirationSeconds,
		sunsetPeriod:        time.Duration(sunsetPeriodSeconds) * time.Second,
		kubeClient:          kubeClient,
	}
	return supplier, nil
}
// GetRequestMetadata fulfills the grpc/credentials.PerRPCCredentials interface.
// It returns a bearer-token authorization header, transparently requesting a
// replacement token once the cached one is within sunsetPeriod of expiring.
// If renewal fails, the existing (possibly stale) token is returned rather
// than failing the RPC.
func (its *tokenSupplier) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	its.mu.RLock()
	token := its.Token
	needRecreate := time.Until(its.Expires) < its.sunsetPeriod
	its.mu.RUnlock()
	if needRecreate {
		its.mu.Lock()
		// This checks the same condition as above. (The outer check is to bypass the mutex when it is too early to renew)
		if time.Until(its.Expires) < its.sunsetPeriod {
			log.Debug("GetRequestMetadata will generate a new token to replace one that is about to expire")
			// We have no 'renew' method, just request a new token
			tokenRequest, err := createServiceAccountToken(ctx, its.kubeClient, its.tokenNamespace, its.tokenServiceAccount,
				its.audiences, its.expirationSeconds)
			if err == nil {
				its.Token = tokenRequest.Status.Token
				its.Expires = tokenRequest.Status.ExpirationTimestamp.Time
			} else {
				// Best effort: log and fall through to the current token.
				log.Infof("GetRequestMetadata failed to recreate token: %v", err.Error())
			}
		}
		// Re-read under the write lock so we return whichever token is current.
		token = its.Token
		its.mu.Unlock()
	}
	return map[string]string{
		"authorization": "Bearer " + token,
	}, nil
}
// RequireTransportSecurity fulfills the grpc/credentials.PerRPCCredentials interface.
// Returning false allows these credentials to be used on connections without TLS.
func (its *tokenSupplier) RequireTransportSecurity() bool {
	return false
}
// createServiceAccountToken requests a fresh projected token for the given
// service account, scoped to the provided audiences and lifetime (seconds).
func createServiceAccountToken(ctx context.Context, client Client,
	tokenNamespace, tokenServiceAccount string, audiences []string, expirationSeconds int64,
) (*authenticationv1.TokenRequest, error) {
	request := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences:         audiences,
			ExpirationSeconds: &expirationSeconds,
		},
	}
	sa := client.Kube().CoreV1().ServiceAccounts(tokenNamespace)
	return sa.CreateToken(ctx, tokenServiceAccount, request, metav1.CreateOptions{})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"crypto/tls"
"fmt"
"net/http"
spdyStream "k8s.io/apimachinery/pkg/util/httpstream/spdy"
"k8s.io/client-go/rest"
"k8s.io/client-go/transport/spdy"
)
// roundTripperFor creates a SPDY upgrader that will work over custom transports.
func roundTripperFor(restConfig *rest.Config) (http.RoundTripper, spdy.Upgrader, error) {
	// Derive the TLS settings from the rest config.
	tlsConfig, err := rest.TLSConfigFor(restConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("failed getting TLS config: %w", err)
	}
	if tlsConfig == nil && restConfig.Transport != nil {
		// If using a custom transport, skip server verification on the upgrade.
		// nolint: gosec
		tlsConfig = &tls.Config{InsecureSkipVerify: true}
	}
	var upgrader *spdyStream.SpdyRoundTripper
	if proxy := restConfig.Proxy; proxy != nil {
		upgrader, err = spdyStream.NewRoundTripperWithProxy(tlsConfig, proxy)
	} else {
		upgrader, err = spdyStream.NewRoundTripper(tlsConfig)
	}
	if err != nil {
		return nil, nil, err
	}
	// Apply the config's auth/user-agent wrappers around the SPDY round tripper.
	wrapper, err := rest.HTTPWrappersForConfig(restConfig, upgrader)
	if err != nil {
		return nil, nil, fmt.Errorf("failed creating SPDY upgrade wrapper: %w", err)
	}
	return wrapper, upgrader, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	// Kept as a blank import: this fuzzing-instrumentation package was previously
	// referenced directly; the direct call has been restored to io.ReadAll.
	_ "github.com/AdamKorcz/bugdetectors/io"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	_ "k8s.io/client-go/plugin/pkg/client/auth" // allow out of cluster authentication
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/clientcmd/api"

	"istio.io/istio/pilot/pkg/config/kube/crd"
	"istio.io/istio/pilot/pkg/features"
	"istio.io/istio/pkg/config"
	"istio.io/istio/pkg/util/sets"
	istioversion "istio.io/istio/pkg/version"
)
// cronJobNameRegexp matches Job names of the form "<cronjob>-<timestamp>" where
// the trailing timestamp is 8-10 digits; used by GetDeployMetaFromPod to
// recover the owning CronJob's name.
var cronJobNameRegexp = regexp.MustCompile(`(.+)-\d{8,10}$`)
// BuildClientConfig builds a client rest config from a kubeconfig filepath and context.
// It overrides the current context with the one provided (empty to use default).
//
// This is a modified version of k8s.io/client-go/tools/clientcmd/BuildConfigFromFlags with the
// difference that it loads default configs if not running in-cluster.
func BuildClientConfig(kubeconfig, context string) (*rest.Config, error) {
	clientCfg := BuildClientCmd(kubeconfig, context)
	restCfg, err := clientCfg.ClientConfig()
	if err != nil {
		return nil, err
	}
	return SetRestDefaults(restCfg), nil
}
// BuildClientCmd builds a client cmd config from a kubeconfig filepath and context.
// It overrides the current context with the one provided (empty to use default).
//
// This is a modified version of k8s.io/client-go/tools/clientcmd/BuildConfigFromFlags with the
// difference that it loads default configs if not running in-cluster.
func BuildClientCmd(kubeconfig, context string, overrides ...func(*clientcmd.ConfigOverrides)) clientcmd.ClientConfig {
	if kubeconfig != "" {
		// Fall back to the default lookup chain when the explicit kubeconfig is
		// missing, empty, or otherwise unreadable.
		if info, err := os.Stat(kubeconfig); err != nil || info.Size() == 0 {
			kubeconfig = ""
		}
	}

	// Config loading rules:
	// 1. kubeconfig if it not empty string
	// 2. Config(s) in KUBECONFIG environment variable
	// 3. In cluster config if running in-cluster
	// 4. Use $HOME/.kube/config
	loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
	loadingRules.DefaultClientConfig = &clientcmd.DefaultClientConfig
	loadingRules.ExplicitPath = kubeconfig

	configOverrides := &clientcmd.ConfigOverrides{
		ClusterDefaults: clientcmd.ClusterDefaults,
		CurrentContext:  context,
	}
	for _, apply := range overrides {
		apply(configOverrides)
	}
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
}
// NewUntrustedRestConfig returns the rest.Config for the given kube config context.
// This is suitable for access to remote clusters from untrusted kubeConfig inputs.
// The kubeconfig is sanitized and unsafe auth methods are denied.
func NewUntrustedRestConfig(kubeConfig []byte, configOverrides ...func(*rest.Config)) (*rest.Config, error) {
	if len(kubeConfig) == 0 {
		return nil, fmt.Errorf("kubeconfig is empty")
	}
	raw, err := clientcmd.Load(kubeConfig)
	if err != nil {
		return nil, fmt.Errorf("kubeconfig cannot be loaded: %v", err)
	}
	if err := clientcmd.Validate(*raw); err != nil {
		return nil, fmt.Errorf("kubeconfig is not valid: %v", err)
	}
	// Reject auth mechanisms (exec plugins, client certs, ...) that are not allowlisted.
	if err := sanitizeKubeConfig(*raw, features.InsecureKubeConfigOptions); err != nil {
		return nil, fmt.Errorf("kubeconfig is not allowed: %v", err)
	}
	restConfig, err := clientcmd.NewDefaultClientConfig(*raw, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		return nil, err
	}
	for _, override := range configOverrides {
		override(restConfig)
	}
	return SetRestDefaults(restConfig), nil
}
// InClusterConfig returns the rest.Config for in cluster usage.
// Typically, DefaultRestConfig is used and this is auto detected; usage directly allows explicitly overriding to use in-cluster.
func InClusterConfig(fns ...func(*rest.Config)) (*rest.Config, error) {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	for _, apply := range fns {
		apply(cfg)
	}
	return SetRestDefaults(cfg), nil
}
// DefaultRestConfig returns the rest.Config for the given kube config file and context,
// after applying any provided override functions. Note: unlike InClusterConfig,
// defaults are applied inside BuildClientConfig.
func DefaultRestConfig(kubeconfig, configContext string, fns ...func(*rest.Config)) (*rest.Config, error) {
	cfg, err := BuildClientConfig(kubeconfig, configContext)
	if err != nil {
		return nil, err
	}
	for _, apply := range fns {
		apply(cfg)
	}
	return cfg, nil
}
// adjustCommand returns the last component of the
// OS-specific command path p for use in User-Agent,
// or "unknown" when p is empty.
func adjustCommand(p string) string {
	// Unlikely, but better than returning "".
	if p == "" {
		return "unknown"
	}
	return filepath.Base(p)
}
// IstioUserAgent returns the user agent string based on the command being used.
// example: pilot-discovery/1.9.5 or istioctl/1.10.0
// This is a specialized version of rest.DefaultKubernetesUserAgent()
func IstioUserAgent() string {
	command := adjustCommand(os.Args[0])
	return command + "/" + istioversion.Info.Version
}
// SetRestDefaults is a helper function that sets default values for the given rest.Config.
// This function is idempotent.
func SetRestDefaults(config *rest.Config) *rest.Config {
	if config.GroupVersion == nil || config.GroupVersion.Empty() {
		config.GroupVersion = &corev1.SchemeGroupVersion
	}
	if len(config.APIPath) == 0 {
		if len(config.GroupVersion.Group) == 0 {
			config.APIPath = "/api"
		} else {
			config.APIPath = "/apis"
		}
	}
	if len(config.ContentType) == 0 {
		// Fix: restore the straightforward feature-flag comparison; it had been
		// expanded into an unreadable per-character check by fuzzing instrumentation.
		if features.KubernetesClientContentType == "json" {
			config.ContentType = runtime.ContentTypeJSON
		} else {
			// Prefer to accept protobuf, but send JSON. This is due to some types (CRDs)
			// not accepting protobuf.
			// If we end up writing many core types in the future we may want to set ContentType to
			// ContentTypeProtobuf only for the core client.
			config.AcceptContentTypes = runtime.ContentTypeProtobuf + "," + runtime.ContentTypeJSON
			config.ContentType = runtime.ContentTypeJSON
		}
	}
	if config.NegotiatedSerializer == nil {
		// This codec factory ensures the resources are not converted. Therefore, resources
		// will not be round-tripped through internal versions. Defaulting does not happen
		// on the client.
		config.NegotiatedSerializer = serializer.NewCodecFactory(IstioScheme).WithoutConversion()
	}
	if len(config.UserAgent) == 0 {
		config.UserAgent = IstioUserAgent()
	}
	return config
}
// CheckPodReadyOrComplete returns nil if the given pod and all of its containers
// are ready or terminated successfully; otherwise an error naming the pod phase.
func CheckPodReadyOrComplete(pod *corev1.Pod) error {
	phase := pod.Status.Phase
	if phase == corev1.PodSucceeded {
		return nil
	}
	if phase == corev1.PodRunning {
		return CheckPodReady(pod)
	}
	return fmt.Errorf("%s", phase)
}
// CheckPodReady returns nil if the given pod is Running, every init and regular
// container reports Ready, and no PodReady condition is false.
func CheckPodReady(pod *corev1.Pod) error {
	if pod.Status.Phase != corev1.PodRunning {
		return fmt.Errorf("%s", pod.Status.Phase)
	}
	// All init containers must have completed and be ready.
	for _, status := range pod.Status.InitContainerStatuses {
		if !status.Ready {
			return fmt.Errorf("init container not ready: '%s'", status.Name)
		}
	}
	// All application containers must be ready.
	for _, status := range pod.Status.ContainerStatuses {
		if !status.Ready {
			return fmt.Errorf("container not ready: '%s'", status.Name)
		}
	}
	// Finally, the pod-level Ready condition must not be false.
	for _, condition := range pod.Status.Conditions {
		if condition.Type == corev1.PodReady && condition.Status != corev1.ConditionTrue {
			return fmt.Errorf("pod not ready, condition message: %v", condition.Message)
		}
	}
	return nil
}
// GetDeployMetaFromPod heuristically derives deployment metadata from the pod spec.
// It inspects the pod's controlling owner reference and applies well-known naming
// conventions to recover the original workload (Deployment, DeploymentConfig,
// CronJob, ...) name and kind. Returns empty metadata for a nil pod.
func GetDeployMetaFromPod(pod *corev1.Pod) (metav1.ObjectMeta, metav1.TypeMeta) {
	if pod == nil {
		return metav1.ObjectMeta{}, metav1.TypeMeta{}
	}
	// try to capture more useful namespace/name info for deployments, etc.
	// TODO(dougreid): expand to enable lookup of OWNERs recursively a la kubernetesenv
	deployMeta := pod.ObjectMeta
	deployMeta.ManagedFields = nil
	deployMeta.OwnerReferences = nil
	typeMetadata := metav1.TypeMeta{
		Kind:       "Pod",
		APIVersion: "v1",
	}
	if len(pod.GenerateName) > 0 {
		// if the pod name was generated (or is scheduled for generation), we can begin an investigation into the controlling reference for the pod.
		var controllerRef metav1.OwnerReference
		controllerFound := false
		for _, ref := range pod.GetOwnerReferences() {
			if ref.Controller != nil && *ref.Controller {
				controllerRef = ref
				controllerFound = true
				break
			}
		}
		if controllerFound {
			typeMetadata.APIVersion = controllerRef.APIVersion
			typeMetadata.Kind = controllerRef.Kind
			// heuristic for deployment detection
			deployMeta.Name = controllerRef.Name
			if typeMetadata.Kind == "ReplicaSet" && pod.Labels["pod-template-hash"] != "" && strings.HasSuffix(controllerRef.Name, pod.Labels["pod-template-hash"]) {
				name := strings.TrimSuffix(controllerRef.Name, "-"+pod.Labels["pod-template-hash"])
				deployMeta.Name = name
				typeMetadata.Kind = "Deployment"
			} else if typeMetadata.Kind == "ReplicationController" && pod.Labels["deploymentconfig"] != "" {
				// If the pod is controlled by the replication controller, which is created by the DeploymentConfig resource in
				// Openshift platform, set the deploy name to the deployment config's name, and the kind to 'DeploymentConfig'.
				//
				// nolint: lll
				// For DeploymentConfig details, refer to
				// https://docs.openshift.com/container-platform/4.1/applications/deployments/what-deployments-are.html#deployments-and-deploymentconfigs_what-deployments-are
				//
				// For the reference to the pod label 'deploymentconfig', refer to
				// https://github.com/openshift/library-go/blob/7a65fdb398e28782ee1650959a5e0419121e97ae/pkg/apps/appsutil/const.go#L25
				deployMeta.Name = pod.Labels["deploymentconfig"]
				typeMetadata.Kind = "DeploymentConfig"
				// NOTE(review): deployMeta.Labels aliases pod.Labels (ObjectMeta is a
				// shallow copy), so this delete also mutates the pod's label map —
				// confirm callers do not pass pods straight from an informer cache.
				delete(deployMeta.Labels, "deploymentconfig")
			} else if typeMetadata.Kind == "Job" {
				// Fix: restore the direct Kind comparison; it had been expanded into
				// an unreadable per-character check by fuzzing instrumentation.
				// If job name suffixed with `-<digit-timestamp>`, where the length of digit timestamp is 8~10,
				// trim the suffix and set kind to cron job.
				if jn := cronJobNameRegexp.FindStringSubmatch(controllerRef.Name); len(jn) == 2 {
					deployMeta.Name = jn[1]
					typeMetadata.Kind = "CronJob"
					// heuristically set cron job api version to v1beta1 as it cannot be derived from pod metadata.
					// Cronjob is not GA yet and latest version is v1beta1: https://github.com/kubernetes/enhancements/pull/978
					typeMetadata.APIVersion = "batch/v1beta1"
				}
			}
		}
	}
	if deployMeta.Name == "" {
		// if we haven't been able to extract a deployment name, then just give it the pod name
		deployMeta.Name = pod.Name
	}
	return deployMeta, typeMetadata
}
// MaxRequestBodyBytes represents the max size of Kubernetes objects we read. Kubernetes allows a 2x
// buffer on the max etcd size
// (https://github.com/kubernetes/kubernetes/blob/0afa569499d480df4977568454a50790891860f5/staging/src/k8s.io/apiserver/pkg/server/config.go#L362).
// We allow an additional 2x buffer, as it is still fairly cheap (6mb).
// Enforced by HTTPConfigReader.
const MaxRequestBodyBytes = int64(6 * 1024 * 1024)
// HTTPConfigReader reads an HTTP request body, imposing size restrictions aligned
// with Kubernetes limits (MaxRequestBodyBytes). It returns a RequestEntityTooLarge
// error when the body exceeds the limit, and always closes the request body.
func HTTPConfigReader(req *http.Request) ([]byte, error) {
	defer req.Body.Close()
	// Read one byte past the limit so an oversized body is detectable below.
	lr := &io.LimitedReader{
		R: req.Body,
		N: MaxRequestBodyBytes + 1,
	}
	// Fix: call io.ReadAll directly; the previous io2.ReadAll wrapper was
	// fuzzing instrumentation (github.com/AdamKorcz/bugdetectors), not intended
	// for production code.
	data, err := io.ReadAll(lr)
	if err != nil {
		return nil, err
	}
	if lr.N <= 0 {
		return nil, errors.NewRequestEntityTooLargeError(fmt.Sprintf("limit is %d", MaxRequestBodyBytes))
	}
	return data, nil
}
// StripUnusedFields is the transform function for shared informers,
// it removes unused fields from objects before they are stored in the cache to save memory.
func StripUnusedFields(obj any) (any, error) {
	accessor, ok := obj.(metav1.ObjectMetaAccessor)
	if !ok {
		// shouldn't happen
		return obj, nil
	}
	// ManagedFields is large and we never use it
	accessor.GetObjectMeta().SetManagedFields(nil)
	return obj, nil
}
// StripNodeUnusedFields is the transform function for shared node informers,
// it removes unused fields from objects before they are stored in the cache to save memory.
func StripNodeUnusedFields(obj any) (any, error) {
	accessor, ok := obj.(metav1.ObjectMetaAccessor)
	if !ok {
		// shouldn't happen
		return obj, nil
	}
	meta := accessor.GetObjectMeta()
	// ManagedFields is large and we never use it
	meta.SetManagedFields(nil)
	// Annotation is never used
	meta.SetAnnotations(nil)
	// OwnerReference is never used
	meta.SetOwnerReferences(nil)
	// only node labels and addresses are useful
	if node := obj.(*corev1.Node); node != nil {
		node.Status.Allocatable = nil
		node.Status.Capacity = nil
		node.Status.Images = nil
		node.Status.Conditions = nil
	}
	return obj, nil
}
// StripPodUnusedFields is the transform function for shared pod informers,
// it removes unused fields from objects before they are stored in the cache to save memory.
func StripPodUnusedFields(obj any) (any, error) {
	accessor, ok := obj.(metav1.ObjectMetaAccessor)
	if !ok {
		// shouldn't happen
		return obj, nil
	}
	// ManagedFields is large and we never use it
	accessor.GetObjectMeta().SetManagedFields(nil)
	// only container ports can be used
	if pod := obj.(*corev1.Pod); pod != nil {
		// Keep only containers that expose ports, and only their port lists.
		kept := []corev1.Container{}
		for i := range pod.Spec.Containers {
			if ports := pod.Spec.Containers[i].Ports; len(ports) > 0 {
				kept = append(kept, corev1.Container{Ports: ports})
			}
		}
		prev := pod.Spec
		pod.Spec = corev1.PodSpec{
			Containers:         kept,
			ServiceAccountName: prev.ServiceAccountName,
			NodeName:           prev.NodeName,
			HostNetwork:        prev.HostNetwork,
			Hostname:           prev.Hostname,
			Subdomain:          prev.Subdomain,
		}
		pod.Status.InitContainerStatuses = nil
		pod.Status.ContainerStatuses = nil
	}
	return obj, nil
}
// SlowConvertKindsToRuntimeObjects converts each IstioKind in the slice to a
// runtime.Object via SlowConvertToRuntimeObject, stopping at the first error.
func SlowConvertKindsToRuntimeObjects(in []crd.IstioKind) ([]runtime.Object, error) {
	out := make([]runtime.Object, 0, len(in))
	for i := range in {
		obj, err := SlowConvertToRuntimeObject(&in[i])
		if err != nil {
			return nil, err
		}
		out = append(out, obj)
	}
	return out, nil
}
// SlowConvertToRuntimeObject converts an IstioKind to a runtime.Object.
// As the name implies, it is not efficient: it round-trips through JSON.
func SlowConvertToRuntimeObject(in *crd.IstioKind) (runtime.Object, error) {
	raw, err := config.ToJSON(in)
	if err != nil {
		return nil, err
	}
	gvk := in.GetObjectKind().GroupVersionKind()
	decoded, _, err := IstioCodec.UniversalDeserializer().Decode(raw, &gvk, nil)
	if err != nil {
		return nil, err
	}
	return decoded, nil
}
// sanitizeKubeConfig sanitizes a kubeconfig file to strip out insecure settings which may leak
// confidential materials.
// See https://github.com/kubernetes/kubectl/issues/697
func sanitizeKubeConfig(config api.Config, allowlist sets.String) error {
	for k, auths := range config.AuthInfos {
		if ap := auths.AuthProvider; ap != nil {
			// Auth providers such as gcp, azure, exec, and openstack can read files
			// or execute code, so only oidc is accepted unconditionally.
			switch ap.Name {
			case "oidc":
				// OIDC is safe as it doesn't read files or execute code.
				// create-remote-secret specifically supports OIDC so its probably important to not break this.
			default:
				if !allowlist.Contains(ap.Name) {
					// All the others - gcp, azure, exec, and openstack - are unsafe
					return fmt.Errorf("auth provider %s is not allowed", ap.Name)
				}
			}
		}
		// File-reading and code-executing auth methods are denied unless explicitly allowlisted.
		if auths.ClientKey != "" && !allowlist.Contains("clientKey") {
			return fmt.Errorf("clientKey is not allowed")
		}
		if auths.ClientCertificate != "" && !allowlist.Contains("clientCertificate") {
			return fmt.Errorf("clientCertificate is not allowed")
		}
		if auths.TokenFile != "" && !allowlist.Contains("tokenFile") {
			return fmt.Errorf("tokenFile is not allowed")
		}
		if auths.Exec != nil && !allowlist.Contains("exec") {
			return fmt.Errorf("exec is not allowed")
		}
		// Reconstruct the AuthInfo so if a new field is added we will not include it without review
		config.AuthInfos[k] = &api.AuthInfo{
			// LocationOfOrigin: Not needed
			ClientCertificate:     auths.ClientCertificate,
			ClientCertificateData: auths.ClientCertificateData,
			ClientKey:             auths.ClientKey,
			ClientKeyData:         auths.ClientKeyData,
			Token:                 auths.Token,
			TokenFile:             auths.TokenFile,
			Impersonate:           auths.Impersonate,
			ImpersonateGroups:     auths.ImpersonateGroups,
			ImpersonateUserExtra:  auths.ImpersonateUserExtra,
			Username:              auths.Username,
			Password:              auths.Password,
			AuthProvider:          auths.AuthProvider, // Included because it is sanitized above
			Exec:                  auths.Exec,
			// Extensions: Not needed,
		}

		// Other relevant fields that are not acted on:
		// * Cluster.Server (and ProxyURL). This allows the user to send requests to arbitrary URLs, enabling potential SSRF attacks.
		//   However, we don't actually know what valid URLs are, so we cannot reasonably constrain this. Instead,
		//   we try to limit what confidential information could be exfiltrated (from AuthInfo). Additionally, the user cannot control
		//   the paths we send requests to, limiting potential attack scope.
		// * Cluster.CertificateAuthority. While this reads from files, the result is not attached to the request and is instead
		//   entirely local
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kube
import (
"fmt"
"strconv"
"k8s.io/apimachinery/pkg/util/version"
kubeVersion "k8s.io/apimachinery/pkg/version"
)
// IsAtLeastVersion returns true if the client is at least the specified version.
// For example, on Kubernetes v1.15.2, IsAtLeastVersion(13) == true, IsAtLeastVersion(17) == false
func IsAtLeastVersion(client Client, minorVersion uint) bool {
	cv, err := client.GetKubernetesVersion()
	if err != nil {
		// If the version cannot be determined, assume the check passes.
		return true
	}
	return IsKubeAtLeastOrLessThanVersion(cv, minorVersion, true)
}
// IsLessThanVersion returns true if the client version is less than the specified version.
// For example, on Kubernetes v1.15.2, IsLessThanVersion(13) == false, IsLessThanVersion(17) == true
func IsLessThanVersion(client Client, minorVersion uint) bool {
	cv, err := client.GetKubernetesVersion()
	if err != nil {
		// If the version cannot be determined, assume the check passes.
		return true
	}
	return IsKubeAtLeastOrLessThanVersion(cv, minorVersion, false)
}
// IsKubeAtLeastOrLessThanVersion returns whether the kubernetes version is at
// least (atLeast=true) or less than (atLeast=false) 1.<minorVersion>.
// Missing or unparseable versions are treated as satisfying the check.
func IsKubeAtLeastOrLessThanVersion(clusterVersion *kubeVersion.Info, minorVersion uint, atLeast bool) bool {
	if clusterVersion == nil {
		return true
	}
	got, err := version.ParseGeneric(fmt.Sprintf("v%s.%s.0", clusterVersion.Major, clusterVersion.Minor))
	if err != nil {
		return true
	}
	want, err := version.ParseGeneric(fmt.Sprintf("v1.%d.0", minorVersion))
	if err != nil {
		return true
	}
	if atLeast {
		return got.AtLeast(want)
	}
	return got.LessThan(want)
}
// GetVersionAsInt returns the kubernetes version as an integer.
// For example, on Kubernetes v1.15.2, GetVersionAsInt returns 115.
// Returns -1 if the version cannot be fetched or parsed (e.g. non-numeric
// Major/Minor strings).
func GetVersionAsInt(client Client) int {
	clusterVersion, err := client.GetKubernetesVersion()
	if err != nil {
		return -1
	}
	// String concatenation: "1" + "15" -> "115".
	version, err := strconv.Atoi(clusterVersion.Major + clusterVersion.Minor)
	if err != nil {
		return -1
	}
	return version
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package configmapwatcher
import (
"go.uber.org/atomic"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
)
// Controller watches a ConfigMap and calls the given callback when the ConfigMap changes.
// The ConfigMap is passed to the callback, or nil if it doesn't exist.
type Controller struct {
	// configmaps is a filtered client watching only the target ConfigMap.
	configmaps kclient.Client[*v1.ConfigMap]
	// queue serializes reconcile calls into processItem.
	queue controllers.Queue

	// Identity of the single watched ConfigMap.
	configMapNamespace string
	configMapName      string
	// callback is invoked with the current ConfigMap (or nil) on every change.
	callback func(*v1.ConfigMap)

	// hasSynced is set after the first processItem call.
	hasSynced atomic.Bool
}
// NewController returns a new ConfigMap watcher controller.
func NewController(client kube.Client, namespace, name string, callback func(*v1.ConfigMap)) *Controller {
	ctl := &Controller{
		configMapNamespace: namespace,
		configMapName:      name,
		callback:           callback,
	}
	// Restrict the watch to the single ConfigMap we care about.
	ctl.configmaps = kclient.NewFiltered[*v1.ConfigMap](client, kclient.Filter{
		Namespace:     namespace,
		FieldSelector: fields.OneTermEqualSelector(metav1.ObjectNameField, name).String(),
	})
	ctl.queue = controllers.NewQueue("configmap "+name, controllers.WithReconciler(ctl.processItem))
	handler := controllers.FilteredObjectSpecHandler(ctl.queue.AddObject, func(o controllers.Object) bool {
		// Filter out other configmaps
		return o.GetName() == name && o.GetNamespace() == namespace
	})
	ctl.configmaps.AddEventHandler(handler)
	return ctl
}
// Run starts the informer, waits for its cache to sync, then runs the queue
// until stop is closed. Returns early (without running the queue) if the
// cache fails to sync.
func (c *Controller) Run(stop <-chan struct{}) {
	// Start informer immediately instead of with the rest. This is because we use configmapwatcher for
	// single types (so its never shared), and for use cases where we need the results immediately
	// during startup.
	c.configmaps.Start(stop)
	if !kube.WaitForCacheSync("configmap "+c.configMapName, stop, c.configmaps.HasSynced) {
		return
	}
	c.queue.Run(stop)
}
// HasSynced returns whether the underlying cache has synced and the callback has been called at least once.
// NOTE(review): this delegates to the queue's sync state; the Controller's
// hasSynced field (set by processItem) is not consulted here — confirm the
// queue's HasSynced implies at least one reconcile.
func (c *Controller) HasSynced() bool {
	return c.queue.HasSynced()
}
// processItem is the queue reconciler: it looks up the current state of the
// watched ConfigMap (nil if deleted/absent), invokes the callback with it, and
// records that at least one callback has run.
func (c *Controller) processItem(name types.NamespacedName) error {
	cm := c.configmaps.Get(name.Name, name.Namespace)
	c.callback(cm)
	c.hasSynced.Store(true)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package lazy is a package to expose lazily computed values.
// The concepts and code are heavily influenced by https://cs.opensource.google/go/go/+/go1.19:src/sync/once.go.
package lazy
import (
"sync"
"sync/atomic"
)
// Lazy represents a value whose computation is deferred until the first access.
type Lazy[T any] interface {
	// Get returns the value, computing it if necessary. Any error produced by
	// the computation is returned alongside the value.
	Get() (T, error)
}
// lazyImpl is the sole implementation of Lazy, structured like sync.Once
// (atomic fast path, mutex-guarded slow path).
type lazyImpl[T any] struct {
	// getter computes the value on demand.
	getter func() (T, error)
	// retry, if true, will ensure getter() is called for each Get() until a nil error is returned.
	retry bool
	// Cached responses. Note: with retry enabled, these remain unset until
	// getter() succeeds (returns a nil error).
	res T
	err error
	// done is 1 once res/err hold the final cached result; accessed atomically.
	done uint32
	// m serializes the slow path.
	m sync.Mutex
}
// Compile-time assertion that lazyImpl implements Lazy.
var _ Lazy[any] = &lazyImpl[any]{}

// New returns a new lazily computed value. The value is guaranteed to only be computed a single time.
// The result — including a non-nil error — is cached and returned on every subsequent Get().
func New[T any](f func() (T, error)) Lazy[T] {
	return &lazyImpl[T]{getter: f}
}
// NewWithRetry returns a new lazily computed value. The value will be computed on each call until a
// nil error is returned; after the first success the value is cached and reused.
func NewWithRetry[T any](f func() (T, error)) Lazy[T] {
	return &lazyImpl[T]{getter: f, retry: true}
}
// Get returns the lazily computed value, running the computation on first use.
// The fast path is a single atomic load; the mutex-guarded slow path lives in a
// separate function so Get itself can be inlined.
func (l *lazyImpl[T]) Get() (T, error) {
	if atomic.LoadUint32(&l.done) == 1 {
		return l.res, l.err
	}
	return l.doSlow()
}
// doSlow is Get's slow path: it serializes computation with a mutex and
// re-checks done under the lock, mirroring sync.Once's doSlow.
func (l *lazyImpl[T]) doSlow() (T, error) {
	l.m.Lock()
	defer l.m.Unlock()
	if l.done == 0 {
		// Assume success; reset to 0 below if a retryable failure occurs.
		done := uint32(1)
		// Defer in case of panic: like sync.Once, a panicking getter still
		// counts as done (unless retry resets the flag first).
		defer func() {
			atomic.StoreUint32(&l.done, done)
		}()
		res, err := l.getter()
		if l.retry && err != nil {
			// Retry mode: a failure is not cached; the next Get() calls
			// getter() again.
			done = 0
		} else {
			l.res, l.err = res, err
		}
		return res, err
	}
	return l.res, l.err
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ledger implements a modified map with three unique characteristics:
// 1. every unique state of the map is given a unique hash
// 2. prior states of the map are retained for a fixed period of time
// 3. given a previous hash, we can retrieve a previous state from the map, if it is still retained.
package ledger
import (
"encoding/base64"
"time"
"github.com/spaolacci/murmur3"
)
// Ledger exposes a modified map with three unique characteristics:
// 1. every unique state of the map is given a unique hash
// 2. prior states of the map are retained for a fixed period of time
// 3. given a previous hash, we can retrieve a previous state from the map, if it is still retained.
type Ledger interface {
	// Put adds or overwrites a key in the Ledger, returning the new root hash.
	Put(key, value string) (string, error)
	// Delete removes a key from the Ledger, which may still be read using GetPreviousValue
	Delete(key string) error
	// Get returns the value of the key from the Ledger's current state
	Get(key string) (string, error)
	// RootHash is the hash of all keys and values currently in the Ledger
	RootHash() string
	// GetPreviousValue executes a get against a previous version of the ledger, using that version's root hash.
	GetPreviousValue(previousRootHash, key string) (result string, err error)
}
// smtLedger implements Ledger on top of a sparse Merkle tree.
type smtLedger struct {
	tree *smt
}
// Make returns a Ledger which will retain previous nodes after they are deleted
// for at least the given retention duration.
func Make(retention time.Duration) Ledger {
	// nil cache: newSMT defaults to a TTL cache with effectively no expiration.
	return smtLedger{tree: newSMT(hasher, nil, retention)}
}
// Put adds a key value pair to the ledger, overwriting previous values and marking them for
// removal after the retention specified in Make().
// The returned string is the tree's new root after the update.
func (s smtLedger) Put(key, value string) (result string, err error) {
	// Both key and value are coerced to the tree's fixed 8-byte size:
	// keys by hashing (coerceKeyToHashLen), values by pad/truncate.
	b, err := s.tree.Update([][]byte{coerceKeyToHashLen(key)}, [][]byte{coerceToHashLen(value)})
	result = string(b)
	return
}
// Delete removes a key value pair from the ledger, marking it for removal after the retention
// specified in Make(). Until then it may still be read via GetPreviousValue with an older root hash.
func (s smtLedger) Delete(key string) (err error) {
	// The key must be coerced to the tree's fixed hash length exactly as in
	// Put and GetPreviousValue; writing defaultLeaf marks the leaf as deleted.
	// (Previously the raw key bytes were passed, so deletions could never match
	// the hashed keys written by Put.)
	_, err = s.tree.Update([][]byte{coerceKeyToHashLen(key)}, [][]byte{defaultLeaf})
	return
}
// GetPreviousValue returns the value of key when the ledger's RootHash was previousRootHash,
// if it is still retained.
func (s smtLedger) GetPreviousValue(previousRootHash, key string) (result string, err error) {
	// Root hashes are exposed to callers base64-encoded (see RootHash).
	prevBytes, err := base64.StdEncoding.DecodeString(previousRootHash)
	if err != nil {
		return "", err
	}
	b, err := s.tree.GetPreviousValue(prevBytes, coerceKeyToHashLen(key))
	var i int
	// trim leading 0's from b: values were left-padded with zeros by
	// coerceToHashLen, so this recovers the original short value.
	// NOTE(review): an all-zero value yields its final zero byte rather than
	// "" — confirm callers never store all-zero values.
	for i = range b {
		if b[i] != 0 {
			break
		}
	}
	result = string(b[i:])
	return
}
// Get returns the current value of key by querying the tree at its current root hash.
func (s smtLedger) Get(key string) (result string, err error) {
	return s.GetPreviousValue(s.RootHash(), key)
}
// RootHash returns the hash of the current state of the ledger, base64-encoded
// so it can round-trip through GetPreviousValue.
func (s smtLedger) RootHash() string {
	return base64.StdEncoding.EncodeToString(s.tree.Root())
}
// coerceKeyToHashLen maps an arbitrary-length key onto the tree's fixed 8-byte
// key size by hashing it with 64-bit murmur3.
func coerceKeyToHashLen(val string) []byte {
	hasher := murmur3.New64()
	// murmur3's Write never fails; the error is deliberately ignored.
	_, _ = hasher.Write([]byte(val))
	return hasher.Sum(nil)
}
// coerceToHashLen forces a value to exactly the tree's hash width:
// shorter values are left-padded with zero bytes, longer ones truncated.
func coerceToHashLen(val string) []byte {
	// hash length is fixed at 64 bits (8 bytes) until generic support is added
	const hashBytes = 64 / 8
	raw := []byte(val)
	if len(raw) >= hashBytes {
		return raw[:hashBytes]
	}
	padded := make([]byte, hashBytes)
	copy(padded[hashBytes-len(raw):], raw)
	return padded
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ledger
import (
"bytes"
"fmt"
"sync"
"time"
"istio.io/istio/pkg/cache"
)
// The smt is derived from https://github.com/aergoio/SMT with modifications
// to remove unneeded features, and to support retention of old nodes for a fixed time.
// The aergoio smt license is as follows:
/*
MIT License
Copyright (c) 2018 aergo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
© 2019 GitHub, Inc.
*/
// TODO when using the smt, make sure keys and values are same length as hash

// smt is a sparse Merkle tree.
type smt struct {
	// rootMu guards root.
	rootMu sync.RWMutex
	// root is the current root of the smt.
	root []byte
	// defaultHashes are the default values of empty trees
	defaultHashes [][]byte
	// db holds the cache and related locks
	db *cacheDB
	// hash is the hash function used in the trie
	hash func(data ...[]byte) []byte
	// trieHeight is the number of bits in a key
	trieHeight int
	// the minimum length of time old nodes will be retained.
	retentionDuration time.Duration
	// lock is for the whole struct
	lock sync.RWMutex
	// atomicUpdate, commit all the changes made by intermediate update calls
	atomicUpdate bool
}
// forever is the closest time.Duration comes to Forever, with a duration of ~145 years.
// We can't use the int64 max because the duration gets added to Now(), and the ints
// roll over, causing an immediate expiration (ironic, eh?)
const forever time.Duration = 1<<(63-1) - 1
// newSMT creates a new smt given a hash function, cache (nil will be defaulted to TTLCache), and retention
// duration for old nodes.
func newSMT(hash func(data ...[]byte) []byte, updateCache cache.ExpiringCache, retentionDuration time.Duration) *smt {
	if updateCache == nil {
		// Default: entries effectively never expire (TTL of ~forever, swept each second).
		updateCache = cache.NewTTL(forever, time.Second)
	}
	s := &smt{
		hash:              hash,
		trieHeight:        len(hash([]byte("height"))) * 8, // hash any string to get output length
		retentionDuration: retentionDuration,
	}
	s.db = &cacheDB{
		updatedNodes: byteCache{cache: updateCache},
	}
	s.loadDefaultHashes()
	return s
}
// Root returns the current root hash of the tree (nil when the tree is empty).
func (s *smt) Root() []byte {
	s.rootMu.RLock()
	root := s.root
	s.rootMu.RUnlock()
	return root
}
// loadDefaultHashes precomputes the hash of an empty subtree at every height:
// level 0 is defaultLeaf, and each higher level hashes two copies of the level
// below it.
func (s *smt) loadDefaultHashes() {
	s.defaultHashes = make([][]byte, s.trieHeight+1)
	s.defaultHashes[0] = defaultLeaf
	for height := 1; height <= s.trieHeight; height++ {
		below := s.defaultHashes[height-1]
		s.defaultHashes[height] = s.hash(below, below)
	}
}
// Update adds a sorted list of keys and their values to the trie
// If Update is called multiple times, only the state after the last update
// is committed.
// When calling Update multiple times without commit, make sure the
// values of different keys are unique(hash contains the key for example)
// otherwise some subtree may get overwritten with the wrong hash.
// Returns the new root (nil when the last key was deleted and the tree is empty).
func (s *smt) Update(keys, values [][]byte) ([]byte, error) {
	s.lock.Lock()
	defer s.lock.Unlock()
	s.atomicUpdate = true
	// The recursive update reports through a channel because it may fan out
	// into parallel goroutines for the left/right subtrees.
	ch := make(chan result, 1)
	s.update(s.Root(), keys, values, nil, 0, s.trieHeight, false, true, ch)
	result := <-ch
	if result.err != nil {
		return nil, result.err
	}
	s.rootMu.Lock()
	defer s.rootMu.Unlock()
	if len(result.update) != 0 {
		// Nodes carry a trailing flag byte; the root is just the hash portion.
		s.root = result.update[:hashLength]
	} else {
		// An empty result means every key was deleted: the tree is empty.
		s.root = nil
	}
	return s.root, nil
}
// result is used to contain the result of goroutines and is sent through a channel.
type result struct {
	// update is the new node (hash plus flag byte), or nil for a default/deleted subtree.
	update []byte
	err    error
}
// update adds a sorted list of keys and their values to the trie.
// It returns the root of the updated subtree through ch.
func (s *smt) update(root []byte, keys, values, batch [][]byte, iBatch, height int, shortcut, store bool, ch chan<- result) {
	if height == 0 {
		// Leaf level: a defaultLeaf value encodes deletion (empty node).
		if bytes.Equal(values[0], defaultLeaf) {
			ch <- result{nil, nil}
		} else {
			ch <- result{values[0], nil}
		}
		return
	}
	batch, iBatch, lnode, rnode, isShortcut, err := s.loadChildren(root, height, iBatch, batch)
	if err != nil {
		ch <- result{nil, err}
		return
	}
	if isShortcut {
		keys, values = s.maybeAddShortcutToKV(keys, values, lnode[:hashLength], rnode[:hashLength])
		// The shortcut node was added to keys and values so consider this subtree default.
		lnode, rnode = nil, nil
		// update in the batch (set key, value to default so the next loadChildren is correct)
		batch[2*iBatch+1] = nil
		batch[2*iBatch+2] = nil
	}
	// Split the keys array so each branch can be updated in parallel
	// Does this require that keys are sorted? Yes, see Update()
	lkeys, rkeys := s.splitKeys(keys, s.trieHeight-height)
	splitIndex := len(lkeys)
	lvalues, rvalues := values[:splitIndex], values[splitIndex:]
	if shortcut {
		store = false    // stop storing only after the shortcut node.
		shortcut = false // remove shortcut node flag
	}
	if len(lnode) == 0 && len(rnode) == 0 && len(keys) == 1 && store {
		// Empty subtree receiving a single key: store the key/value in a
		// shortcut node rather than extending the branch down to height 0...
		if !bytes.Equal(values[0], defaultLeaf) {
			shortcut = true
		} else {
			// ...unless the single operation is a delete, in which case there
			// is nothing to store.
			store = false
		}
	}
	switch {
	case len(lkeys) == 0 && len(rkeys) > 0:
		s.updateRight(lnode, rnode, root, keys, values, batch, iBatch, height, shortcut, store, ch)
	case len(lkeys) > 0 && len(rkeys) == 0:
		s.updateLeft(lnode, rnode, root, keys, values, batch, iBatch, height, shortcut, store, ch)
	default:
		s.updateParallel(lnode, rnode, root, keys, values, batch, lkeys, rkeys, lvalues, rvalues, iBatch, height,
			shortcut, store, ch)
	}
}
// updateParallel updates both sides of the trie simultaneously
func (s *smt) updateParallel(lnode, rnode, root []byte, keys, values, batch, lkeys, rkeys, lvalues, rvalues [][]byte,
	iBatch, height int, shortcut, store bool, ch chan<- result,
) {
	// keys are separated between the left and right branches
	// update the branches in parallel
	lch := make(chan result, 1)
	rch := make(chan result, 1)
	go s.update(lnode, lkeys, lvalues, batch, 2*iBatch+1, height-1, shortcut, store, lch)
	go s.update(rnode, rkeys, rvalues, batch, 2*iBatch+2, height-1, shortcut, store, rch)
	// Both receives complete before any error is reported, so neither goroutine
	// can be left blocked on its (buffered) channel.
	lresult := <-lch
	rresult := <-rch
	if lresult.err != nil {
		ch <- result{nil, lresult.err}
		return
	}
	if rresult.err != nil {
		ch <- result{nil, rresult.err}
		return
	}
	ch <- result{s.interiorHash(lresult.update, rresult.update, height, iBatch, root, shortcut, store, keys,
		values, batch), nil}
}
// updateRight updates the right side of the tree
func (s *smt) updateRight(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, shortcut,
	store bool, ch chan<- result,
) {
	// all the keys go in the right subtree
	newch := make(chan result, 1)
	s.update(rnode, keys, values, batch, 2*iBatch+2, height-1, shortcut, store, newch)
	res := <-newch
	if res.err != nil {
		ch <- result{nil, res.err}
		return
	}
	// The left child is untouched; combine it with the new right child.
	ch <- result{s.interiorHash(lnode, res.update, height, iBatch, root, shortcut, store, keys, values,
		batch), nil}
}
// updateLeft updates the left side of the tree
func (s *smt) updateLeft(lnode, rnode, root []byte, keys, values, batch [][]byte, iBatch, height int, shortcut,
	store bool, ch chan<- result,
) {
	// all the keys go in the left subtree
	newch := make(chan result, 1)
	s.update(lnode, keys, values, batch, 2*iBatch+1, height-1, shortcut, store, newch)
	res := <-newch
	if res.err != nil {
		ch <- result{nil, res.err}
		return
	}
	// The right child is untouched; combine it with the new left child.
	ch <- result{s.interiorHash(res.update, rnode, height, iBatch, root, shortcut, store, keys, values,
		batch), nil}
}
// splitKeys divides the sorted key list into the keys bound for the left branch
// (bit at `height` clear) and the right branch (bit set). Because the keys are
// sorted, the split point is simply the first key whose bit is set.
func (s *smt) splitKeys(keys [][]byte, height int) ([][]byte, [][]byte) {
	split := len(keys)
	for i := range keys {
		if bitIsSet(keys[i], height) {
			split = i
			break
		}
	}
	if split == len(keys) {
		// No key has the bit set: everything belongs to the left branch.
		return keys, nil
	}
	return keys[:split], keys[split:]
}
// maybeAddShortcutToKV adds a shortcut key to the keys array to be updated.
// this is used when a subtree containing a shortcut node is being updated.
// keys is assumed sorted; sort order is preserved in the returned slices.
func (s *smt) maybeAddShortcutToKV(keys, values [][]byte, shortcutKey, shortcutVal []byte) ([][]byte, [][]byte) {
	newKeys := make([][]byte, 0, len(keys)+1)
	newVals := make([][]byte, 0, len(keys)+1)
	if bytes.Compare(shortcutKey, keys[0]) < 0 {
		// Shortcut sorts before every incoming key: prepend it.
		newKeys = append(newKeys, shortcutKey)
		newKeys = append(newKeys, keys...)
		newVals = append(newVals, shortcutVal)
		newVals = append(newVals, values...)
	} else if bytes.Compare(shortcutKey, keys[len(keys)-1]) > 0 {
		// Shortcut sorts after every incoming key: append it.
		newKeys = append(newKeys, keys...)
		newKeys = append(newKeys, shortcutKey)
		newVals = append(newVals, values...)
		newVals = append(newVals, shortcutVal)
	} else {
		// Shortcut falls somewhere inside keys: find its insertion point.
		higher := false
		for i, key := range keys {
			if bytes.Equal(shortcutKey, key) {
				// the shortcut key is being updated; the incoming value wins.
				return keys, values
			}
			if !higher && bytes.Compare(shortcutKey, key) > 0 {
				higher = true
				continue
			}
			if higher && bytes.Compare(shortcutKey, key) < 0 {
				// insert shortcut in slices
				newKeys = append(newKeys, keys[:i]...)
				newKeys = append(newKeys, shortcutKey)
				newKeys = append(newKeys, keys[i:]...)
				newVals = append(newVals, values[:i]...)
				newVals = append(newVals, shortcutVal)
				newVals = append(newVals, values[i:]...)
				break
			}
		}
	}
	return newKeys, newVals
}
const batchLen int = 31
// loadChildren looks for the children of a node.
// if the node is not stored in cache, it will be loaded from db.
// It returns the (possibly freshly loaded) batch, the node's index within it,
// the left and right child nodes, and whether the node is a shortcut.
func (s *smt) loadChildren(root []byte, height, iBatch int, batch [][]byte) ([][]byte, int, []byte, []byte, bool,
	error,
) {
	isShortcut := false
	if height%4 == 0 {
		// Batch boundary: a new 4-level batch of nodes starts here.
		if len(root) == 0 {
			// create a new default batch
			batch = make([][]byte, batchLen)
			batch[0] = []byte{0}
		} else {
			var err error
			batch, err = s.loadBatch(root[:hashLength])
			if err != nil {
				return nil, 0, nil, nil, false, err
			}
		}
		iBatch = 0
		// batch[0] is a one-byte header; 1 marks the batch root as a shortcut.
		if batch[0][0] == 1 {
			isShortcut = true
		}
	} else if len(batch[iBatch]) != 0 && batch[iBatch][hashLength] == 1 {
		// Inside a batch, the flag byte stored after the hash marks a shortcut.
		isShortcut = true
	}
	return batch, iBatch, batch[2*iBatch+1], batch[2*iBatch+2], isShortcut, nil
}
// loadBatch fetches a batch of nodes in cache or db
func (s *smt) loadBatch(root []byte) ([][]byte, error) {
	var node hash
	copy(node[:], root)
	// checking updated nodes is useful if get() or update() is called twice in a row without db commit
	s.db.updatedMux.RLock()
	val, exists := s.db.updatedNodes.Get(node)
	s.db.updatedMux.RUnlock()
	if exists {
		if s.atomicUpdate {
			// Return a copy so that Commit() doesn't have to be called at
			// each block and still commit every state transition.
			newVal := make([][]byte, batchLen)
			copy(newVal, val)
			return newVal, nil
		}
		return val, nil
	}
	// NOTE(review): no disk db backs this cache in this fork; a miss means the
	// node expired from the cache (or the hash is unknown).
	return nil, fmt.Errorf("the trie node %x is unavailable in the disk db, db may be corrupted", root)
}
// interiorHash hashes 2 children to get the parent hash and stores it in the updatedNodes and maybe in liveCache.
// the key is the hash and the value is the appended child nodes or the appended key/value in case of a shortcut.
// keys of go mappings cannot be byte slices so the hash is copied to a byte array
//
// Flag byte appended to each stored node: 0 = interior node, 1 = shortcut node,
// 2 = the raw key/value halves of a shortcut.
func (s *smt) interiorHash(left, right []byte, height, iBatch int, oldRoot []byte, shortcut, store bool, keys, values,
	batch [][]byte,
) []byte {
	var h []byte
	if len(left) == 0 && len(right) == 0 {
		// if a key was deleted, the node becomes default
		batch[2*iBatch+1] = left
		batch[2*iBatch+2] = right
		s.deleteOldNode(oldRoot)
		return nil
	} else if len(left) == 0 {
		h = s.hash(s.defaultHashes[height-1], right[:hashLength])
	} else if len(right) == 0 {
		h = s.hash(left[:hashLength], s.defaultHashes[height-1])
	} else {
		h = s.hash(left[:hashLength], right[:hashLength])
	}
	if !store {
		// a shortcut node cannot move up
		return append(h, 0)
	}
	if !shortcut {
		h = append(h, 0)
	} else {
		// store the value at the shortcut node instead of height 0.
		h = append(h, 1)
		// NOTE(review): append may write the flag byte into keys[0]/values[0]'s
		// backing array when capacity allows — confirm callers never reuse
		// those slices after this point.
		left = append(keys[0], 2)
		right = append(values[0], 2)
	}
	batch[2*iBatch+2] = right
	batch[2*iBatch+1] = left
	// maybe store batch node: only batch roots (every 4th level) are persisted.
	if (height)%4 == 0 {
		if shortcut {
			batch[0] = []byte{1}
		} else {
			batch[0] = []byte{0}
		}
		s.storeNode(batch, h, oldRoot)
	}
	return h
}
// storeNode records a batch under its new hash and retires the node it
// replaces. A no-op when the hash did not change.
func (s *smt) storeNode(batch [][]byte, h, oldRoot []byte) {
	if bytes.Equal(h, oldRoot) {
		return
	}
	// Map keys cannot be byte slices, so copy the hash into a fixed array.
	var node hash
	copy(node[:], h)
	s.db.updatedMux.Lock()
	s.db.updatedNodes.Set(node, batch)
	s.db.updatedMux.Unlock()
	s.deleteOldNode(oldRoot)
}
// deleteOldNode deletes an old node that has been updated
func (s *smt) deleteOldNode(root []byte) {
	var node hash
	copy(node[:], root)
	if !s.atomicUpdate {
		// dont delete old nodes with atomic updated except when
		// moving up a shortcut, we dont record every single move
		s.db.updatedMux.Lock()
		// Instead of deleting outright, re-set the entry with the retention TTL
		// so old roots stay resolvable (GetPreviousValue) until retention lapses.
		if val, ok := s.db.updatedNodes.Get(node); ok {
			s.db.updatedNodes.SetWithExpiration(node, val, s.retentionDuration)
		}
		s.db.updatedMux.Unlock()
	}
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ledger
import (
"bytes"
)
// Get fetches the value of a key by going down the current trie root.
// A nil value with nil error means the key is absent.
func (s *smt) Get(key []byte) ([]byte, error) {
	return s.GetPreviousValue(s.Root(), key)
}
// GetPreviousValue returns the value as of the specified root hash.
func (s *smt) GetPreviousValue(prevRoot []byte, key []byte) ([]byte, error) {
	s.lock.RLock()
	defer s.lock.RUnlock()
	// NOTE(review): this writes atomicUpdate while holding only the read lock,
	// so concurrent readers race on the field — confirm callers serialize
	// reads, or consider taking the write lock here.
	s.atomicUpdate = false
	return s.get(prevRoot, key, nil, 0, s.trieHeight)
}
// get fetches the value of a key given a trie root
func (s *smt) get(root []byte, key []byte, batch [][]byte, iBatch, height int) ([]byte, error) {
	if len(root) == 0 {
		// empty subtree: the key is not present.
		return nil, nil
	}
	if height == 0 {
		// leaf level: the node is the value itself.
		return root[:hashLength], nil
	}
	// Fetch the children of the node
	batch, iBatch, lnode, rnode, isShortcut, err := s.loadChildren(root, height, iBatch, batch)
	if err != nil {
		return nil, err
	}
	if isShortcut {
		// A shortcut stores a single key/value pair: left = key, right = value.
		if bytes.Equal(lnode[:hashLength], key) {
			return rnode[:hashLength], nil
		}
		return nil, nil
	}
	if bitIsSet(key, s.trieHeight-height) {
		// visit right node
		return s.get(rnode, key, batch, 2*iBatch+2, height-1)
	}
	// visit left node
	return s.get(lnode, key, batch, 2*iBatch+1, height-1)
}
// DefaultHash is a getter for the defaultHashes array: the hash of an empty
// subtree rooted at the given height.
func (s *smt) DefaultHash(height int) []byte {
	return s.defaultHashes[height]
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ledger
import (
"sync"
"time"
"istio.io/istio/pkg/cache"
)
// cacheDB is the smt's node store.
type cacheDB struct {
	// updatedNodes that have will be flushed to disk
	updatedNodes byteCache
	// updatedMux is a lock for updatedNodes
	updatedMux sync.RWMutex
}
// byteCache implements a modified ExpiringCache interface, returning byte arrays
// for ease of integration with smt calls.
type byteCache struct {
	// cache is the underlying type-erased expiring cache.
	cache cache.ExpiringCache
}
// Set inserts an entry in the cache. This will replace any entry with
// the same key that is already in the cache. The entry may be automatically
// expunged from the cache at some point, depending on the eviction policies
// of the cache and the options specified when the cache was created.
func (b *byteCache) Set(key hash, value [][]byte) {
	b.cache.Set(key, value)
}
// Get retrieves the value associated with the supplied key if the key
// is present in the cache. If a present entry is not a [][]byte, ok is
// still true and the value is nil.
func (b *byteCache) Get(key hash) (value [][]byte, ok bool) {
	raw, found := b.cache.Get(key)
	if !found {
		return nil, false
	}
	typed, _ := raw.([][]byte)
	return typed, true
}
// SetWithExpiration inserts an entry in the cache with a requested expiration time.
// This will replace any entry with the same key that is already in the cache.
// The entry will be automatically expunged from the cache at or slightly after the
// requested expiration time.
func (b *byteCache) SetWithExpiration(key hash, value [][]byte, expiration time.Duration) {
	b.cache.SetWithExpiration(key, value, expiration)
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ledger
import (
"bytes"
"github.com/spaolacci/murmur3"
)
// defaultLeaf is the Trie default value : hash of 0x0
var defaultLeaf = hasher([]byte{0x0})

const (
	// hashLength is the trie hash size in bytes (64-bit murmur3 => 8 bytes).
	hashLength = 8
)

// hash is a fixed-size array form of a trie hash; usable as a map key,
// which a byte slice cannot be.
type hash [hashLength]byte
// bitIsSet reports whether bit i (counting from the most significant bit of
// bits[0]) is set.
func bitIsSet(bits []byte, i int) bool {
	mask := byte(1) << uint(7-i%8)
	return bits[i/8]&mask != 0
}
// hasher concatenates the given byte slices into a single 64-bit murmur3 hash.
func hasher(data ...[]byte) []byte {
	h := murmur3.New64()
	for _, d := range data {
		// murmur3's Write never fails.
		_, _ = h.Write(d)
	}
	return h.Sum(nil)
}
// dataArray adapts a slice of byte slices to sort.Interface, ordering
// lexicographically by byte content.
type dataArray [][]byte

// Len returns the number of elements.
func (d dataArray) Len() int { return len(d) }

// Swap exchanges the elements at i and j.
func (d dataArray) Swap(i, j int) { d[i], d[j] = d[j], d[i] }

// Less reports whether element i sorts before element j.
func (d dataArray) Less(i, j int) bool { return bytes.Compare(d[i], d[j]) < 0 }
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package log provides the canonical logging functionality used by Go-based
// Istio components.
//
// Istio's logging subsystem is built on top of the [Zap](https://godoc.org/go.uber.org/zap) package.
// High performance scenarios should use the Error, Warn, Info, and Debug methods. Lower perf
// scenarios can use the more expensive convenience methods such as Debugf and Warnw.
//
// The package provides direct integration with the Cobra command-line processor which makes it
// easy to build programs that use a consistent interface for logging. Here's an example
// of a simple Cobra-based program using this log package:
//
// func main() {
// // get the default logging options
// options := log.DefaultOptions()
//
// rootCmd := &cobra.Command{
// Run: func(cmd *cobra.Command, args []string) {
//
// // configure the logging system
// if err := log.Configure(options); err != nil {
// // print an error and quit
// }
//
// // output some logs
// log.Info("Hello")
// log.Sync()
// },
// }
//
// // add logging-specific flags to the cobra command
// options.AttachCobraFlags(rootCmd)
// rootCmd.SetArgs(os.Args[1:])
// rootCmd.Execute()
// }
//
// Once configured, this package intercepts the output of the standard golang "log" package as well as anything
// sent to the global zap logger (zap.L()).
package log
import (
"os"
"strings"
"sync/atomic"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"go.uber.org/zap/zapgrpc"
"google.golang.org/grpc/grpclog"
"gopkg.in/natefinch/lumberjack.v2"
"k8s.io/klog/v2"
)
const (
	// none is used to disable logging output as well as to disable stack tracing.
	none zapcore.Level = 100

	// GrpcScopeName is the name of the scope that gRPC logs are routed to
	// when enabled (see processLevels).
	GrpcScopeName string = "grpc"
)
// levelToZap maps this package's Level values onto zap's levels; NoneLevel maps
// to the out-of-range `none` value, which disables output entirely.
var levelToZap = map[Level]zapcore.Level{
	DebugLevel: zapcore.DebugLevel,
	InfoLevel:  zapcore.InfoLevel,
	WarnLevel:  zapcore.WarnLevel,
	ErrorLevel: zapcore.ErrorLevel,
	FatalLevel: zapcore.FatalLevel,
	NoneLevel:  none,
}
// defaultEncoderConfig is the zap field layout used for both JSON and console
// output (see prepZap); timestamps use the custom formatDate encoder.
var defaultEncoderConfig = zapcore.EncoderConfig{
	TimeKey:        "time",
	LevelKey:       "level",
	NameKey:        "scope",
	CallerKey:      "caller",
	MessageKey:     "msg",
	StacktraceKey:  "stack",
	LineEnding:     zapcore.DefaultLineEnding,
	EncodeLevel:    zapcore.LowercaseLevelEncoder,
	EncodeCaller:   zapcore.ShortCallerEncoder,
	EncodeDuration: zapcore.StringDurationEncoder,
	EncodeTime:     formatDate,
}
// patchTable holds the functions that can be replaced in a test setting.
type patchTable struct {
	// write emits a single entry to the configured core.
	write func(ent zapcore.Entry, fields []zapcore.Field) error
	// sync flushes any buffered log output.
	sync func() error
	// exitProcess terminates the process; replaced in tests to avoid exiting.
	exitProcess func(code int)
	// errorSink receives zap's own internal errors.
	errorSink zapcore.WriteSyncer
	// close releases resources held by configured log extensions.
	close func() error
}
var (
	// function table that can be replaced by tests; holds a *patchTable.
	funcs = &atomic.Value{}

	// controls whether all output is JSON or CLI style. This makes it easier to query how the zap encoder is configured
	// vs. reading its internal state.
	useJSON atomic.Value

	// logGrpc records whether gRPC logging was requested via the "grpc" scope
	// (set in processLevels, propagated to options in updateScopes).
	logGrpc bool
)
func init() {
	// use our defaults for starters so that logging works even before everything is fully configured
	_ = Configure(DefaultOptions())
}
// stackdriverSeverityMapping maps zap levels to Stackdriver severity strings.
// DPanic, Panic and Fatal all collapse to "Critical".
// See: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity
var stackdriverSeverityMapping = map[zapcore.Level]string{
	zapcore.DebugLevel:  "Debug",
	zapcore.InfoLevel:   "Info",
	zapcore.WarnLevel:   "Warning",
	zapcore.ErrorLevel:  "Error",
	zapcore.DPanicLevel: "Critical",
	zapcore.FatalLevel:  "Critical",
	zapcore.PanicLevel:  "Critical",
}
// encodeStackdriverLevel writes the Stackdriver severity string for a zap level.
// An unmapped level appends the map's zero value, the empty string.
func encodeStackdriverLevel(l zapcore.Level, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(stackdriverSeverityMapping[l])
}
// prepZap is a utility function used by the Configure function.
// It builds the encoder and output sinks from options and returns:
// an unfiltered core (enabled at Debug level), a core filtered by the default
// scope's levels, and the error sink for zap's internal errors.
func prepZap(options *Options) (zapcore.Core, zapcore.Core, zapcore.WriteSyncer, error) {
	var enc zapcore.Encoder
	if options.useStackdriverFormat {
		// Stackdriver requires its own field names and severity encoding.
		// See also: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
		encCfg := zapcore.EncoderConfig{
			TimeKey:        "timestamp",
			LevelKey:       "severity",
			NameKey:        "logger",
			CallerKey:      "caller",
			MessageKey:     "message",
			StacktraceKey:  "stacktrace",
			LineEnding:     zapcore.DefaultLineEnding,
			EncodeLevel:    encodeStackdriverLevel,
			EncodeTime:     zapcore.RFC3339NanoTimeEncoder,
			EncodeDuration: zapcore.SecondsDurationEncoder,
			EncodeCaller:   zapcore.ShortCallerEncoder,
		}
		enc = zapcore.NewJSONEncoder(encCfg)
		useJSON.Store(true)
	} else {
		encCfg := defaultEncoderConfig
		if options.JSONEncoding {
			enc = zapcore.NewJSONEncoder(encCfg)
			useJSON.Store(true)
		} else {
			enc = zapcore.NewConsoleEncoder(encCfg)
			useJSON.Store(false)
		}
	}
	// Optional self-rotating file sink via lumberjack.
	var rotaterSink zapcore.WriteSyncer
	if options.RotateOutputPath != "" {
		rotaterSink = zapcore.AddSync(&lumberjack.Logger{
			Filename:   options.RotateOutputPath,
			MaxSize:    options.RotationMaxSize,
			MaxBackups: options.RotationMaxBackups,
			MaxAge:     options.RotationMaxAge,
		})
	}
	errSink, closeErrorSink, err := zap.Open(options.ErrorOutputPaths...)
	if err != nil {
		return nil, nil, nil, err
	}
	var outputSink zapcore.WriteSyncer
	if len(options.OutputPaths) > 0 {
		outputSink, _, err = zap.Open(options.OutputPaths...)
		if err != nil {
			// Don't leak the already-opened error sink on failure.
			closeErrorSink()
			return nil, nil, nil, err
		}
	}
	// Fan out to both the regular output and the rotating file when both exist.
	var sink zapcore.WriteSyncer
	if rotaterSink != nil && outputSink != nil {
		sink = zapcore.NewMultiWriteSyncer(outputSink, rotaterSink)
	} else if rotaterSink != nil {
		sink = rotaterSink
	} else {
		sink = outputSink
	}
	// The filtered core consults the default scope's level settings per entry.
	var enabler zap.LevelEnablerFunc = func(lvl zapcore.Level) bool {
		switch lvl {
		case zapcore.ErrorLevel:
			return defaultScope.ErrorEnabled()
		case zapcore.WarnLevel:
			return defaultScope.WarnEnabled()
		case zapcore.InfoLevel:
			return defaultScope.InfoEnabled()
		}
		return defaultScope.DebugEnabled()
	}
	return zapcore.NewCore(enc, sink, zap.NewAtomicLevelAt(zapcore.DebugLevel)),
		zapcore.NewCore(enc, sink, enabler),
		errSink, nil
}
// formatDate appends t as UTC in the fixed layout yyyy-MM-ddTHH:mm:ss.ffffffZ
// (microsecond precision). The digits are emitted by hand instead of using
// time.Format, avoiding layout parsing on this per-log-line hot path.
func formatDate(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	t = t.UTC()
	year, month, day := t.Date()
	hour, minute, second := t.Clock()
	micros := t.Nanosecond() / 1000

	// 27 bytes: 4+1+2+1+2 date, 'T', 2+1+2+1+2 time, '.', 6 micros, 'Z'.
	buf := make([]byte, 27)

	buf[0] = byte((year/1000)%10) + '0'
	buf[1] = byte((year/100)%10) + '0'
	buf[2] = byte((year/10)%10) + '0'
	buf[3] = byte(year%10) + '0'
	buf[4] = '-'
	buf[5] = byte((month)/10) + '0'
	buf[6] = byte((month)%10) + '0'
	buf[7] = '-'
	buf[8] = byte((day)/10) + '0'
	buf[9] = byte((day)%10) + '0'
	buf[10] = 'T'
	buf[11] = byte((hour)/10) + '0'
	buf[12] = byte((hour)%10) + '0'
	buf[13] = ':'
	buf[14] = byte((minute)/10) + '0'
	buf[15] = byte((minute)%10) + '0'
	buf[16] = ':'
	buf[17] = byte((second)/10) + '0'
	buf[18] = byte((second)%10) + '0'
	buf[19] = '.'
	buf[20] = byte((micros/100000)%10) + '0'
	buf[21] = byte((micros/10000)%10) + '0'
	buf[22] = byte((micros/1000)%10) + '0'
	buf[23] = byte((micros/100)%10) + '0'
	buf[24] = byte((micros/10)%10) + '0'
	buf[25] = byte((micros)%10) + '0'
	buf[26] = 'Z'

	enc.AppendString(string(buf))
}
// updateScopes applies the scope-related settings in options (output levels,
// stack-trace levels, caller logging) to all currently registered scopes.
func updateScopes(options *Options) error {
	// snapshot what's there
	allScopes := Scopes()

	// update the output levels of all listed scopes
	if err := processLevels(allScopes, options.outputLevels, func(s *Scope, l Level) { s.SetOutputLevel(l) }); err != nil {
		return err
	}

	// update the stack tracing levels of all listed scopes
	if err := processLevels(allScopes, options.stackTraceLevels, func(s *Scope, l Level) { s.SetStackTraceLevel(l) }); err != nil {
		return err
	}

	// update the caller location setting of all listed scopes
	sc := strings.Split(options.logCallers, ",")
	for _, s := range sc {
		if s == "" {
			continue
		}

		if s == OverrideScopeName {
			// ignore everything else and just apply the override value
			// NOTE(review): this early return also skips the logGrpc
			// propagation below — confirm that is intended.
			for _, scope := range allScopes {
				scope.SetLogCallers(true)
			}

			return nil
		}

		// Unknown scope names are silently ignored.
		if scope, ok := allScopes[s]; ok {
			scope.SetLogCallers(true)
		}
	}

	// update LogGrpc if necessary (set as a side effect of processLevels
	// encountering the "grpc" scope)
	if logGrpc {
		options.LogGrpc = true
	}

	return nil
}
// processLevels breaks down an argument string into a set of scope & levels and then
// tries to apply the result to the scopes. It supports the use of a global override.
func processLevels(allScopes map[string]*Scope, arg string, setter func(*Scope, Level)) error {
	for _, entry := range strings.Split(arg, ",") {
		scopeName, level, err := convertScopedLevel(entry)
		if err != nil {
			return err
		}

		if scope, ok := allScopes[scopeName]; ok {
			setter(scope, level)
			continue
		}

		switch scopeName {
		case OverrideScopeName:
			// the override value replaces everything else
			for _, scope := range allScopes {
				setter(scope, level)
			}
			return nil
		case GrpcScopeName:
			// lazily register the gRPC scope and remember to enable gRPC capture
			grpcScope := registerScope(GrpcScopeName, "", 3)
			logGrpc = true
			setter(grpcScope, level)
			return nil
		}
		// unknown scope names are silently ignored
	}
	return nil
}
// Configure initializes Istio's logging subsystem.
//
// You typically call this once at process startup.
// Once this call returns, the logging system is ready to accept data.
// nolint: staticcheck
func Configure(options *Options) error {
	// Build the zap cores (normal output + capture core for redirected
	// third-party logs) and the error sink from the options.
	core, captureCore, errSink, err := prepZap(options)
	if err != nil {
		return err
	}

	// Push the per-scope settings (levels, callers, gRPC capture) into the
	// scope registry.
	if err := updateScopes(options); err != nil {
		return err
	}

	// Thread both cores through every registered extension, collecting each
	// extension's close function so Close() can run them later.
	closeFns := make([]func() error, 0)
	for _, ext := range options.extensions {
		var closeFn, captureCloseFn func() error
		var err error
		core, closeFn, err = ext(core)
		if err != nil {
			return err
		}
		captureCore, captureCloseFn, err = ext(captureCore)
		if err != nil {
			return err
		}
		closeFns = append(closeFns, closeFn, captureCloseFn)
	}

	// Install the active logging functions; funcs is swapped atomically so
	// in-flight log calls observe a consistent patchTable.
	pt := patchTable{
		write: func(ent zapcore.Entry, fields []zapcore.Field) error {
			err := core.Write(ent, fields)
			// Fatal entries terminate the process via the (patchable) exit hook.
			if ent.Level == zapcore.FatalLevel {
				funcs.Load().(patchTable).exitProcess(1)
			}
			return err
		},
		sync:        core.Sync,
		exitProcess: os.Exit,
		errorSink:   errSink,
		close: func() error {
			// best-effort to sync
			core.Sync() // nolint: errcheck
			for _, f := range closeFns {
				if err := f(); err != nil {
					return err
				}
			}
			return nil
		},
	}
	funcs.Store(pt)

	opts := []zap.Option{
		zap.ErrorOutput(errSink),
		zap.AddCallerSkip(1),
	}

	if defaultScope.GetLogCallers() {
		opts = append(opts, zap.AddCaller())
	}

	l := defaultScope.GetStackTraceLevel()
	if l != NoneLevel {
		opts = append(opts, zap.AddStacktrace(levelToZap[l]))
	}

	captureLogger := zap.New(captureCore, opts...)

	// capture global zap logging and force it through our logger
	_ = zap.ReplaceGlobals(captureLogger)

	// capture standard golang "log" package output and force it through our logger
	_ = zap.RedirectStdLog(captureLogger)

	// capture gRPC logging
	if options.LogGrpc {
		grpclog.SetLogger(zapgrpc.NewLogger(captureLogger.WithOptions(zap.AddCallerSkip(3))))
	}

	// capture klog (Kubernetes logging) through our logging
	configureKlog.Do(func() {
		klog.SetLogger(NewLogrAdapter(KlogScope))
	})
	// --vklog is non zero then KlogScope should be increased.
	// klog is a special case.
	if klogVerbose() {
		KlogScope.SetOutputLevel(DebugLevel)
	}

	return nil
}
// Sync flushes any buffered log entries.
// Processes should normally take care to call Sync before exiting.
func Sync() error {
	pt := funcs.Load().(patchTable)
	return pt.sync()
}

// Close implements io.Closer.
func Close() error {
	pt := funcs.Load().(patchTable)
	return pt.close()
}
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
// These functions enable logging using a global Scope. See scope.go for usage information.

// registerDefaultScope creates the scope that backs the package-level logging functions.
func registerDefaultScope() *Scope {
	return registerScope(DefaultScopeName, "Unscoped logging messages.", 1)
}

var defaultScope = registerDefaultScope()

// Fatal outputs a message at fatal level.
func Fatal(fields any) { defaultScope.Fatal(fields) }

// Fatalf uses fmt.Sprintf to construct and log a message at fatal level.
func Fatalf(format string, args ...any) { defaultScope.Fatalf(format, args...) }

// FatalEnabled returns whether output of messages using this scope is currently enabled for fatal-level output.
func FatalEnabled() bool { return defaultScope.FatalEnabled() }

// Error outputs a message at error level.
func Error(fields any) { defaultScope.Error(fields) }

// Errorf uses fmt.Sprintf to construct and log a message at error level.
func Errorf(format string, args ...any) { defaultScope.Errorf(format, args...) }

// ErrorEnabled returns whether output of messages using this scope is currently enabled for error-level output.
func ErrorEnabled() bool { return defaultScope.ErrorEnabled() }

// Warn outputs a message at warn level.
func Warn(fields any) { defaultScope.Warn(fields) }

// Warnf uses fmt.Sprintf to construct and log a message at warn level.
func Warnf(format string, args ...any) { defaultScope.Warnf(format, args...) }

// WarnEnabled returns whether output of messages using this scope is currently enabled for warn-level output.
func WarnEnabled() bool { return defaultScope.WarnEnabled() }

// Info outputs a message at info level.
func Info(fields any) { defaultScope.Info(fields) }

// Infof uses fmt.Sprintf to construct and log a message at info level.
func Infof(format string, args ...any) { defaultScope.Infof(format, args...) }

// InfoEnabled returns whether output of messages using this scope is currently enabled for info-level output.
func InfoEnabled() bool { return defaultScope.InfoEnabled() }

// Debug outputs a message at debug level.
func Debug(fields any) { defaultScope.Debug(fields) }

// Debugf uses fmt.Sprintf to construct and log a message at debug level.
func Debugf(format string, args ...any) { defaultScope.Debugf(format, args...) }

// DebugEnabled returns whether output of messages using this scope is currently enabled for debug-level output.
func DebugEnabled() bool { return defaultScope.DebugEnabled() }

// WithLabels adds a key-value pairs to the labels in s. The key must be a string, while the value may be any type.
// It returns a copy of the default scope, with the labels added.
func WithLabels(kvlist ...any) *Scope {
	return defaultScope.WithLabels(kvlist...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
goflag "flag"
"fmt"
"sync"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
)
var (
	// KlogScope routes all klog (Kubernetes library) output through Istio logging.
	KlogScope = RegisterScope("klog", "")
	// configureKlog guards one-time installation of the klog -> Istio adapter.
	configureKlog = sync.Once{}
)
// EnableKlogWithCobra enables klog to work with cobra / pflags.
// k8s libraries like client-go use klog.
func EnableKlogWithCobra() {
	verbose := klogVerboseFlag()
	// Re-expose klog's "v" flag under the name "vklog" to avoid clashing
	// with other verbosity flags on the command line.
	wrapped := &goflag.Flag{
		Name:     "vklog",
		Value:    verbose.Value,
		DefValue: verbose.DefValue,
		Usage:    verbose.Usage + ". Like -v flag. ex: --vklog=9",
	}
	pflag.CommandLine.AddFlag(pflag.PFlagFromGoFlag(wrapped))
}
// EnableKlogWithGoFlag enables klog to work with go flags.
// k8s libraries like client-go use klog.
func EnableKlogWithGoFlag() {
	gf := klogVerboseFlag()
	goflag.CommandLine.Var(gf.Value, "vklog", gf.Usage+". Like -v flag. ex: --vklog=9")
}
// klogVerbose returns true if klog verbosity (the --vklog / "v" flag) is non-zero.
func klogVerbose() bool {
	gf := klogVerboseFlag()
	return gf.Value.String() != "0"
}
var (
	// klogFlagSet receives klog's flags so they can be consulted without
	// polluting the global flag.CommandLine.
	klogFlagSet = &goflag.FlagSet{}
	// klogFlagSetOnce guards one-time registration of klog's flags.
	klogFlagSetOnce = sync.Once{}
)
// klogVerboseFlag returns the verbose ("v") flag from the klog library.
// After parsing it contains the parsed verbosity value.
func klogVerboseFlag() *goflag.Flag {
	klogFlagSetOnce.Do(func() {
		klog.InitFlags(klogFlagSet)
	})
	// --v= flag of klog.
	return klogFlagSet.Lookup("v")
}
// EnableKlogWithVerbosity sets the klog verbosity directly.
// When using in an application, EnableKlogWithCobra is preferred to expose a --vklog flag.
func EnableKlogWithVerbosity(v int) {
	// Error deliberately ignored: the "v" flag accepts any integer value.
	_ = klogFlagSet.Set("v", fmt.Sprint(v))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"fmt"
"github.com/go-logr/logr"
)
// zapLogger is a logr.Logger that uses Zap to log. This is needed to get
// libraries, namely Kubernetes/klog, that use logr, to use our standard logging.
// This enables standard formatting, scope filtering, and options. The logr
// interface does not have a concept of Debug/Info/Warn/Error as we do. Instead,
// logging is based on Verbosity levels, where 0 is the most important. We treat
// levels 0-3 as info level and 4+ as debug; there are no warnings. This
// threshold is fairly arbitrary based on inspection of Kubernetes usage and
// https://kubernetes.io/docs/reference/kubectl/cheatsheet/#kubectl-output-verbosity-and-debugging.
// Errors are passed through as errors.
// Zap does come with its own logr implementation, but we have chosen to re-implement to allow usage of
// our Scope - in particular, this allows changing the logging level of kubernetes logs by users.
type zapLogger struct {
	// l is the scope all adapter output is routed through.
	l *Scope
}

// debugLevelThreshold is the logr verbosity above which messages are logged
// at debug (rather than info) level.
const debugLevelThreshold = 3
// Enabled reports whether a message at the given logr verbosity would be
// emitted: verbosities above debugLevelThreshold map to debug, all others to info.
func (zl *zapLogger) Enabled(level int) bool {
	if level <= debugLevelThreshold {
		return zl.l.InfoEnabled()
	}
	return zl.l.DebugEnabled()
}
// Logs will come in with newlines, but our logger auto appends newline.
// trimNewline strips at most one trailing '\n' from msg.
func trimNewline(msg string) string {
	if n := len(msg); n > 0 && msg[n-1] == '\n' {
		return msg[:n-1]
	}
	return msg
}
// Init is part of logr.LogSink; this adapter needs no runtime info, so it is a no-op.
func (zl *zapLogger) Init(logr.RuntimeInfo) {
}

// Info routes a logr message to the scope: verbosity above debugLevelThreshold
// becomes debug output, anything else info output.
func (zl *zapLogger) Info(level int, msg string, keysAndVals ...any) {
	logger := zl.l.WithLabels(keysAndVals...)
	text := trimNewline(msg)
	if level > debugLevelThreshold {
		logger.Debug(text)
	} else {
		logger.Info(text)
	}
}
// Error logs msg (prefixed by err, when err is non-nil) at error level.
// Note: when err is non-nil the message keeps any trailing newline; this
// mirrors the historical behavior of the adapter.
func (zl *zapLogger) Error(err error, msg string, keysAndVals ...any) {
	if !zl.l.ErrorEnabled() {
		return
	}
	text := trimNewline(msg)
	if err != nil {
		text = fmt.Sprintf("%v: %s", err.Error(), msg)
	}
	zl.l.WithLabels(keysAndVals...).Error(text)
}
// V returns a logger for the given verbosity. Verbosity filtering happens in
// Enabled/Info, so the same underlying scope is simply reused.
func (zl *zapLogger) V(int) logr.Logger {
	return logr.New(&zapLogger{l: zl.l})
}

// WithValues returns a sink whose scope carries the extra key/value labels.
func (zl *zapLogger) WithValues(keysAndValues ...any) logr.LogSink {
	labeled := zl.l.WithLabels(keysAndValues...)
	return NewLogrAdapter(labeled).GetSink()
}

// WithName ignores the requested name; scope names are fixed at registration.
func (zl *zapLogger) WithName(string) logr.LogSink {
	return zl
}

// NewLogrAdapter creates a new logr.Logger using the given Zap Logger to log.
func NewLogrAdapter(l *Scope) logr.Logger {
	return logr.New(&zapLogger{l: l})
}
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"fmt"
"sort"
"strings"
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
)
const (
	// DefaultScopeName is the name of the scope used for unscoped messages.
	DefaultScopeName = "default"
	// OverrideScopeName is a scope selector that matches every registered scope.
	OverrideScopeName = "all"

	defaultOutputLevel     = InfoLevel
	defaultStackTraceLevel = NoneLevel
	defaultOutputPath      = "stdout"
	defaultErrorOutputPath = "stderr"

	defaultRotationMaxAge = 30
	// NOTE(review): RotationMaxSize is documented as megabytes, yet this default
	// evaluates to 100*1024*1024 (~100 TB if interpreted as MB) — confirm the
	// intended unit against the rotation sink configuration.
	defaultRotationMaxSize    = 100 * 1024 * 1024
	defaultRotationMaxBackups = 1000
)

// Level is an enumeration of all supported log levels.
type Level int

const (
	// NoneLevel disables logging
	NoneLevel Level = iota
	// FatalLevel enables fatal level logging
	FatalLevel
	// ErrorLevel enables error level logging
	ErrorLevel
	// WarnLevel enables warn level logging
	WarnLevel
	// InfoLevel enables info level logging
	InfoLevel
	// DebugLevel enables debug level logging
	DebugLevel
)

// levelToString maps levels to the names accepted/produced on the command line.
var levelToString = map[Level]string{
	DebugLevel: "debug",
	InfoLevel:  "info",
	WarnLevel:  "warn",
	ErrorLevel: "error",
	FatalLevel: "fatal",
	NoneLevel:  "none",
}

// stringToLevel is the inverse of levelToString.
var stringToLevel = map[string]Level{
	"debug": DebugLevel,
	"info":  InfoLevel,
	"warn":  WarnLevel,
	"error": ErrorLevel,
	"fatal": FatalLevel,
	"none":  NoneLevel,
}
// Options defines the set of options supported by Istio's component logging package.
type Options struct {
	// OutputPaths is a list of file system paths to write the log data to.
	// The special values stdout and stderr can be used to output to the
	// standard I/O streams. This defaults to stdout.
	OutputPaths []string

	// ErrorOutputPaths is a list of file system paths to write logger errors to.
	// The special values stdout and stderr can be used to output to the
	// standard I/O streams. This defaults to stderr.
	ErrorOutputPaths []string

	// RotateOutputPath is the path to a rotating log file. This file should
	// be automatically rotated over time, based on the rotation parameters such
	// as RotationMaxSize and RotationMaxAge. The default is to not rotate.
	//
	// This path is used as a foundational path. This is where log output is normally
	// saved. When a rotation needs to take place because the file got too big or too
	// old, then the file is renamed by appending a timestamp to the name. Such renamed
	// files are called backups. Once a backup has been created,
	// output resumes to this path.
	RotateOutputPath string

	// RotationMaxSize is the maximum size in megabytes of a log file before it gets
	// rotated. It defaults to 100 megabytes.
	RotationMaxSize int

	// RotationMaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename. Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is to remove log files
	// older than 30 days.
	RotationMaxAge int

	// RotationMaxBackups is the maximum number of old log files to retain. The default
	// is to retain at most 1000 logs.
	RotationMaxBackups int

	// JSONEncoding controls whether the log is formatted as JSON.
	JSONEncoding bool

	// LogGrpc indicates that Grpc logs should be captured. The default is true.
	// This is not exposed through the command-line flags, as this flag is mainly useful for testing: Grpc
	// stack will hold on to the logger even though it gets closed. This causes data races.
	LogGrpc bool

	// outputLevels holds the encoded per-scope output levels ("scope:level,...").
	outputLevels string
	// logCallers holds the comma-separated list of scopes that log caller info.
	logCallers string
	// stackTraceLevels holds the encoded per-scope stack-trace levels.
	stackTraceLevels string

	// useStackdriverFormat selects Stackdriver-style structured output.
	useStackdriverFormat bool
	// extensions are applied to the zap cores during Configure.
	extensions []Extension
}
// DefaultOptions returns a new set of options, initialized to the defaults
func DefaultOptions() *Options {
	o := &Options{
		OutputPaths:          []string{defaultOutputPath},
		ErrorOutputPaths:     []string{defaultErrorOutputPath},
		RotationMaxSize:      defaultRotationMaxSize,
		RotationMaxAge:       defaultRotationMaxAge,
		RotationMaxBackups:   defaultRotationMaxBackups,
		outputLevels:         DefaultScopeName + ":" + levelToString[defaultOutputLevel],
		stackTraceLevels:     DefaultScopeName + ":" + levelToString[defaultStackTraceLevel],
		LogGrpc:              false,
		useStackdriverFormat: false,
	}
	return o
}
// WithStackdriverLoggingFormat configures logging output to match Stackdriver structured logging conventions.
func (o *Options) WithStackdriverLoggingFormat() *Options {
	o.useStackdriverFormat = true
	return o
}

// WithTeeToUDS configures a parallel logging pipeline that writes logs to a server over UDS.
// addr is the socket that the server listens on, and path is the HTTP path that process the log message.
func (o *Options) WithTeeToUDS(addr, path string) *Options {
	tee := func(c zapcore.Core) (zapcore.Core, func() error, error) {
		noopClose := func() error { return nil }
		return teeToUDSServer(c, addr, path), noopClose, nil
	}
	return o.WithExtension(tee)
}

// Extension provides an extension mechanism for logs.
// This is essentially like https://pkg.go.dev/golang.org/x/exp/slog#Handler.
// This interface should be considered unstable; we will likely swap it for slog in the future and not expose zap internals.
// Returns a modified Core interface, and a Close() function.
type Extension func(c zapcore.Core) (zapcore.Core, func() error, error)

// WithExtension registers e to be applied to the logging cores during Configure.
func (o *Options) WithExtension(e Extension) *Options {
	o.extensions = append(o.extensions, e)
	return o
}
// SetOutputLevel sets the minimum log output level for a given scope.
func (o *Options) SetOutputLevel(scope string, level Level) {
	o.outputLevels = setScopedLevel(o.outputLevels, scope, level)
}

// GetOutputLevel returns the minimum log output level for a given scope.
func (o *Options) GetOutputLevel(scope string) (Level, error) {
	return getScopedLevel(o.outputLevels, scope)
}

// SetStackTraceLevel sets the minimum stack tracing level for a given scope.
func (o *Options) SetStackTraceLevel(scope string, level Level) {
	o.stackTraceLevels = setScopedLevel(o.stackTraceLevels, scope, level)
}

// GetStackTraceLevel returns the minimum stack tracing level for a given scope.
func (o *Options) GetStackTraceLevel(scope string) (Level, error) {
	return getScopedLevel(o.stackTraceLevels, scope)
}

// setScopedLevel returns encoded ("scope:level,scope:level,...") with the entry
// for scope replaced by scope:level, appending a new entry when none exists.
// An entry with no ":" separator is treated as belonging to the default scope.
// This factors out the logic previously duplicated between SetOutputLevel and
// SetStackTraceLevel.
func setScopedLevel(encoded, scope string, level Level) string {
	sl := scope + ":" + levelToString[level]
	levels := strings.Split(encoded, ",")

	if scope == DefaultScopeName {
		// see if we have an entry without a scope prefix (which represents the default scope)
		for i, ol := range levels {
			if !strings.Contains(ol, ":") {
				levels[i] = sl
				return strings.Join(levels, ",")
			}
		}
	}

	prefix := scope + ":"
	for i, ol := range levels {
		if strings.HasPrefix(ol, prefix) {
			levels[i] = sl
			return strings.Join(levels, ",")
		}
	}

	// no existing entry for this scope: append one
	return strings.Join(append(levels, sl), ",")
}

// getScopedLevel extracts the level recorded for scope in encoded, or returns an
// error when the scope has no entry. An entry with no ":" separator is treated
// as the default scope. This factors out the logic previously duplicated
// between GetOutputLevel and GetStackTraceLevel.
func getScopedLevel(encoded, scope string) (Level, error) {
	levels := strings.Split(encoded, ",")

	if scope == DefaultScopeName {
		// see if we have an entry without a scope prefix (which represents the default scope)
		for _, ol := range levels {
			if !strings.Contains(ol, ":") {
				_, l, err := convertScopedLevel(ol)
				return l, err
			}
		}
	}

	prefix := scope + ":"
	for _, ol := range levels {
		if strings.HasPrefix(ol, prefix) {
			_, l, err := convertScopedLevel(ol)
			return l, err
		}
	}

	return NoneLevel, fmt.Errorf("no level defined for scope '%s'", scope)
}
// SetLogCallers sets whether to output the caller's source code location for a given scope.
func (o *Options) SetLogCallers(scope string, include bool) {
	names := strings.Split(o.logCallers, ",")

	// blank out any existing occurrence of the scope
	for i := range names {
		if names[i] == scope {
			names[i] = ""
		}
	}

	if include {
		// reuse a blank slot when one is available, otherwise grow the list
		placed := false
		for i := range names {
			if names[i] == "" {
				names[i] = scope
				placed = true
				break
			}
		}
		if !placed {
			names = append(names, scope)
		}
	}

	o.logCallers = strings.Join(names, ",")
}
// GetLogCallers returns whether the caller's source code location is output for a given scope.
func (o *Options) GetLogCallers(scope string) bool {
	for _, name := range strings.Split(o.logCallers, ",") {
		if name == scope {
			return true
		}
	}
	return false
}
// convertScopedLevel parses a single "scope:level" entry (or a bare "level",
// which implies the default scope) into its scope name and Level.
func convertScopedLevel(sl string) (string, Level, error) {
	var s, l string
	pieces := strings.Split(sl, ":")
	switch len(pieces) {
	case 1:
		s, l = DefaultScopeName, pieces[0]
	case 2:
		s, l = pieces[0], pieces[1]
	default:
		return "", NoneLevel, fmt.Errorf("invalid output level format '%s'", sl)
	}

	level, ok := stringToLevel[l]
	if !ok {
		return "", NoneLevel, fmt.Errorf("invalid output level '%s'", sl)
	}
	return s, level, nil
}
// AttachCobraFlags attaches a set of Cobra flags to the given Cobra command.
//
// Cobra is the command-line processor that Istio uses. This command attaches
// the necessary set of flags to expose a CLI to let the user control all
// logging options.
func (o *Options) AttachCobraFlags(cmd *cobra.Command) {
	// Flags are registered as persistent so subcommands inherit them.
	o.AttachFlags(
		cmd.PersistentFlags().StringArrayVar,
		cmd.PersistentFlags().StringVar,
		cmd.PersistentFlags().IntVar,
		cmd.PersistentFlags().BoolVar)
}
// AttachFlags allows attaching of flags through a set of lambda functions.
// The four setters mirror flag-registration functions (pflag or the standard
// flag package), keeping this package agnostic of the concrete flag library.
func (o *Options) AttachFlags(
	stringArrayVar func(p *[]string, name string, value []string, usage string),
	stringVar func(p *string, name string, value string, usage string),
	intVar func(p *int, name string, value int, usage string),
	boolVar func(p *bool, name string, value bool, usage string),
) {
	stringArrayVar(&o.OutputPaths, "log_target", o.OutputPaths,
		"The set of paths where to output the log. This can be any path as well as the special values stdout and stderr")

	stringVar(&o.RotateOutputPath, "log_rotate", o.RotateOutputPath,
		"The path for the optional rotating log file")

	intVar(&o.RotationMaxAge, "log_rotate_max_age", o.RotationMaxAge,
		"The maximum age in days of a log file beyond which the file is rotated (0 indicates no limit)")

	intVar(&o.RotationMaxSize, "log_rotate_max_size", o.RotationMaxSize,
		"The maximum size in megabytes of a log file beyond which the file is rotated")

	intVar(&o.RotationMaxBackups, "log_rotate_max_backups", o.RotationMaxBackups,
		"The maximum number of log file backups to keep before older files are deleted (0 indicates no limit)")

	boolVar(&o.JSONEncoding, "log_as_json", o.JSONEncoding,
		"Whether to format output as JSON or in plain console-friendly format")

	levelListString := fmt.Sprintf("[%s, %s, %s, %s, %s, %s]",
		levelToString[DebugLevel],
		levelToString[InfoLevel],
		levelToString[WarnLevel],
		levelToString[ErrorLevel],
		levelToString[FatalLevel],
		levelToString[NoneLevel])

	allScopes := Scopes()
	if len(allScopes) > 1 {
		// When multiple scopes are registered, document the per-scope syntax
		// and enumerate the valid scope names in the help text.
		keys := make([]string, 0, len(allScopes))
		for name := range allScopes {
			keys = append(keys, name)
		}
		keys = append(keys, OverrideScopeName)
		sort.Strings(keys)
		s := strings.Join(keys, ", ")

		stringVar(&o.outputLevels, "log_output_level", o.outputLevels,
			fmt.Sprintf("Comma-separated minimum per-scope logging level of messages to output, in the form of "+
				"<scope>:<level>,<scope>:<level>,... where scope can be one of [%s] and level can be one of %s",
				s, levelListString))

		// BUGFIX: help text previously read "<scope:level>"; use the same
		// "<scope>:<level>" form as log_output_level.
		stringVar(&o.stackTraceLevels, "log_stacktrace_level", o.stackTraceLevels,
			fmt.Sprintf("Comma-separated minimum per-scope logging level at which stack traces are captured, in the form of "+
				"<scope>:<level>,<scope>:<level>,... where scope can be one of [%s] and level can be one of %s",
				s, levelListString))

		stringVar(&o.logCallers, "log_caller", o.logCallers,
			fmt.Sprintf("Comma-separated list of scopes for which to include caller information, scopes can be any of [%s]", s))
	} else {
		stringVar(&o.outputLevels, "log_output_level", o.outputLevels,
			fmt.Sprintf("The minimum logging level of messages to output,  can be one of %s",
				levelListString))

		stringVar(&o.stackTraceLevels, "log_stacktrace_level", o.stackTraceLevels,
			fmt.Sprintf("The minimum logging level at which stack traces are captured, can be one of %s",
				levelListString))

		// BUGFIX: "called information" -> "caller information".
		stringVar(&o.logCallers, "log_caller", o.logCallers,
			"Comma-separated list of scopes for which to include caller information, scopes can be any of [default]")
	}

	// NOTE: we don't currently expose a command-line option to control ErrorOutputPaths since it
	// seems too esoteric.
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"fmt"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Scope constrains logging control to a named scope level. It gives users a fine grained control over output severity
// threshold and stack traces.
//
// Scope supports structured logging using WithLabels:
//
//	s := RegisterScope("MyScope", "Description", 0)
//	s = s.WithLabels("foo", "bar", "baz", 123, "qux", 0.123)
//	s.Info("Hello") // <time> info MyScope Hello foo=bar baz=123 qux=0.123
//
// The output format can be globally configured to be JSON instead, using Options in this package.
//
// e.g. <time> info MyScope { "message":"Hello","foo":"bar","baz":123 }
//
// Scope also supports an error dictionary. The caller can pass a *structured.Error object as the first parameter
// to any of the output functions (Fatal*, Error* etc.) and this will append the fields in the object to the output:
//
//	e := &structured.Error{MoreInfo:"See the documentation in istio.io/helpful_link"}
//	s.WithLabels("foo", "bar").Error(e, "Hello")
//	<time> info MyScope Hello moreInfo=See the documentation in istio.io/helpful_link foo=bar
//
// See structured.Error for additional guidance on defining errors in a dictionary.
type Scope struct {
	// immutable, set at creation
	name        string
	nameToEmit  string
	description string
	callerSkip  int

	// set by the Configure method and adjustable dynamically
	outputLevel     atomic.Value
	stackTraceLevel atomic.Value
	logCallers      atomic.Value

	// labels data - key slice to preserve ordering
	labelKeys []string
	labels    map[string]any
}

var (
	// scopes holds every registered scope, keyed by name.
	scopes = make(map[string]*Scope)
	// lock guards the scopes map.
	lock sync.RWMutex
)
// RegisterScope registers a new logging scope. If the same name is used multiple times
// for a single process, the same Scope struct is returned.
//
// Scope names cannot include colons, commas, or periods.
func RegisterScope(name string, description string) *Scope {
	// External callers always get callerSkip == 0; only this package may adjust it.
	return registerScope(name, description, 0)
}

// registerScope implements RegisterScope, additionally letting internal callers
// set the caller-skip depth used when emitting source locations.
func registerScope(name string, description string, callerSkip int) *Scope {
	if strings.ContainsAny(name, ":,.") {
		panic(fmt.Sprintf("scope name %s is invalid, it cannot contain colons, commas, or periods", name))
	}

	lock.Lock()
	defer lock.Unlock()

	s := scopes[name]
	if s == nil {
		s = &Scope{
			name:        name,
			description: description,
			callerSkip:  callerSkip,
		}
		s.SetOutputLevel(InfoLevel)
		s.SetStackTraceLevel(NoneLevel)
		s.SetLogCallers(false)

		// The default scope's name is suppressed in emitted records.
		if name != DefaultScopeName {
			s.nameToEmit = name
		}

		scopes[name] = s
	}

	// Registration always leaves the scope label-free.
	s.labels = make(map[string]any)

	return s
}
// FindScope returns a previously registered scope, or nil if the named scope wasn't previously registered
func FindScope(scope string) *Scope {
	lock.RLock()
	defer lock.RUnlock()
	return scopes[scope]
}

// Scopes returns a snapshot of the currently defined set of scopes
func Scopes() map[string]*Scope {
	lock.RLock()
	defer lock.RUnlock()

	snapshot := make(map[string]*Scope, len(scopes))
	for name, scope := range scopes {
		snapshot[name] = scope
	}
	return snapshot
}
// Fatal uses fmt.Sprint to construct and log a message at fatal level.
func (s *Scope) Fatal(msg any) {
	if s.GetOutputLevel() < FatalLevel {
		return
	}
	s.emit(zapcore.FatalLevel, fmt.Sprint(msg))
}

// Fatalf uses fmt.Sprintf to construct and log a message at fatal level.
func (s *Scope) Fatalf(format string, args ...any) {
	if s.GetOutputLevel() < FatalLevel {
		return
	}
	s.emit(zapcore.FatalLevel, maybeSprintf(format, args))
}

// FatalEnabled returns whether output of messages using this scope is currently enabled for fatal-level output.
func (s *Scope) FatalEnabled() bool {
	return s.GetOutputLevel() >= FatalLevel
}

// Error outputs a message at error level.
func (s *Scope) Error(msg any) {
	if s.GetOutputLevel() < ErrorLevel {
		return
	}
	s.emit(zapcore.ErrorLevel, fmt.Sprint(msg))
}

// Errorf uses fmt.Sprintf to construct and log a message at error level.
func (s *Scope) Errorf(format string, args ...any) {
	if s.GetOutputLevel() < ErrorLevel {
		return
	}
	s.emit(zapcore.ErrorLevel, maybeSprintf(format, args))
}

// ErrorEnabled returns whether output of messages using this scope is currently enabled for error-level output.
func (s *Scope) ErrorEnabled() bool {
	return s.GetOutputLevel() >= ErrorLevel
}

// Warn outputs a message at warn level.
func (s *Scope) Warn(msg any) {
	if s.GetOutputLevel() < WarnLevel {
		return
	}
	s.emit(zapcore.WarnLevel, fmt.Sprint(msg))
}

// Warnf uses fmt.Sprintf to construct and log a message at warn level.
func (s *Scope) Warnf(format string, args ...any) {
	if s.GetOutputLevel() < WarnLevel {
		return
	}
	s.emit(zapcore.WarnLevel, maybeSprintf(format, args))
}

// WarnEnabled returns whether output of messages using this scope is currently enabled for warn-level output.
func (s *Scope) WarnEnabled() bool {
	return s.GetOutputLevel() >= WarnLevel
}

// Info outputs a message at info level.
func (s *Scope) Info(msg any) {
	if s.GetOutputLevel() < InfoLevel {
		return
	}
	s.emit(zapcore.InfoLevel, fmt.Sprint(msg))
}

// Infof uses fmt.Sprintf to construct and log a message at info level.
func (s *Scope) Infof(format string, args ...any) {
	if s.GetOutputLevel() < InfoLevel {
		return
	}
	s.emit(zapcore.InfoLevel, maybeSprintf(format, args))
}

// InfoEnabled returns whether output of messages using this scope is currently enabled for info-level output.
func (s *Scope) InfoEnabled() bool {
	return s.GetOutputLevel() >= InfoLevel
}

// Debug outputs a message at debug level.
func (s *Scope) Debug(msg any) {
	if s.GetOutputLevel() < DebugLevel {
		return
	}
	s.emit(zapcore.DebugLevel, fmt.Sprint(msg))
}

// LogWithTime outputs a message with a given timestamp.
func (s *Scope) LogWithTime(level Level, msg string, t time.Time) {
	if s.GetOutputLevel() < level {
		return
	}
	s.emitWithTime(levelToZap[level], msg, t)
}

// Debugf uses fmt.Sprintf to construct and log a message at debug level.
func (s *Scope) Debugf(format string, args ...any) {
	if s.GetOutputLevel() < DebugLevel {
		return
	}
	s.emit(zapcore.DebugLevel, maybeSprintf(format, args))
}

// DebugEnabled returns whether output of messages using this scope is currently enabled for debug-level output.
func (s *Scope) DebugEnabled() bool {
	return s.GetOutputLevel() >= DebugLevel
}
// Name returns this scope's name.
func (s *Scope) Name() string {
	return s.name
}

// Description returns this scope's description
func (s *Scope) Description() string {
	return s.description
}

// SetOutputLevel adjusts the output level associated with the scope.
func (s *Scope) SetOutputLevel(l Level) {
	s.outputLevel.Store(l)
}

// GetOutputLevel returns the output level associated with the scope.
func (s *Scope) GetOutputLevel() Level {
	return s.outputLevel.Load().(Level)
}

// SetStackTraceLevel adjusts the stack tracing level associated with the scope.
func (s *Scope) SetStackTraceLevel(l Level) {
	s.stackTraceLevel.Store(l)
}

// GetStackTraceLevel returns the stack tracing level associated with the scope.
func (s *Scope) GetStackTraceLevel() Level {
	return s.stackTraceLevel.Load().(Level)
}

// SetLogCallers adjusts whether caller (source location) info is emitted for the scope.
func (s *Scope) SetLogCallers(logCallers bool) {
	s.logCallers.Store(logCallers)
}

// GetLogCallers returns whether caller (source location) info is emitted for the scope.
func (s *Scope) GetLogCallers() bool {
	return s.logCallers.Load().(bool)
}
// copy makes a copy of s and returns a pointer to it.
//
// Both the labels map and the labelKeys slice are cloned. WithLabels appends
// to labelKeys on the returned copy; if the slice header were shared with the
// parent (or with sibling copies made from the same parent), an append that
// fits in the shared backing array's spare capacity would overwrite a
// sibling's keys.
func (s *Scope) copy() *Scope {
	out := *s
	out.labels = copyStringInterfaceMap(s.labels)
	out.labelKeys = append([]string(nil), s.labelKeys...)
	return &out
}
// WithLabels adds key-value pairs to the labels in s. The key must be a string, while the value may be any type.
// It returns a copy of s, with the labels added.
// e.g. newScope := oldScope.WithLabels("foo", "bar", "baz", 123, "qux", 0.123)
func (s *Scope) WithLabels(kvlist ...any) *Scope {
	out := s.copy()
	if len(kvlist)%2 != 0 {
		out.labels["WithLabels error"] = fmt.Sprintf("even number of parameters required, got %d", len(kvlist))
		return out
	}
	for i := 0; i < len(kvlist); i += 2 {
		rawKey := kvlist[i]
		key, isString := rawKey.(string)
		if !isString {
			out.labels["WithLabels error"] = fmt.Sprintf("label name %v must be a string, got %T ", rawKey, rawKey)
			return out
		}
		if _, exists := out.labels[key]; exists {
			// Key already present: just overwrite the value, keeping key order stable.
			out.labels[key] = kvlist[i+1]
			continue
		}
		out.labels[key] = kvlist[i+1]
		out.labelKeys = append(out.labelKeys, key)
	}
	return out
}
// emit forwards the message to emitWithTime stamped with the current time.
func (s *Scope) emit(level zapcore.Level, msg string) {
	s.emitWithTime(level, msg, time.Now())
}
// emitWithTime assembles a zapcore.Entry for msg at the given level and time
// and hands it to the currently installed patchTable write function. This is
// the single funnel through which every Scope logging method emits output.
func (s *Scope) emitWithTime(level zapcore.Level, msg string, t time.Time) {
	if t.IsZero() {
		t = time.Now()
	}
	e := zapcore.Entry{
		Message:    msg,
		Level:      level,
		Time:       t,
		LoggerName: s.nameToEmit,
	}
	if s.GetLogCallers() {
		// callerSkip + callerSkipOffset pops this helper and the public wrappers
		// off the stack so the reported file/line is the user's call site.
		e.Caller = zapcore.NewEntryCaller(runtime.Caller(s.callerSkip + callerSkipOffset))
	}
	if dumpStack(level, s) {
		e.Stack = zap.Stack("").String
	}
	var fields []zapcore.Field
	if useJSON.Load().(bool) {
		// JSON output: emit the scope's labels as structured fields, in insertion order.
		fields = make([]zapcore.Field, 0, len(s.labelKeys))
		for _, k := range s.labelKeys {
			v := s.labels[k]
			fields = append(fields, zap.Field{
				Key:       k,
				Interface: v,
				Type:      zapcore.ReflectType,
			})
		}
	} else if len(s.labelKeys) > 0 {
		// Plain-text output: append tab-separated "k=v" pairs to the message itself.
		sb := &strings.Builder{}
		// Assume roughly 15 chars per kv pair. Its fine to be off, this is just an optimization
		sb.Grow(len(msg) + 15*len(s.labelKeys))
		sb.WriteString(msg)
		sb.WriteString("\t")
		space := false
		for _, k := range s.labelKeys {
			if space {
				sb.WriteString(" ")
			}
			sb.WriteString(k)
			sb.WriteString("=")
			sb.WriteString(fmt.Sprint(s.labels[k]))
			space = true
		}
		e.Message = sb.String()
	}
	pt := funcs.Load().(patchTable)
	if pt.write != nil {
		if err := pt.write(e, fields); err != nil {
			// Write failures are reported to the error sink; errors there are deliberately ignored.
			_, _ = fmt.Fprintf(pt.errorSink, "%v log write error: %v\n", time.Now(), err)
			_ = pt.errorSink.Sync()
		}
	}
}
// copyStringInterfaceMap returns a shallow copy of m; values are assigned,
// so reference-typed values remain shared with the original.
func copyStringInterfaceMap(m map[string]any) map[string]any {
	cloned := make(map[string]any, len(m))
	for key, val := range m {
		cloned[key] = val
	}
	return cloned
}
// maybeSprintf runs fmt.Sprintf only when args are present; a bare format
// string is returned untouched, avoiding a needless Sprintf call and any
// accidental interpretation of '%' runes in it.
func maybeSprintf(format string, args []any) string {
	if len(args) == 0 {
		return format
	}
	return fmt.Sprintf(format, args...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"sync"
"time"
"go.uber.org/zap/buffer"
"go.uber.org/zap/zapcore"
)
// A udsCore writes entries to a UDS server with HTTP POST. Log messages are
// buffered by Write and flushed by Sync, encoded as a JSON array of strings.
type udsCore struct {
	// client is preconfigured to dial the Unix domain socket.
	client       http.Client
	minimumLevel zapcore.Level
	url          string
	enc          zapcore.Encoder
	// buffers holds encoded-but-unsent entries; guarded by mu.
	buffers []*buffer.Buffer
	mu      sync.Mutex
}
// teeToUDSServer returns a zapcore.Core that writes entries to both the provided core and to a UDS server.
// address is the Unix socket path; path is the HTTP path POSTed to on that socket.
func teeToUDSServer(baseCore zapcore.Core, address, path string) zapcore.Core {
	c := http.Client{
		Transport: &http.Transport{
			// Every request dials the Unix socket regardless of the URL's host.
			DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", address)
			},
		},
		Timeout: 100 * time.Millisecond,
	}
	uc := &udsCore{
		client: c,
		// "http://unix" is a placeholder host; the custom dialer ignores it.
		url:     "http://unix" + path,
		enc:     zapcore.NewJSONEncoder(defaultEncoderConfig),
		buffers: make([]*buffer.Buffer, 0),
	}
	// Mirror the base core's threshold: the lowest level it has enabled.
	for l := zapcore.DebugLevel; l <= zapcore.FatalLevel; l++ {
		if baseCore.Enabled(l) {
			uc.minimumLevel = l
			break
		}
	}
	return zapcore.NewTee(baseCore, uc)
}
// Enabled implements zapcore.Core; levels at or above the mirrored base-core threshold are accepted.
func (u *udsCore) Enabled(l zapcore.Level) bool {
	return l >= u.minimumLevel
}
// With implements zapcore.Core. It returns a copy of the core whose encoder
// carries the provided fields, so they are attached to every subsequent entry.
//
// The previous implementation copied only client and minimumLevel — dropping
// url and enc (so a derived core would POST to "" with a nil encoder) and
// silently discarding the fields argument.
func (u *udsCore) With(fields []zapcore.Field) zapcore.Core {
	enc := u.enc.Clone()
	for _, f := range fields {
		f.AddTo(enc)
	}
	return &udsCore{
		client:       u.client,
		minimumLevel: u.minimumLevel,
		url:          u.url,
		enc:          enc,
	}
}
// Check implements zapcore.Core: the core adds itself to the checked entry
// when the entry's level is enabled, otherwise the entry passes through unchanged.
func (u *udsCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry {
	if u.Enabled(e.Level) {
		return ce.AddCore(e, u)
	}
	return ce
}
// Sync implements zapcore.Core. It drains the buffered log messages and sends
// them to the UDS server as a JSON array via HTTP POST.
//
// Note: messages drained from the buffer are lost if the POST fails.
func (u *udsCore) Sync() error {
	logs := u.logsFromBuffer()
	msg, err := json.Marshal(logs)
	if err != nil {
		return fmt.Errorf("failed to sync uds log: %v", err)
	}
	resp, err := u.client.Post(u.url, "application/json", bytes.NewReader(msg))
	if err != nil {
		return fmt.Errorf("failed to send logs to uds server %v: %v", u.url, err)
	}
	// Close the response body so the transport can release the connection;
	// the previous code leaked the body on every call.
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("uds server returns non-ok status %v: %v", u.url, resp.Status)
	}
	return nil
}
// Write implements zapcore.Core. Entries are encoded and appended to the
// in-memory buffer list; they are shipped to the UDS server asynchronously,
// when Sync is called.
func (u *udsCore) Write(entry zapcore.Entry, fields []zapcore.Field) error {
	encoded, err := u.enc.EncodeEntry(entry, fields)
	if err != nil {
		return fmt.Errorf("failed to write log to uds logger: %v", err)
	}
	u.mu.Lock()
	defer u.mu.Unlock()
	u.buffers = append(u.buffers, encoded)
	return nil
}
// logsFromBuffer drains all pending buffers under the lock, returning their
// string contents and releasing each buffer back to its pool.
func (u *udsCore) logsFromBuffer() []string {
	u.mu.Lock()
	defer u.mu.Unlock()
	drained := make([]string, 0, len(u.buffers))
	for _, buf := range u.buffers {
		drained = append(drained, buf.String())
		buf.Free()
	}
	u.buffers = make([]*buffer.Buffer, 0)
	return drained
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package log
import (
"go.uber.org/zap/zapcore"
)
// toLevel maps zap's levels to this package's Level values; used when
// deciding whether a record should carry a stack trace.
var toLevel = map[zapcore.Level]Level{
	zapcore.FatalLevel: FatalLevel,
	zapcore.ErrorLevel: ErrorLevel,
	zapcore.WarnLevel:  WarnLevel,
	zapcore.InfoLevel:  InfoLevel,
	zapcore.DebugLevel: DebugLevel,
}

// callerSkipOffset is how many callers to pop off the stack to determine the caller function locality, used for
// adding file/line number to log output.
const callerSkipOffset = 3
// dumpStack reports whether a stack trace should be attached to a record
// emitted at the given zap level by the given scope.
func dumpStack(level zapcore.Level, scope *Scope) bool {
	thresh := toLevel[level]
	if scope != defaultScope {
		// Non-default scopes only dump stacks for error-or-worse records.
		if level == zapcore.FatalLevel {
			thresh = FatalLevel
		} else {
			thresh = ErrorLevel
		}
	}
	return scope.GetStackTraceLevel() >= thresh
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package maps
import "maps" // nolint: depguard
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
	return maps.Equal(m1, m2)
}

// Clone returns a copy of the map.
// The elements are copied using assignment, so this is a shallow clone.
func Clone[M ~map[K]V, K comparable, V any](m M) M {
	return maps.Clone(m)
}
// Values returns the values of the map m.
// The values will be in an indeterminate order.
func Values[M ~map[K]V, K comparable, V any](m M) []V {
	out := make([]V, len(m))
	i := 0
	for _, val := range m {
		out[i] = val
		i++
	}
	return out
}
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
	out := make([]K, len(m))
	i := 0
	for key := range m {
		out[i] = key
		i++
	}
	return out
}
// MergeCopy creates a new map by merging all key/value pairs from base and override.
// When a key in override is already present in base,
// the value in base will be overwritten by the value associated
// with the key in override.
func MergeCopy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](base M1, override M2) M1 {
	merged := make(M1, len(base)+len(override))
	for k, v := range base {
		merged[k] = v
	}
	for k, v := range override {
		merged[k] = v
	}
	return merged
}
// Contains checks if all key-value pairs in 'subset' are present in 'superset'.
// It returns true only if every key in 'subset' exists in 'superset' and their corresponding values are equal.
func Contains[M1, M2 ~map[K]V, K comparable, V comparable](superset M1, subset M2) bool {
	for key, want := range subset {
		got, ok := superset[key]
		if !ok {
			return false
		}
		if got != want {
			return false
		}
	}
	return true
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
	return maps.EqualFunc(m1, m2, eq)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"go.opentelemetry.io/otel/attribute"
"istio.io/istio/pkg/slices"
)
// baseMetric holds the state shared by every Metric implementation in this
// package and provides the common method implementations.
type baseMetric struct {
	name string
	// attrs stores all attrs for the metrics
	attrs []attribute.KeyValue
	// rest is the concrete Metric embedding this baseMetric; Increment,
	// Decrement, and RecordInt delegate to its Record method.
	rest Metric
}

// Name returns the metric's name.
func (f baseMetric) Name() string {
	return f.name
}

// Increment records a value of 1 via the concrete metric's Record.
func (f baseMetric) Increment() {
	f.rest.Record(1)
}

// Decrement records a value of -1 via the concrete metric's Record.
func (f baseMetric) Decrement() {
	f.rest.Record(-1)
}
// runRecordHook invokes the RecordHook registered for this metric's name, if
// any, translating the metric's attributes into LabelValues.
//
// The read lock is released via defer so a panicking hook cannot leave
// recordHookMutex read-locked forever (the original unlocked manually after
// the hook call).
func (f baseMetric) runRecordHook(value float64) {
	recordHookMutex.RLock()
	defer recordHookMutex.RUnlock()
	rh, ok := recordHooks[f.name]
	if !ok {
		return
	}
	lv := slices.Map(f.attrs, func(e attribute.KeyValue) LabelValue {
		return LabelValue{e}
	})
	rh.OnRecord(f.name, lv, value)
}
// Register implements Metric; registration is a no-op in this implementation.
func (f baseMetric) Register() error {
	return nil
}

// RecordInt converts the value to float64 and records it on the concrete metric.
func (f baseMetric) RecordInt(value int64) {
	f.rest.Record(float64(value))
}
// rebuildAttributes merges the metric's existing attributes with the extra
// labelValues, returning both the merged slice and the attribute.Set built
// from it.
func rebuildAttributes(bm baseMetric, labelValues []LabelValue) ([]attribute.KeyValue, attribute.Set) {
	merged := make([]attribute.KeyValue, 0, len(bm.attrs)+len(labelValues))
	merged = append(merged, bm.attrs...)
	for _, lv := range labelValues {
		merged = append(merged, lv.keyValue)
	}
	return merged, attribute.NewSet(merged...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"context"
api "go.opentelemetry.io/otel/metric"
"istio.io/istio/pkg/log"
)
// counter is the Sum Metric implementation, backed by an OpenTelemetry
// Float64Counter.
type counter struct {
	baseMetric
	c api.Float64Counter
	// precomputedAddOption is just a precomputation to avoid allocations on each record call
	precomputedAddOption []api.AddOption
}

var _ Metric = &counter{}

// newCounter creates the underlying otel counter; creation failure is fatal.
func newCounter(o options) *counter {
	c, err := meter().Float64Counter(o.name,
		api.WithDescription(o.description),
		api.WithUnit(string(o.unit)))
	if err != nil {
		log.Fatalf("failed to create counter: %v", err)
	}
	r := &counter{c: c}
	// rest points back at r so baseMetric helpers dispatch to counter.Record.
	r.baseMetric = baseMetric{
		name: o.name,
		rest: r,
	}
	return r
}
// Record adds value to the counter, running any registered record hook first.
// The precomputed add options (attribute set) are used when present.
func (f *counter) Record(value float64) {
	f.runRecordHook(value)
	ctx := context.Background()
	if opts := f.precomputedAddOption; opts != nil {
		f.c.Add(ctx, value, opts...)
		return
	}
	f.c.Add(ctx, value)
}
// With returns a new counter sharing the underlying otel instrument, with the
// provided label values merged into its attribute set. The attribute set is
// precomputed here so Record avoids per-call allocations.
func (f *counter) With(labelValues ...LabelValue) Metric {
	attrs, set := rebuildAttributes(f.baseMetric, labelValues)
	nm := &counter{
		c:                    f.c,
		precomputedAddOption: []api.AddOption{api.WithAttributeSet(set)},
	}
	nm.baseMetric = baseMetric{
		name:  f.name,
		attrs: attrs,
		rest:  nm,
	}
	return nm
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"context"
"sync"
"go.opentelemetry.io/otel/attribute"
api "go.opentelemetry.io/otel/metric"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/slices"
)
// derivedGauge reports values computed on demand: each registered attribute
// set maps to a callback that is invoked at metric-collection time.
type derivedGauge struct {
	mu sync.RWMutex
	// attrs maps an attribute set to the function producing its current value; guarded by mu.
	attrs map[attribute.Set]func() float64
	name  string
}

var _ DerivedMetric = &derivedGauge{}

// newDerivedGauge registers an observable gauge whose callback walks all
// registered value functions under the read lock. Creation failure is fatal.
func newDerivedGauge(name, description string) DerivedMetric {
	dm := &derivedGauge{
		name:  name,
		attrs: map[attribute.Set]func() float64{},
	}
	_, err := meter().Float64ObservableGauge(name,
		api.WithDescription(description),
		api.WithFloat64Callback(func(ctx context.Context, observer api.Float64Observer) error {
			dm.mu.RLock()
			defer dm.mu.RUnlock()
			for kv, compute := range dm.attrs {
				observer.Observe(compute(), api.WithAttributeSet(kv))
			}
			return nil
		}))
	if err != nil {
		log.Fatalf("failed to create derived gauge: %v", err)
	}
	return dm
}

// Name returns the gauge's name.
func (d *derivedGauge) Name() string {
	return d.name
}

// Register implements DerivedMetric; registration is a no-op here.
func (d *derivedGauge) Register() error {
	return nil
}
// ValueFrom installs (or replaces) the value function for the attribute set
// described by labelValues. The collection callback invokes it on each export.
func (d *derivedGauge) ValueFrom(valueFn func() float64, labelValues ...LabelValue) DerivedMetric {
	kvs := slices.Map(labelValues, func(lv LabelValue) attribute.KeyValue {
		return lv.keyValue
	})
	set := attribute.NewSet(kvs...)
	d.mu.Lock()
	defer d.mu.Unlock()
	d.attrs[set] = valueFn
	return d
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
// disabledMetric is the no-op Metric returned when a metric's enabled
// condition evaluates to false; every operation does nothing.
type disabledMetric struct {
	// name is retained so Name() still reports the metric's identity.
	name string
}

// Decrement implements Metric
func (dm *disabledMetric) Decrement() {}

// Increment implements Metric
func (dm *disabledMetric) Increment() {}

// Name implements Metric
func (dm *disabledMetric) Name() string {
	return dm.name
}

// Record implements Metric
func (dm *disabledMetric) Record(value float64) {}

// RecordInt implements Metric
func (dm *disabledMetric) RecordInt(value int64) {}

// Register implements Metric
func (dm *disabledMetric) Register() error {
	return nil
}

// With implements Metric; labels are ignored and the same disabled metric is returned.
func (dm *disabledMetric) With(labelValues ...LabelValue) Metric {
	return dm
}

var _ Metric = &disabledMetric{}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"context"
api "go.opentelemetry.io/otel/metric"
"istio.io/istio/pkg/log"
)
// distribution is the histogram Metric implementation, backed by an
// OpenTelemetry Float64Histogram.
type distribution struct {
	baseMetric
	d api.Float64Histogram
	// precomputedRecordOption is just a precomputation to avoid allocations on each record call
	precomputedRecordOption []api.RecordOption
}

var _ Metric = &distribution{}

// newDistribution creates the underlying otel histogram; creation failure is fatal.
func newDistribution(o options) *distribution {
	d, err := meter().Float64Histogram(o.name,
		api.WithDescription(o.description),
		api.WithUnit(string(o.unit)))
	if err != nil {
		log.Fatalf("failed to create distribution: %v", err)
	}
	r := &distribution{d: d}
	// rest points back at r so baseMetric helpers dispatch to distribution.Record.
	r.baseMetric = baseMetric{
		name: o.name,
		rest: r,
	}
	return r
}
// Record makes an observation of value on the histogram, running any
// registered record hook first. Precomputed record options are used when present.
func (f *distribution) Record(value float64) {
	f.runRecordHook(value)
	ctx := context.Background()
	if opts := f.precomputedRecordOption; opts != nil {
		f.d.Record(ctx, value, opts...)
		return
	}
	f.d.Record(ctx, value)
}
// With returns a new distribution sharing the underlying otel instrument, with
// the provided label values merged into its attribute set. The attribute set
// is precomputed here so Record avoids per-call allocations.
func (f *distribution) With(labelValues ...LabelValue) Metric {
	attrs, set := rebuildAttributes(f.baseMetric, labelValues)
	nm := &distribution{
		d:                       f.d,
		precomputedRecordOption: []api.RecordOption{api.WithAttributeSet(set)},
	}
	nm.baseMetric = baseMetric{
		name:  f.name,
		attrs: attrs,
		rest:  nm,
	}
	return nm
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"context"
"sync"
"go.opentelemetry.io/otel/attribute"
api "go.opentelemetry.io/otel/metric"
"istio.io/istio/pkg/log"
)
// gauge is the last-value Metric implementation. Because otel gauges are
// asynchronous, recorded values are stashed per attribute set and reported
// from an observe callback at collection time.
type gauge struct {
	baseMetric
	g api.Float64ObservableGauge
	// attributeSets stores a map of attributes -> values, for gauges.
	// Both fields below are shared by all With-derived copies of this gauge
	// and are guarded by attributeSetsMutex.
	attributeSetsMutex *sync.RWMutex
	attributeSets      map[attribute.Set]*gaugeValues
	// currentGaugeSet is the slot Record writes into for this copy's labels.
	currentGaugeSet *gaugeValues
}

var _ Metric = &gauge{}

// newGauge registers an observable gauge whose callback reports the latest
// value stored for every attribute set. Creation failure is fatal.
func newGauge(o options) *gauge {
	r := &gauge{
		attributeSetsMutex: &sync.RWMutex{},
	}
	r.attributeSets = map[attribute.Set]*gaugeValues{}
	g, err := meter().Float64ObservableGauge(o.name,
		api.WithFloat64Callback(func(ctx context.Context, observer api.Float64Observer) error {
			// NOTE(review): this callback only reads attributeSets, so a
			// read lock would likely suffice — confirm before changing.
			r.attributeSetsMutex.Lock()
			defer r.attributeSetsMutex.Unlock()
			for _, gv := range r.attributeSets {
				observer.Observe(gv.val, gv.opt...)
			}
			return nil
		}),
		api.WithDescription(o.description),
		api.WithUnit(string(o.unit)))
	if err != nil {
		log.Fatalf("failed to create gauge: %v", err)
	}
	r.g = g
	r.baseMetric = baseMetric{
		name: o.name,
		rest: r,
	}
	return r
}
// Record stores value as the latest reading for this gauge copy's attribute
// set; the observe callback reports it at the next collection.
func (f *gauge) Record(value float64) {
	f.runRecordHook(value)
	// TODO: https://github.com/open-telemetry/opentelemetry-specification/issues/2318 use synchronous gauge so we don't need to deal with this
	f.attributeSetsMutex.Lock()
	// Special case: we lazy-load the non-labeled value. This ensures that metrics which should always have labels do not end up with a un-labeled zero-value
	// If a metric really requires `metric{} 0`, they can explicitly call .Record(0).
	if f.currentGaugeSet == nil {
		f.currentGaugeSet = &gaugeValues{}
		f.attributeSets[attribute.NewSet()] = f.currentGaugeSet
	}
	f.currentGaugeSet.val = value
	f.attributeSetsMutex.Unlock()
}
// With returns a new gauge copy bound to the merged attribute set. All copies
// share the same attributeSets map and mutex, so each labeled slot is reported
// exactly once by the observe callback.
//
// Fixes from the previous version: the shared attributeSets map is now read
// and written under attributeSetsMutex (Record and the observe callback
// already lock it, so the unguarded access here was a data race), and the
// local bool no longer shadows the receiver `f`.
func (f *gauge) With(labelValues ...LabelValue) Metric {
	attrs, set := rebuildAttributes(f.baseMetric, labelValues)
	nm := &gauge{
		g:                  f.g,
		attributeSetsMutex: f.attributeSetsMutex,
		attributeSets:      f.attributeSets,
	}
	f.attributeSetsMutex.Lock()
	if _, found := nm.attributeSets[set]; !found {
		nm.attributeSets[set] = &gaugeValues{
			opt: []api.ObserveOption{api.WithAttributeSet(set)},
		}
	}
	nm.currentGaugeSet = nm.attributeSets[set]
	f.attributeSetsMutex.Unlock()
	nm.baseMetric = baseMetric{
		name:  f.name,
		attrs: attrs,
		rest:  nm,
	}
	return nm
}
// gaugeValues is one slot in a gauge's attributeSets map: the latest recorded
// value plus the precomputed observe options carrying its attribute set.
type gaugeValues struct {
	val float64
	opt []api.ObserveOption
}
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
import (
"net/http"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
otelprom "go.opentelemetry.io/otel/exporters/prometheus"
api "go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/sdk/metric"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/maps"
"istio.io/istio/pkg/slices"
)
var (
	// meter returns the current global meter; resolved lazily so it picks up
	// a provider installed after package init (e.g. by RegisterPrometheusExporter).
	meter = func() api.Meter {
		return otel.GetMeterProvider().Meter("istio")
	}
	monitoringLogger = log.RegisterScope("monitoring", "metrics monitoring")
)

func init() {
	// Route the otel SDK's internal logging through our scope.
	otel.SetLogger(log.NewLogrAdapter(monitoringLogger))
}

// RegisterPrometheusExporter sets the global metrics handler to the provided Prometheus registerer and gatherer.
// Returned is an HTTP handler that can be used to read metrics from.
func RegisterPrometheusExporter(reg prometheus.Registerer, gatherer prometheus.Gatherer) (http.Handler, error) {
	if reg == nil {
		reg = prometheus.DefaultRegisterer
	}
	if gatherer == nil {
		gatherer = prometheus.DefaultGatherer
	}
	promOpts := []otelprom.Option{
		otelprom.WithoutScopeInfo(),
		otelprom.WithoutTargetInfo(),
		otelprom.WithoutUnits(),
		otelprom.WithRegisterer(reg),
		otelprom.WithoutCounterSuffixes(),
	}
	prom, err := otelprom.New(promOpts...)
	if err != nil {
		return nil, err
	}
	// Attach per-metric histogram bucket views collected before startup.
	opts := []metric.Option{metric.WithReader(prom)}
	opts = append(opts, knownMetrics.toHistogramViews()...)
	mp := metric.NewMeterProvider(opts...)
	otel.SetMeterProvider(mp)
	handler := promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})
	return handler, nil
}
// A Metric collects numerical observations.
type Metric interface {
	// Increment records a value of 1 for the current measure. For Sums,
	// this is equivalent to adding 1 to the current value. For Gauges,
	// this is equivalent to setting the value to 1. For Distributions,
	// this is equivalent to making an observation of value 1.
	Increment()

	// Decrement records a value of -1 for the current measure. For Sums,
	// this is equivalent to subtracting 1 from the current value. For Gauges,
	// this is equivalent to setting the value to -1. For Distributions,
	// this is equivalent to making an observation of value -1.
	Decrement()

	// Name returns the name value of a Metric.
	Name() string

	// Record makes an observation of the provided value for the given measure.
	Record(value float64)

	// RecordInt makes an observation of the provided value for the measure.
	RecordInt(value int64)

	// With creates a new Metric, with the LabelValues provided. This allows creating
	// a set of pre-dimensioned data for recording purposes. This is primarily used
	// for documentation and convenience. Metrics created with this method do not need
	// to be registered (they share the registration of their parent Metric).
	With(labelValues ...LabelValue) Metric

	// Register configures the Metric for export. It MUST be called before collection
	// of values for the Metric. An error will be returned if registration fails.
	Register() error
}

// DerivedMetric can be used to supply values that dynamically derive from internal
// state, but are not updated based on any specific event. Their value will be calculated
// based on a value func that executes when the metrics are exported.
//
// At the moment, only a Gauge type is supported.
type DerivedMetric interface {
	// Name returns the name value of a DerivedMetric.
	Name() string

	// Register handles any required setup to ensure metric export.
	Register() error

	// ValueFrom is used to update the derived value with the provided
	// function and the associated label values. If the metric is unlabeled,
	// ValueFrom may be called without any labelValues. Otherwise, the labelValues
	// supplied MUST match the label keys supplied at creation time both in number
	// and in order.
	ValueFrom(valueFn func() float64, labelValues ...LabelValue) DerivedMetric
}
// CreateLabel will attempt to create a new Label.
func CreateLabel(key string) Label {
	return Label{attribute.Key(key)}
}

// A Label provides a named dimension for a Metric.
type Label struct {
	key attribute.Key
}

// Value creates a new LabelValue for the Label.
func (l Label) Value(value string) LabelValue {
	return LabelValue{l.key.String(value)}
}

// A LabelValue represents a Label with a specific value. It is used to record
// values for a Metric.
type LabelValue struct {
	keyValue attribute.KeyValue
}

// Key returns the Label this value belongs to.
func (l LabelValue) Key() Label {
	return Label{l.keyValue.Key}
}

// Value returns the value as a string.
func (l LabelValue) Value() string {
	return l.keyValue.Value.AsString()
}

// RecordHook is a callback invoked whenever a measure with a registered name is recorded.
type RecordHook interface {
	OnRecord(name string, tags []LabelValue, value float64)
}

var (
	// recordHooks maps metric names to their hook; guarded by recordHookMutex.
	recordHooks     = map[string]RecordHook{}
	recordHookMutex sync.RWMutex
)

// RegisterRecordHook adds a RecordHook for a given measure.
func RegisterRecordHook(name string, h RecordHook) {
	recordHookMutex.Lock()
	defer recordHookMutex.Unlock()
	recordHooks[name] = h
}
// NewSum creates a new Sum Metric (the values will be cumulative).
// That means that data collected by the new Metric will be summed before export.
func NewSum(name, description string, opts ...Options) Metric {
	knownMetrics.register(MetricDefinition{
		Name:        name,
		Type:        "Sum",
		Description: description,
	})
	o, dm := createOptions(name, description, opts...)
	// dm is non-nil when the metric's enabled condition is false; return the no-op.
	if dm != nil {
		return dm
	}
	return newCounter(o)
}

// NewGauge creates a new Gauge Metric. That means that data collected by the new
// Metric will export only the last recorded value.
func NewGauge(name, description string, opts ...Options) Metric {
	knownMetrics.register(MetricDefinition{
		Name:        name,
		Type:        "LastValue",
		Description: description,
	})
	o, dm := createOptions(name, description, opts...)
	if dm != nil {
		return dm
	}
	return newGauge(o)
}

// NewDerivedGauge creates a new Gauge Metric. That means that data collected by the new
// Metric will export only the last recorded value.
// Unlike NewGauge, the DerivedGauge accepts functions which are called to get the current value.
func NewDerivedGauge(name, description string) DerivedMetric {
	knownMetrics.register(MetricDefinition{
		Name:        name,
		Type:        "LastValue",
		Description: description,
	})
	return newDerivedGauge(name, description)
}

// NewDistribution creates a new Metric with an aggregation type of Distribution. This means that the
// data collected by the Metric will be collected and exported as a histogram, with the specified bounds.
func NewDistribution(name, description string, bounds []float64, opts ...Options) Metric {
	knownMetrics.register(MetricDefinition{
		Name:        name,
		Type:        "Distribution",
		Description: description,
		Bounds:      bounds,
	})
	o, dm := createOptions(name, description, opts...)
	if dm != nil {
		return dm
	}
	return newDistribution(o)
}
// MetricDefinition records a metric's metadata.
// This is used to work around two limitations of OpenTelemetry:
// - (https://github.com/open-telemetry/opentelemetry-go/issues/4003) Histogram buckets cannot be defined per instrument.
// instead, we record all metric definitions and add them as Views at registration time.
// - Support pkg/collateral, which wants to query all metrics. This cannot use a simple Collect() call, as this ignores any unused metrics.
type MetricDefinition struct {
	Name        string
	Type        string
	Description string
	// Bounds is set only for Distribution metrics (histogram bucket boundaries).
	Bounds []float64
}

// metrics stores known metrics
type metrics struct {
	// started is set once an exporter is created; registering after that is fatal.
	started bool
	mu      sync.Mutex
	known   map[string]MetricDefinition
}

// knownMetrics is a global that stores all registered metrics
var knownMetrics = metrics{
	known: map[string]MetricDefinition{},
}

// ExportMetricDefinitions reports all currently registered metric definitions,
// sorted by name for deterministic output.
func ExportMetricDefinitions() []MetricDefinition {
	knownMetrics.mu.Lock()
	defer knownMetrics.mu.Unlock()
	return slices.SortBy(maps.Values(knownMetrics.known), func(a MetricDefinition) string {
		return a.Name
	})
}

// register records a newly defined metric. Only valid before an exporter is set.
func (d *metrics) register(def MetricDefinition) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.started {
		log.Fatalf("Attempting to initialize metric %q after metrics have started", def.Name)
	}
	d.known[def.Name] = def
}
// toHistogramViews works around https://github.com/open-telemetry/opentelemetry-go/issues/4003; in the future we can define
// this when we create the histogram.
// Calling it marks the registry as started, after which register() becomes fatal.
func (d *metrics) toHistogramViews() []metric.Option {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.started = true
	opts := []metric.Option{}
	for name, def := range d.known {
		if def.Bounds == nil {
			continue
		}
		// for each histogram metric (i.e. those with bounds), set up a view explicitly defining those buckets.
		v := metric.WithView(metric.NewView(
			metric.Instrument{Name: name},
			metric.Stream{Aggregation: metric.AggregationExplicitBucketHistogram{
				Boundaries: def.Bounds,
			}},
		))
		opts = append(opts, v)
	}
	return opts
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package monitoring
// Options encode changes to the options passed to a Metric at creation time.
type Options func(*options)
type options struct {
enabledCondition func() bool
unit Unit
name string
description string
}
// WithUnit provides configuration options for a new Metric, providing unit of measure
// information for a new Metric.
func WithUnit(unit Unit) Options {
	return func(opts *options) {
		opts.unit = unit
	}
}

// WithEnabled allows a metric to be conditionally enabled if the provided function returns true.
// If disabled, metric operations will do nothing.
// The condition is evaluated once, at metric creation time (see createOptions).
func WithEnabled(enabled func() bool) Options {
	return func(o *options) {
		o.enabledCondition = enabled
	}
}
// createOptions resolves the variadic Options into a concrete options struct.
// If an enabled-condition was supplied and evaluates to false, it also returns
// a disabledMetric stand-in that callers should use instead of a real metric;
// otherwise the returned Metric is nil.
func createOptions(name, description string, opts ...Options) (options, Metric) {
	o := options{
		unit:        None,
		name:        name,
		description: description,
	}
	for _, apply := range opts {
		apply(&o)
	}
	if cond := o.enabledCondition; cond != nil && !cond() {
		return o, &disabledMetric{name: name}
	}
	return o, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package network
import (
"errors"
"net"
"net/http"
)
func IsUnexpectedListenerError(err error) bool {
if err == nil {
return false
}
if errors.Is(err, net.ErrClosed) {
return false
}
if errors.Is(err, http.ErrServerClosed) {
return false
}
return true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package network
import "istio.io/istio/pkg/util/identifier"
// ID is the unique identifier for a network.
type ID string

// Equals reports whether the two network IDs match, treating empty IDs as
// equal to anything (delegates to identifier.IsSameOrEmpty).
func (id ID) Equals(other ID) bool {
	return identifier.IsSameOrEmpty(string(id), string(other))
}

// String returns the ID as a plain string.
func (id ID) String() string {
	return string(id)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import "istio.io/istio/pkg/env"
// Recognized platform values for the PLATFORM environment variable.
const (
	Default   = ""
	OpenShift = "openshift"
	GCP       = "gcp"
)

// Platform is read once from the PLATFORM environment variable at package
// initialization; it is not re-read afterwards.
var Platform = env.Register(
	"PLATFORM",
	Default,
	"Platform where Istio is deployed. Possible values are \"openshift\" and \"gcp\"",
).Get()
// IsDefault returns true if the platform is the Default one
func IsDefault() bool {
	return Platform == Default
}

// IsOpenShift returns true if the platform is OpenShift
func IsOpenShift() bool {
	return Platform == OpenShift
}

// IsGCP returns true if the platform is GCP
func IsGCP() bool {
	return Platform == GCP
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package merge
/*
CODE Copied and modified from https://github.com/kumahq/kuma/blob/master/pkg/util/proto/google_proto.go
because of: https://github.com/golang/protobuf/issues/1359
Copyright 2019 The Go Authors. All rights reserved.
Use of this source code is governed by a BSD-style
license that can be found in the LICENSE file.
*/
import (
"fmt"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/durationpb"
)
type (
	// MergeFunction merges the populated fields of src into dst for a single
	// message type, replacing the default field-by-field merge behavior.
	MergeFunction func(dst, src protoreflect.Message)
	// mergeOptions maps fully-qualified message names to the custom merge
	// function to use for that type.
	mergeOptions struct {
		customMergeFn map[protoreflect.FullName]MergeFunction
	}
)

// OptionFn transforms a mergeOptions, returning the updated value.
type OptionFn func(options mergeOptions) mergeOptions
// MergeFunctionOptionFn returns an OptionFn that registers a custom merge
// function for the message type identified by name.
func MergeFunctionOptionFn(name protoreflect.FullName, function MergeFunction) OptionFn {
	return func(opts mergeOptions) mergeOptions {
		opts.customMergeFn[name] = function
		return opts
	}
}
// ReplaceMergeFn instead of merging all subfields one by one, takes src and sets it to dst:
// every populated field on dst is cleared first, then every populated field of
// src is copied over, so dst ends up equal to src.
var ReplaceMergeFn MergeFunction = func(dst, src protoreflect.Message) {
	// Clear all populated fields of dst.
	dst.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		dst.Clear(fd)
		return true
	})
	// Copy all populated fields of src into dst.
	src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		dst.Set(fd, v)
		return true
	})
}
// options is the default set of custom merge behaviors applied by Merge.
var options = []OptionFn{
	// Workaround https://github.com/golang/protobuf/issues/1359, merge duration properly
	MergeFunctionOptionFn((&durationpb.Duration{}).ProtoReflect().Descriptor().FullName(), ReplaceMergeFn),
}

// Merge merges src into dst using the default options (durations are replaced
// wholesale rather than field-merged).
func Merge(dst, src proto.Message) {
	merge(dst, src, options...)
}
// Merge Code of proto.Merge with modifications to support custom types.
// Panics (matching proto.Merge behavior) if dst and src are not the same
// message type.
func merge(dst, src proto.Message, opts ...OptionFn) {
	// Build the effective merge options from the supplied OptionFns.
	mo := mergeOptions{customMergeFn: map[protoreflect.FullName]MergeFunction{}}
	for _, opt := range opts {
		mo = opt(mo)
	}
	dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect()
	if dstMsg.Descriptor() != srcMsg.Descriptor() {
		if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want {
			panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want))
		}
		// Same full name but different descriptor instances (e.g. messages from
		// different registries).
		panic("descriptor mismatch")
	}
	mo.mergeMessage(dstMsg, srcMsg)
}
// mergeMessage merges every populated field of src into dst, recursing into
// nested messages, lists and maps, and dispatching to a registered custom
// merge function when one exists for the nested message type.
func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) {
	// The regular proto.mergeMessage would have a fast path method option here.
	// As we want to have exceptions we always use the slow path.
	if !dst.IsValid() {
		panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName()))
	}
	src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		switch {
		case fd.IsList():
			o.mergeList(dst.Mutable(fd).List(), v.List(), fd)
		case fd.IsMap():
			// Note: the map *value* descriptor is passed down, since that is
			// what determines how entries are merged.
			o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue())
		case fd.Message() != nil:
			mergeFn, exists := o.customMergeFn[fd.Message().FullName()]
			if exists {
				mergeFn(dst.Mutable(fd).Message(), v.Message())
			} else {
				o.mergeMessage(dst.Mutable(fd).Message(), v.Message())
			}
		case fd.Kind() == protoreflect.BytesKind:
			// Bytes are copied so dst does not alias src's backing array.
			dst.Set(fd, o.cloneBytes(v))
		default:
			dst.Set(fd, v)
		}
		return true
	})
	// Preserve unknown fields from both messages.
	if len(src.GetUnknown()) > 0 {
		dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...))
	}
}
// mergeList appends (deep-copied) elements of src onto dst.
func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) {
	// Merge semantics appends to the end of the existing list.
	for i, n := 0, src.Len(); i < n; i++ {
		switch v := src.Get(i); {
		case fd.Message() != nil:
			// Deep-copy message elements via a fresh element + recursive merge.
			dstv := dst.NewElement()
			o.mergeMessage(dstv.Message(), v.Message())
			dst.Append(dstv)
		case fd.Kind() == protoreflect.BytesKind:
			dst.Append(o.cloneBytes(v))
		default:
			dst.Append(v)
		}
	}
}
// mergeMap copies all entries of src into dst; fd is the descriptor of the
// map's *value* type (see the caller in mergeMessage).
func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) {
	// Merge semantics replaces, rather than merges into existing entries.
	src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
		switch {
		case fd.Message() != nil:
			// Deep-copy message values via a fresh value + recursive merge.
			dstv := dst.NewValue()
			o.mergeMessage(dstv.Message(), v.Message())
			dst.Set(k, dstv)
		case fd.Kind() == protoreflect.BytesKind:
			dst.Set(k, o.cloneBytes(v))
		default:
			dst.Set(k, v)
		}
		return true
	})
}
// cloneBytes returns a Value holding a fresh copy of v's byte slice so the
// destination message does not alias the source's backing array.
func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value {
	src := v.Bytes()
	dup := make([]byte, len(src))
	copy(dup, src)
	return protoreflect.ValueOfBytes(dup)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package proxy
import (
"context"
"encoding/json"
"fmt"
"istio.io/istio/pilot/pkg/xds"
"istio.io/istio/pkg/kube"
istioVersion "istio.io/istio/pkg/version"
)
// sidecarSyncStatus is the JSON shape returned by istiod's debug/syncz
// endpoint: a SyncStatus plus the reporting pilot instance.
type sidecarSyncStatus struct {
	// nolint: structcheck, unused
	pilot string
	xds.SyncStatus
}
// GetProxyInfo retrieves infos of proxies that connect to the Istio control plane of specific revision.
// It queries every discovery (istiod) instance in istioNamespace via the
// debug/syncz endpoint and aggregates the connected proxies from each.
func GetProxyInfo(kubeClient kube.CLIClient, istioNamespace string) (*[]istioVersion.ProxyInfo, error) {
	// Ask Pilot for the Envoy sidecar sync status, which includes the sidecar version info
	allSyncz, err := kubeClient.AllDiscoveryDo(context.TODO(), istioNamespace, "debug/syncz")
	if err != nil {
		return nil, err
	}
	pi := []istioVersion.ProxyInfo{}
	for _, syncz := range allSyncz {
		// Each response is a JSON array of sidecarSyncStatus.
		var sss []*sidecarSyncStatus
		err = json.Unmarshal(syncz, &sss)
		if err != nil {
			return nil, err
		}
		for _, ss := range sss {
			pi = append(pi, istioVersion.ProxyInfo{
				ID:           ss.ProxyID,
				IstioVersion: ss.SyncStatus.IstioVersion,
				Type:         istioVersion.ToUserFacingNodeType(string(ss.ProxyType)),
			})
		}
	}
	return &pi, nil
}
// GetIDsFromProxyInfo is a helper function to retrieve list of IDs from Proxy.
func GetIDsFromProxyInfo(kubeClient kube.CLIClient, istioNamespace string) ([]string, error) {
	proxies, err := GetProxyInfo(kubeClient, istioNamespace)
	if err != nil {
		return nil, fmt.Errorf("failed to get proxy infos: %v", err)
	}
	var ids []string
	for _, proxy := range *proxies {
		ids = append(ids, proxy.ID)
	}
	return ids, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ptr
import (
"fmt"
)
// Of returns a pointer to the input. In most cases, callers should just do &t. However, in some cases
// Go cannot take a pointer. For example, `ptr.Of(f())`.
func Of[T any](t T) *T {
	p := new(T)
	*p = t
	return p
}
// OrEmpty returns *t if t is non-nil, or else the zero value of T.
func OrEmpty[T any](t *T) T {
	if t == nil {
		var zero T
		return zero
	}
	return *t
}
// OrDefault returns *t if t is non-nil, or else def.
func OrDefault[T any](t *T, def T) T {
	if t == nil {
		return def
	}
	return *t
}
// NonEmptyOrDefault returns t if it is not the zero value of T, or else def.
func NonEmptyOrDefault[T comparable](t T, def T) T {
	var zero T
	if t == zero {
		return def
	}
	return t
}
// Empty returns the zero value of T.
func Empty[T any]() T {
	return *new(T)
}
// ToList returns nil when t is nil, or else a single-element slice
// containing *t.
func ToList[T any](t *T) []T {
	if t != nil {
		return []T{*t}
	}
	return nil
}
// TypeName returns the formatted (%T) name of T's zero value.
func TypeName[T any]() string {
	return fmt.Sprintf("%T", *new(T))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package queue
import (
"container/heap"
"runtime"
"sync"
"time"
"istio.io/istio/pkg/log"
)
// delayTask is a unit of work scheduled to run at (or after) runAt.
type delayTask struct {
	do func() error
	// runAt orders tasks in the priority queue: soonest first.
	runAt time.Time
	// retries counts failed executions; capped at maxTaskRetry.
	retries int
}

// maxTaskRetry is the number of times a failing task is re-enqueued before
// being dropped (see delayQueue.work).
const maxTaskRetry = 3
// Compile-time check that pq satisfies heap.Interface.
var _ heap.Interface = &pq{}

// pq implements an internal priority queue so that tasks with the soonest expiry will be run first.
// Methods on pq are not threadsafe, access should be protected.
// much of this is taken from the example at https://golang.org/pkg/container/heap/
type pq []*delayTask

func (q pq) Len() int {
	return len(q)
}

// Less orders by runAt, so the heap root is the soonest-due task.
func (q pq) Less(i, j int) bool {
	return q[i].runAt.Before(q[j].runAt)
}

func (q *pq) Swap(i, j int) {
	(*q)[i], (*q)[j] = (*q)[j], (*q)[i]
}

// Push appends x; only heap.Push should call this (it restores the invariant).
func (q *pq) Push(x any) {
	*q = append(*q, x.(*delayTask))
}
// Pop removes and returns the last element; only heap.Pop should call this
// (it has already swapped the root to the end). It also shrinks the backing
// array when it is mostly empty, so a burst of tasks does not pin memory.
func (q *pq) Pop() any {
	old := *q
	n := len(old)
	c := cap(old)
	// Shrink the capacity of task queue.
	if n < c/2 && c > 32 {
		npq := make(pq, n, c/2)
		copy(npq, old)
		old = npq
	}
	if n == 0 {
		return nil
	}
	item := old[n-1]
	old[n-1] = nil // avoid memory leak
	*q = old[0 : n-1]
	return item
}
// Peek is not managed by the container/heap package, so we return the 0th element in the list
// (the soonest-due task) without removing it; nil when the queue is empty.
func (q *pq) Peek() any {
	if len(*q) == 0 {
		return nil
	}
	return (*q)[0]
}
// Delayed implements queue such that tasks are executed after a specified delay.
type Delayed interface {
	baseInstance
	// PushDelayed schedules t to run after delay has elapsed.
	PushDelayed(t Task, delay time.Duration)
}

// Compile-time check that delayQueue implements Delayed.
var _ Delayed = &delayQueue{}
// DelayQueueOption configure the behavior of the queue. Must be applied before Run.
type DelayQueueOption func(*delayQueue)

// DelayQueueBuffer sets maximum number of tasks awaiting execution. If this limit is reached, Push and PushDelayed
// will block until there is room.
func DelayQueueBuffer(bufferSize int) DelayQueueOption {
	return func(queue *delayQueue) {
		// Replace the default channel created by NewDelayed; close the old one
		// so it is not leaked if this option is applied more than once.
		if queue.enqueue != nil {
			close(queue.enqueue)
		}
		queue.enqueue = make(chan *delayTask, bufferSize)
	}
}

// DelayQueueWorkers sets the number of background worker goroutines awaiting tasks to execute. Effectively the
// maximum number of concurrent tasks.
func DelayQueueWorkers(workers int) DelayQueueOption {
	return func(queue *delayQueue) {
		queue.workers = workers
	}
}
// workerChanBuf determines whether the channel of a worker should be a buffered channel
// to get the best performance. Computed once at package init from GOMAXPROCS.
var workerChanBuf = func() int {
	// Use blocking channel if GOMAXPROCS=1.
	// This switches context from sender to receiver immediately,
	// which results in higher performance.
	var n int
	if n = runtime.GOMAXPROCS(0); n == 1 {
		return 0
	}
	// Make channel non-blocking and set up its capacity with GOMAXPROCS if GOMAXPROCS>1,
	// otherwise the sender might be dragged down if the receiver is CPU-bound.
	//
	// GOMAXPROCS determines how many goroutines can run in parallel,
	// which makes it the best choice as the channel capacity.
	return n
}()
// NewDelayed gives a Delayed queue with maximum concurrency specified by workers.
// Defaults: 1 worker and an enqueue buffer of 100; override via opts
// (DelayQueueWorkers, DelayQueueBuffer) before calling Run.
func NewDelayed(opts ...DelayQueueOption) Delayed {
	q := &delayQueue{
		workers: 1,
		queue:   &pq{},
		execute: make(chan *delayTask, workerChanBuf),
		enqueue: make(chan *delayTask, 100),
	}
	for _, o := range opts {
		o(q)
	}
	return q
}
// delayQueue runs tasks after their scheduled time, using a heap of pending
// tasks and a pool of worker goroutines.
type delayQueue struct {
	workers int
	// workerStopped collects one done-channel per worker; see Closed.
	workerStopped []chan struct{}
	// incoming
	enqueue chan *delayTask
	// outgoing
	execute chan *delayTask
	// mu guards queue; the pq itself is not threadsafe.
	mu    sync.Mutex
	queue *pq
}
// Push will execute the task as soon as possible
func (d *delayQueue) Push(task Task) {
	d.PushDelayed(task, 0)
}

// PushDelayed will execute the task after waiting for the delay
func (d *delayQueue) PushDelayed(t Task, delay time.Duration) {
	d.pushInternal(&delayTask{do: t, runAt: time.Now().Add(delay)})
}
// pushInternal will enqueue the delayTask with retries.
// It first tries the buffered enqueue channel (consumed by Run); when that is
// full it falls back to pushing directly onto the heap under the lock, so the
// call never blocks.
func (d *delayQueue) pushInternal(task *delayTask) {
	select {
	case d.enqueue <- task:
		// buffer has room to enqueue
	default:
		// TODO warn and resize buffer
		// if the buffer is full, we take the more expensive route of locking and pushing directly to the heap
		d.mu.Lock()
		heap.Push(d.queue, task)
		d.mu.Unlock()
	}
}
// Closed returns a channel that is closed once every worker goroutine started
// by Run has exited. NOTE(review): it reads workerStopped without holding a
// lock; presumably callers invoke it after Run has started — confirm.
func (d *delayQueue) Closed() <-chan struct{} {
	done := make(chan struct{})
	go func() {
		for _, ch := range d.workerStopped {
			<-ch
		}
		close(done)
	}()
	return done
}
// Run starts the worker pool and then loops dispatching due tasks: the head
// of the heap is handed to a worker once its runAt has passed; while waiting
// for it to come due, newly enqueued tasks are folded into the heap. Run
// returns when stop is closed.
func (d *delayQueue) Run(stop <-chan struct{}) {
	for i := 0; i < d.workers; i++ {
		d.workerStopped = append(d.workerStopped, d.work(stop))
	}
	// push hands a due task to a worker, or reports false if we are stopping.
	push := func(t *delayTask) bool {
		select {
		case d.execute <- t:
			return true
		case <-stop:
			return false
		}
	}
	for {
		var task *delayTask
		d.mu.Lock()
		if head := d.queue.Peek(); head != nil {
			task = head.(*delayTask)
			heap.Pop(d.queue)
		}
		d.mu.Unlock()
		if task != nil {
			delay := time.Until(task.runAt)
			if delay <= 0 {
				// execute now and continue processing incoming enqueues/tasks
				if !push(task) {
					return
				}
			} else {
				// not ready yet, don't block enqueueing
				await := time.NewTimer(delay)
				select {
				case t := <-d.enqueue:
					d.mu.Lock()
					heap.Push(d.queue, t)
					// put the old "head" back on the queue, it may be scheduled to execute after the one
					// that was just pushed
					heap.Push(d.queue, task)
					d.mu.Unlock()
				case <-await.C:
					if !push(task) {
						return
					}
				case <-stop:
					await.Stop()
					return
				}
				await.Stop()
			}
		} else {
			// no items, wait for Push or stop
			select {
			case t := <-d.enqueue:
				d.mu.Lock()
				// Must use heap.Push (not the raw pq.Push append): the heap is
				// not necessarily empty here, because pushInternal may have
				// pushed directly onto it since we released the lock above, and
				// a raw append would break the heap invariant.
				heap.Push(d.queue, t)
				d.mu.Unlock()
			case <-stop:
				return
			}
		}
	}
}
// work takes a channel that signals to stop, and returns a channel that signals the worker has fully stopped.
// Each worker executes tasks from d.execute; a failing task is re-enqueued up
// to maxTaskRetry times before being dropped.
func (d *delayQueue) work(stop <-chan struct{}) (stopped chan struct{}) {
	stopped = make(chan struct{})
	go func() {
		defer close(stopped)
		for {
			select {
			case t := <-d.execute:
				if err := t.do(); err != nil {
					if t.retries < maxTaskRetry {
						t.retries++
						log.Warnf("Work item handle failed: %v %d times, retry it", err, t.retries)
						// Re-enqueue immediately (runAt unchanged, so it is due now).
						d.pushInternal(t)
						continue
					}
					log.Errorf("Work item handle failed: %v, reaching the maximum retry times: %d, drop it", err, maxTaskRetry)
				}
			case <-stop:
				return
			}
		}
	}()
	return
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package queue
import (
"sync"
"time"
"go.uber.org/atomic"
"k8s.io/apimachinery/pkg/util/rand"
"istio.io/istio/pkg/log"
)
// Task to be performed.
type Task func() error

// queueTask wraps a Task with timestamps used for queue metrics.
type queueTask struct {
	task Task
	// enqueueTime is when the task was pushed (used for latency metric).
	enqueueTime time.Time
	// startTime is when the task was dequeued (used for duration metric).
	startTime time.Time
}

// Instance of work tickets processed using a rate-limiting loop
type baseInstance interface {
	// Push a task.
	Push(task Task)
	// Run the loop until a signal on the channel
	Run(<-chan struct{})
	// Closed returns a chan that will be signaled when the Instance has stopped processing tasks.
	Closed() <-chan struct{}
}

type Instance interface {
	baseInstance
	// HasSynced returns true once the queue has synced.
	// Syncing indicates that all items in the queue *before* Run was called have been processed.
	HasSynced() bool
}
// queueImpl is a FIFO task queue with retry-after-delay on task failure.
// All mutable state is guarded by cond.L.
type queueImpl struct {
	// delay is how long to wait before re-pushing a failed task.
	delay time.Duration
	tasks []*queueTask
	// cond guards tasks/closing and wakes the Run loop on Push or close.
	cond    *sync.Cond
	closing bool
	closed  chan struct{}
	// closeOnce ensures closed is only closed once even if Run exits twice.
	closeOnce *sync.Once
	// initialSync indicates the queue has initially "synced".
	initialSync *atomic.Bool
	id          string
	metrics     *queueMetrics
}
// NewQueue instantiates a queue with a processing function and a random ID.
func NewQueue(errorDelay time.Duration) Instance {
	return NewQueueWithID(errorDelay, rand.String(10))
}

// NewQueueWithID instantiates a queue whose metrics are labeled with name.
// errorDelay is the wait before a failed task is retried.
func NewQueueWithID(errorDelay time.Duration, name string) Instance {
	return &queueImpl{
		delay:       errorDelay,
		tasks:       make([]*queueTask, 0),
		closing:     false,
		closed:      make(chan struct{}),
		closeOnce:   &sync.Once{},
		initialSync: atomic.NewBool(false),
		cond:        sync.NewCond(&sync.Mutex{}),
		id:          name,
		metrics:     newQueueMetrics(name),
	}
}
// Push appends a task to the queue and wakes the processing loop.
// Pushes after close has started are silently dropped.
func (q *queueImpl) Push(item Task) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	if !q.closing {
		q.tasks = append(q.tasks, &queueTask{task: item, enqueueTime: time.Now()})
		q.metrics.depth.RecordInt(int64(len(q.tasks)))
	}
	// Signal even when closing, so a waiting get() re-checks the closing flag.
	q.cond.Signal()
}

// Closed returns a channel closed once Run has fully stopped.
func (q *queueImpl) Closed() <-chan struct{} {
	return q.closed
}
// get blocks until it can return a task to be processed. If shutdown = true,
// the processing go routine should stop.
func (q *queueImpl) get() (task *queueTask, shutdown bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	// wait for closing to be set, or a task to be pushed
	for !q.closing && len(q.tasks) == 0 {
		q.cond.Wait()
	}
	if q.closing && len(q.tasks) == 0 {
		// We must be shutting down.
		return nil, true
	}
	// Note: remaining tasks are still drained even when closing is set.
	task = q.tasks[0]
	// Slicing will not free the underlying elements of the array, so explicitly clear them out here
	q.tasks[0] = nil
	q.tasks = q.tasks[1:]
	task.startTime = time.Now()
	q.metrics.depth.RecordInt(int64(len(q.tasks)))
	q.metrics.latency.Record(time.Since(task.enqueueTime).Seconds())
	return task, false
}
// processNextItem runs one task from the queue; it returns false when the
// queue is shutting down and the Run loop should exit. A failing task is
// re-pushed after q.delay (retried indefinitely, unlike the delay queue).
func (q *queueImpl) processNextItem() bool {
	// Wait until there is a new item in the queue
	task, shuttingdown := q.get()
	if shuttingdown {
		return false
	}
	// Run the task.
	if err := task.task(); err != nil {
		delay := q.delay
		log.Infof("Work item handle failed (%v), retry after delay %v", err, delay)
		time.AfterFunc(delay, func() {
			q.Push(task.task)
		})
	}
	q.metrics.workDuration.Record(time.Since(task.startTime).Seconds())
	return true
}
// HasSynced reports whether the marker task pushed at the start of Run has
// been processed, i.e. all items queued before Run have been handled.
func (q *queueImpl) HasSynced() bool {
	return q.initialSync.Load()
}
// Run processes tasks until stop is closed, then drains remaining tasks and
// closes the Closed() channel.
func (q *queueImpl) Run(stop <-chan struct{}) {
	log.Debugf("started queue %s", q.id)
	defer func() {
		q.closeOnce.Do(func() {
			log.Debugf("closed queue %s", q.id)
			close(q.closed)
		})
	}()
	// Watch for the stop signal and flip closing under the lock; the Signal
	// wakes a get() that may be blocked in Wait.
	go func() {
		<-stop
		q.cond.L.Lock()
		q.cond.Signal()
		q.closing = true
		q.cond.L.Unlock()
	}()
	// Sync marker: once this runs, everything pushed before Run has completed.
	q.Push(func() error {
		q.initialSync.Store(true)
		return nil
	})
	for q.processNextItem() {
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package queue
import (
"time"
"k8s.io/utils/clock"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/monitoring"
)
var (
	// queueIDTag labels all worker-queue metrics with the queue's ID.
	queueIDTag   = monitoring.CreateLabel("queueID")
	// enableMetric gates all queue metrics behind the controller-queue-metrics feature flag.
	enableMetric = monitoring.WithEnabled(func() bool {
		return features.EnableControllerQueueMetrics
	})
	depth   = monitoring.NewGauge("pilot_worker_queue_depth", "Depth of the controller queues", enableMetric)
	latency = monitoring.NewDistribution("pilot_worker_queue_latency",
		"Latency before the item is processed", []float64{.01, .1, .2, .5, 1, 3, 5}, enableMetric)
	workDuration = monitoring.NewDistribution("pilot_worker_queue_duration",
		"Time taken to process an item", []float64{.01, .1, .2, .5, 1, 3, 5}, enableMetric)
)

// queueMetrics bundles the per-queue metric instances, pre-labeled with the
// queue's ID.
type queueMetrics struct {
	depth        monitoring.Metric
	latency      monitoring.Metric
	workDuration monitoring.Metric
	id           string
	clock        clock.WithTicker
}
// Gets the time since the specified start in seconds.
// NOTE(review): not referenced anywhere in this file; possibly kept for tests
// or future use — confirm before removing.
func (m *queueMetrics) sinceInSeconds(start time.Time) float64 {
	return m.clock.Since(start).Seconds()
}

// newQueueMetrics builds the metric set for the queue identified by id, using
// the real wall clock.
func newQueueMetrics(id string) *queueMetrics {
	return &queueMetrics{
		id:           id,
		depth:        depth.With(queueIDTag.Value(id)),
		workDuration: workDuration.With(queueIDTag.Value(id)),
		latency:      latency.With(queueIDTag.Value(id)),
		clock:        clock.RealClock{},
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package queue
import (
"fmt"
"time"
)
// WaitForClose blocks until the Instance has stopped processing tasks or the timeout expires.
// If the timeout is zero, it will wait until the queue is done processing.
// WaitForClose returns an error if the timeout expires.
func WaitForClose(q Instance, timeout time.Duration) error {
	closed := q.Closed()
	if timeout == 0 {
		// No timeout: wait indefinitely.
		<-closed
		return nil
	}
	timer := time.NewTimer(timeout)
	defer timer.Stop()
	select {
	case <-closed:
		return nil
	case <-timer.C:
		return fmt.Errorf("timeout waiting for queue to close after %v", timeout)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package revisions
import (
"sync"
admitv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/types"
"istio.io/api/label"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
)
const (
	// defaultTagWebhookName is the mutating webhook that carries the
	// default-revision tag; its revision label determines the default.
	defaultTagWebhookName = "istio-revision-tag-default"
)

// DefaultWatcher keeps track of the current default revision and can notify watchers
// when the default revision changes.
type DefaultWatcher interface {
	// Run starts the watcher; blocks until stopCh is closed.
	Run(stopCh <-chan struct{})
	// HasSynced reports whether the initial state has been processed.
	HasSynced() bool
	// GetDefault returns the current default revision.
	GetDefault() string
	// AddHandler registers a callback for default-revision changes.
	AddHandler(handler DefaultHandler)
}

// DefaultHandler is a callback for when the default revision changes.
type DefaultHandler func(string)
// defaultWatcher implements DefaultWatcher by watching the default-tag
// mutating webhook configuration.
type defaultWatcher struct {
	revision string
	// defaultRevision is the currently observed default; guarded by mu.
	defaultRevision string
	// handlers is guarded by mu.
	handlers []DefaultHandler

	queue    controllers.Queue
	webhooks kclient.Client[*admitv1.MutatingWebhookConfiguration]
	mu       sync.RWMutex
}
// NewDefaultWatcher builds a DefaultWatcher that reconciles whenever the
// default-tag webhook changes. Run must be called to start processing.
func NewDefaultWatcher(client kube.Client, revision string) DefaultWatcher {
	p := &defaultWatcher{
		revision: revision,
		mu:       sync.RWMutex{},
	}
	p.queue = controllers.NewQueue("default revision", controllers.WithReconciler(p.setDefault))
	p.webhooks = kclient.New[*admitv1.MutatingWebhookConfiguration](client)
	// Only events for the default-tag webhook are enqueued.
	p.webhooks.AddEventHandler(controllers.FilteredObjectHandler(p.queue.AddObject, isDefaultTagWebhook))
	return p
}
// Run waits for the webhook informer cache to sync and then processes the
// reconcile queue until stopCh is closed.
func (p *defaultWatcher) Run(stopCh <-chan struct{}) {
	// Bail out if the cache never syncs (e.g. stopCh closed first) instead of
	// reconciling against a stale cache; mirrors tagWatcher.Run.
	if !kube.WaitForCacheSync("default revision", stopCh, p.webhooks.HasSynced) {
		return
	}
	p.queue.Run(stopCh)
}
// GetDefault returns the current default revision.
func (p *defaultWatcher) GetDefault() string {
	p.mu.RLock()
	defer p.mu.RUnlock()
	return p.defaultRevision
}

// AddHandler registers a new handler for updates to default revision changes.
// The handler is invoked on each change observed after registration.
func (p *defaultWatcher) AddHandler(handler DefaultHandler) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.handlers = append(p.handlers, handler)
}

// HasSynced reports whether the initial reconcile has completed.
func (p *defaultWatcher) HasSynced() bool {
	return p.queue.HasSynced()
}
// notifyHandlers notifies all registered handlers on default revision change.
// assumes externally locked.
func (p *defaultWatcher) notifyHandlers() {
	for _, handler := range p.handlers {
		handler(p.defaultRevision)
	}
}

// setDefault is the reconcile function: it reads the default-tag webhook's
// revision label (empty when the webhook is absent), stores it, and notifies
// handlers. Always returns nil (no retry needed).
func (p *defaultWatcher) setDefault(key types.NamespacedName) error {
	revision := ""
	wh := p.webhooks.Get(key.Name, "")
	if wh != nil {
		revision = wh.GetLabels()[label.IoIstioRev.Name]
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	p.defaultRevision = revision
	p.notifyHandlers()
	return nil
}

// isDefaultTagWebhook filters events down to the default-tag webhook only.
func isDefaultTagWebhook(obj controllers.Object) bool {
	return obj.GetName() == defaultTagWebhookName
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package revisions
import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
"k8s.io/apimachinery/pkg/types"
"istio.io/api/label"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/kube/kubetypes"
"istio.io/istio/pkg/util/sets"
)
// TagWatcher keeps track of the current tags and can notify watchers
// when the tags change.
type TagWatcher interface {
	// Run starts the watcher; blocks until stopCh is closed.
	Run(stopCh <-chan struct{})
	// HasSynced reports whether the initial state has been processed.
	HasSynced() bool
	// AddHandler registers a callback for tag changes.
	AddHandler(handler TagHandler)
	// GetMyTags returns the revision plus all tags pointing at it.
	GetMyTags() sets.String
}

// TagHandler is a callback for when the tags revision change.
type TagHandler func(sets.String)

// tagWatcher implements TagWatcher by watching tag mutating webhook
// configurations, indexed by the revision they point at.
type tagWatcher struct {
	revision string
	handlers []TagHandler

	queue    controllers.Queue
	webhooks kclient.Client[*admissionregistrationv1.MutatingWebhookConfiguration]
	// index maps revision -> tag webhooks labeled with that revision.
	index *kclient.Index[string, *admissionregistrationv1.MutatingWebhookConfiguration]
}
// NewTagWatcher builds a TagWatcher for the given revision. Run must be
// called to start processing.
func NewTagWatcher(client kube.Client, revision string) TagWatcher {
	p := &tagWatcher{
		revision: revision,
	}
	// Every webhook event simply re-notifies handlers with the current tag set.
	p.queue = controllers.NewQueue("tag", controllers.WithReconciler(func(key types.NamespacedName) error {
		p.notifyHandlers()
		return nil
	}))
	// Watch only webhooks carrying the istio.io/tag label.
	p.webhooks = kclient.NewFiltered[*admissionregistrationv1.MutatingWebhookConfiguration](client, kubetypes.Filter{
		ObjectFilter: isTagWebhook,
	})
	// Index tag webhooks by the revision they point at (istio.io/rev label).
	p.index = kclient.CreateIndexWithDelegate[string, *admissionregistrationv1.MutatingWebhookConfiguration](p.webhooks,
		func(o *admissionregistrationv1.MutatingWebhookConfiguration) []string {
			rev := o.GetLabels()[label.IoIstioRev.Name]
			if rev == "" {
				return nil
			}
			return []string{rev}
		}, controllers.ObjectHandler(p.queue.AddObject))
	return p
}
// Run waits for the webhook cache to sync, notifies handlers of the initial
// tag set, then processes the queue until stopCh is closed.
func (p *tagWatcher) Run(stopCh <-chan struct{}) {
	if !kube.WaitForCacheSync("tag watcher", stopCh, p.webhooks.HasSynced) {
		return
	}
	// Notify handlers of initial state
	p.notifyHandlers()
	p.queue.Run(stopCh)
}
// AddHandler registers a new handler for updates to tag changes.
// NOTE(review): unlike defaultWatcher.AddHandler this is not lock-protected;
// presumably it must be called before Run — confirm with callers.
func (p *tagWatcher) AddHandler(handler TagHandler) {
	p.handlers = append(p.handlers, handler)
}

// HasSynced reports whether the initial reconcile has completed.
func (p *tagWatcher) HasSynced() bool {
	return p.queue.HasSynced()
}
// GetMyTags returns this watcher's revision plus every tag name whose webhook
// points at that revision.
func (p *tagWatcher) GetMyTags() sets.String {
	res := sets.New(p.revision)
	for _, wh := range p.index.Lookup(p.revision) {
		res.Insert(wh.GetLabels()[IstioTagLabel])
	}
	return res
}

// notifyHandlers notifies all registered handlers on tag change.
func (p *tagWatcher) notifyHandlers() {
	myTags := p.GetMyTags()
	for _, handler := range p.handlers {
		handler(myTags)
	}
}
// isTagWebhook reports whether the object is a tag webhook, i.e. carries the
// istio.io/tag label.
func isTagWebhook(uobj any) bool {
	obj, ok := uobj.(controllers.Object)
	if !ok {
		return false
	}
	_, ok = obj.GetLabels()[IstioTagLabel]
	return ok
}

// IstioTagLabel is the label carrying a tag webhook's tag name.
const IstioTagLabel = "istio.io/tag"
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import (
"context"
"errors"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/env"
)
// AuthPlaintext, read once from XDS_AUTH_PLAINTEXT, allows authenticating
// requests that arrive without TLS (see Authenticate).
var AuthPlaintext = env.Register("XDS_AUTH_PLAINTEXT", false,
	"authenticate plain text requests - used if Istiod is running on a secure/trusted network").Get()
// Authenticate authenticates the ADS request using the configured authenticators.
// Returns the validated principals or an error.
// If no authenticators are configured, or if the request is on a non-secure
// stream ( 15010 ) - returns an empty caller and no errors.
func Authenticate(ctx context.Context, authenticators []Authenticator) (*Caller, error) {
	if !features.XDSAuth {
		return nil, nil
	}
	// Authenticate - currently just checks that the request has a certificate signed with our key.
	// Protected by flag to avoid breaking upgrades - should be enabled in multi-cluster/meshexpansion where
	// XDS is exposed.
	peerInfo, ok := peer.FromContext(ctx)
	if !ok {
		return nil, errors.New("invalid context")
	}
	// Not a TLS connection, we will not perform authentication
	// (unless AuthPlaintext explicitly allows plain-text requests).
	// TODO: add a flag to prevent unauthenticated requests ( 15010 )
	// request not over TLS on the insecure port
	if _, ok := peerInfo.AuthInfo.(credentials.TLSInfo); !ok && !AuthPlaintext {
		return nil, nil
	}
	am := authenticationManager{
		Authenticators: authenticators,
	}
	if u := am.authenticate(ctx); u != nil {
		return u, nil
	}
	securityLog.Errorf("Failed to authenticate client from %s: %s", peerInfo.Addr.String(), am.FailedMessages())
	return nil, errors.New("authentication failure")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import (
"context"
"errors"
"fmt"
"net/http"
"sync"
"go.uber.org/atomic"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/pkg/util/sets"
"istio.io/istio/security/pkg/pki/util"
)
// DirectSecretManager is a simple SecretManager backed by an in-memory map:
// secrets are stored explicitly via Set and returned verbatim by GenerateSecret.
type DirectSecretManager struct {
	items map[string]*SecretItem
	mu    sync.RWMutex
}

var _ SecretManager = &DirectSecretManager{}

// NewDirectSecretManager returns an empty DirectSecretManager.
func NewDirectSecretManager() *DirectSecretManager {
	return &DirectSecretManager{
		items: map[string]*SecretItem{},
	}
}

// GenerateSecret returns the secret previously stored under resourceName, or
// an error if none has been set.
func (d *DirectSecretManager) GenerateSecret(resourceName string) (*SecretItem, error) {
	d.mu.RLock()
	defer d.mu.RUnlock()
	item, found := d.items[resourceName]
	if !found {
		return nil, fmt.Errorf("resource %v not found", resourceName)
	}
	return item, nil
}

// Set stores secret under resourceName; passing a nil secret removes the entry.
func (d *DirectSecretManager) Set(resourceName string, secret *SecretItem) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if secret == nil {
		delete(d.items, resourceName)
	} else {
		d.items[resourceName] = secret
	}
}
// FakeAuthenticator is an Authenticator that accepts a single configured
// bearer token and/or a single client certificate identity, counting
// successful and failed authentications.
type FakeAuthenticator struct {
	AllowedToken string
	AllowedCert  string
	Name         string

	Successes *atomic.Int32
	Failures  *atomic.Int32

	mu sync.Mutex
}

var _ Authenticator = &FakeAuthenticator{}

// NewFakeAuthenticator constructs a FakeAuthenticator with zeroed counters.
func NewFakeAuthenticator(name string) *FakeAuthenticator {
	return &FakeAuthenticator{
		Name:      name,
		Successes: atomic.NewInt32(0),
		Failures:  atomic.NewInt32(0),
	}
}

// Authenticate dispatches to the gRPC or HTTP path depending on which part of
// the AuthContext is populated; with neither set it returns (nil, nil).
func (f *FakeAuthenticator) Authenticate(authCtx AuthContext) (*Caller, error) {
	switch {
	case authCtx.GrpcContext != nil:
		return f.authenticateGrpc(authCtx.GrpcContext)
	case authCtx.Request != nil:
		return f.authenticateHTTP(authCtx.Request)
	}
	return nil, nil
}

// authenticateHTTP is unimplemented; HTTP authentication always fails.
func (f *FakeAuthenticator) authenticateHTTP(req *http.Request) (*Caller, error) {
	return nil, errors.New("not implemented")
}

// authenticateGrpc checks the request's bearer token and client certificate
// against the configured values. Certificate success wins over token success;
// if both checks fail, the failure counter is incremented and both errors are
// reported.
func (f *FakeAuthenticator) authenticateGrpc(ctx context.Context) (*Caller, error) {
	f.mu.Lock()
	allowedToken := f.AllowedToken
	allowedCert := f.AllowedCert
	f.mu.Unlock()
	// A nil error means the corresponding check succeeded.
	tokenErr := checkToken(ctx, allowedToken)
	certErr := checkCert(ctx, allowedCert)
	id := []string{spiffe.Identity{
		TrustDomain:    "cluster.local",
		Namespace:      "fake-namespace",
		ServiceAccount: "fake-sa",
	}.String()}
	log.WithLabels("name", f.Name, "cert", certErr, "token", tokenErr).Infof("authentication complete")
	if certErr == nil {
		f.Successes.Inc()
		return &Caller{
			AuthSource: AuthSourceClientCertificate,
			Identities: id,
		}, nil
	}
	if tokenErr == nil {
		f.Successes.Inc()
		return &Caller{
			AuthSource: AuthSourceIDToken,
			Identities: id,
		}, nil
	}
	f.Failures.Inc()
	return nil, fmt.Errorf("neither token (%v) nor cert (%v) succeeded", tokenErr, certErr)
}

// AuthenticatorType implements Authenticator.
func (f *FakeAuthenticator) AuthenticatorType() string {
	return "fake"
}

// Set configures the accepted token and certificate identity, returning f so
// calls can be chained.
func (f *FakeAuthenticator) Set(token string, identity string) *FakeAuthenticator {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.AllowedToken = token
	f.AllowedCert = identity
	return f
}
// checkToken returns nil when the request in ctx carries exactly the expected
// bearer token. An empty expected token disables JWT authentication entirely;
// any mismatch or extraction failure is returned as an error.
func checkToken(ctx context.Context, expected string) error {
	if expected == "" {
		return fmt.Errorf("jwt authentication not allowed")
	}
	got, err := ExtractBearerToken(ctx)
	if err != nil {
		return fmt.Errorf("target JWT extraction error: %v", err)
	}
	if got != expected {
		return fmt.Errorf("expected token %q got %q", expected, got)
	}
	return nil
}
// checkCert returns nil when the gRPC peer in ctx presented a verified TLS
// client certificate whose extracted identities include expected. An empty
// expected identity disables certificate authentication entirely.
func checkCert(ctx context.Context, expected string) error {
	if expected == "" {
		return fmt.Errorf("cert authentication not allowed")
	}
	peerInfo, ok := peer.FromContext(ctx)
	if !ok || peerInfo.AuthInfo == nil {
		return fmt.Errorf("no client certificate is presented")
	}
	if authType := peerInfo.AuthInfo.AuthType(); authType != "tls" {
		return fmt.Errorf("unsupported auth type: %q", authType)
	}
	verified := peerInfo.AuthInfo.(credentials.TLSInfo).State.VerifiedChains
	if len(verified) == 0 || len(verified[0]) == 0 {
		return fmt.Errorf("no verified chain is found")
	}
	// Identities are read from the leaf certificate's extensions (SANs).
	ids, err := util.ExtractIDs(verified[0][0].Extensions)
	if err != nil {
		return fmt.Errorf("failed to extract IDs")
	}
	if !sets.New(ids...).Contains(expected) {
		return fmt.Errorf("expected identity %q, got %v", expected, ids)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import (
"time"
retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"istio.io/istio/pkg/log"
"istio.io/istio/security/pkg/monitoring"
)
// caLog is the logging scope for CA client retries.
var caLog = log.RegisterScope("ca", "ca client")

// CARetryOptions returns the default retry options recommended for CA calls
// This includes 5 retries, with backoff from 100ms -> 1.6s with jitter.
// Retries are limited to transient gRPC status codes.
var CARetryOptions = []retry.CallOption{
	retry.WithMax(5),
	retry.WithBackoff(wrapBackoffWithMetrics(retry.BackoffExponentialWithJitter(100*time.Millisecond, 0.1))),
	retry.WithCodes(codes.Canceled, codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, codes.Internal, codes.Unavailable),
}
// CARetryInterceptor is a grpc UnaryInterceptor that adds retry options, as a convenience wrapper
// around CARetryOptions. If needed to chain with other interceptors, the CARetryOptions can be used
// directly. Note this applies to unary RPCs only.
func CARetryInterceptor() grpc.DialOption {
	return grpc.WithUnaryInterceptor(retry.UnaryClientInterceptor(CARetryOptions...))
}
// grpcretry has no hooks to trigger logic on failure (https://github.com/grpc-ecosystem/go-grpc-middleware/issues/375).
// As a workaround, wrap the backoff hook so each retry logs a warning and
// increments the outgoing-retry metric before the computed delay is returned.
func wrapBackoffWithMetrics(bf retry.BackoffFunc) retry.BackoffFunc {
	return func(attempt uint) time.Duration {
		delay := bf(attempt)
		caLog.Warnf("ca request failed, starting attempt %d in %v", attempt, delay)
		monitoring.NumOutgoingRetries.With(monitoring.RequestType.Value(monitoring.CSR)).Increment()
		return delay
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package security
import (
"context"
"fmt"
"net/http"
"os"
"strings"
"time"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"istio.io/istio/pkg/env"
istiolog "istio.io/istio/pkg/log"
)
var securityLog = istiolog.RegisterScope("security", "security debugging")
// Well-known certificate file paths, socket paths, resource names, and
// provider names shared by Istiod and the agent.
const (
	// etc/certs files are used with external CA managing the certs,
	// i.e. mounted Secret or external plugin.
	// If present, FileMountedCerts should be true.
	// DefaultCertChainFilePath is the well-known path for an existing certificate chain file
	DefaultCertChainFilePath = "./etc/certs/cert-chain.pem"
	// DefaultKeyFilePath is the well-known path for an existing key file
	DefaultKeyFilePath = "./etc/certs/key.pem"
	// DefaultRootCertFilePath is the well-known path for an existing root certificate file
	DefaultRootCertFilePath = "./etc/certs/root-cert.pem"
	// WorkloadIdentitySocketPath is the well-known path to the Unix Domain Socket for SDS.
	WorkloadIdentitySocketPath = "./var/run/secrets/workload-spiffe-uds/socket"
	// CredentialNameSocketPath is the well-known path to the Unix Domain Socket for Credential Name.
	CredentialNameSocketPath = "./var/run/secrets/credential-uds/socket"
	// CredentialMetaDataName is the name in node meta data.
	CredentialMetaDataName = "credential"
	// SDSExternalClusterName is the name of the cluster for external SDS connections which is defined via CredentialNameSocketPath
	SDSExternalClusterName = "sds-external"
	// SDSExternalCredentialPrefix is the prefix for the credentialName which will utilize external SDS connections defined via CredentialNameSocketPath
	SDSExternalCredentialPrefix = "sds://"
	// WorkloadIdentityCredentialsPath is the well-known path to a folder with workload certificate files.
	WorkloadIdentityCredentialsPath = "./var/run/secrets/workload-spiffe-credentials"
	// WorkloadIdentityCertChainPath is the well-known path to a workload certificate chain file.
	WorkloadIdentityCertChainPath = WorkloadIdentityCredentialsPath + "/cert-chain.pem"
	// WorkloadIdentityKeyPath is the well-known path to a workload key file.
	WorkloadIdentityKeyPath = WorkloadIdentityCredentialsPath + "/key.pem"
	// WorkloadIdentityRootCertPath is the well-known path to a workload root certificate file.
	WorkloadIdentityRootCertPath = WorkloadIdentityCredentialsPath + "/root-cert.pem"
	// GkeWorkloadCertChainFilePath is the well-known path for the GKE workload certificate chain file.
	// Quoted from https://cloud.google.com/traffic-director/docs/security-proxyless-setup#create-service:
	// "On creation, each Pod gets a volume at /var/run/secrets/workload-spiffe-credentials."
	GkeWorkloadCertChainFilePath = WorkloadIdentityCredentialsPath + "/certificates.pem"
	// GkeWorkloadKeyFilePath is the well-known path for the GKE workload certificate key file
	GkeWorkloadKeyFilePath = WorkloadIdentityCredentialsPath + "/private_key.pem"
	// GkeWorkloadRootCertFilePath is the well-known path for the GKE workload root certificate file
	GkeWorkloadRootCertFilePath = WorkloadIdentityCredentialsPath + "/ca_certificates.pem"
	// SystemRootCerts is special case input for root cert configuration to use system root certificates.
	SystemRootCerts = "SYSTEM"
	// RootCertReqResourceName is resource name of discovery request for root certificate.
	RootCertReqResourceName = "ROOTCA"
	// WorkloadKeyCertResourceName is the resource name of the discovery request for workload
	// identity.
	WorkloadKeyCertResourceName = "default"
	// GCE is Credential fetcher type of Google plugin
	GCE = "GoogleComputeEngine"
	// JWT is a Credential fetcher type that reads from a JWT token file
	JWT = "JWT"
	// Mock is Credential fetcher type of mock plugin
	Mock = "Mock" // testing only
	// GoogleCAProvider uses the Google CA for workload certificate signing
	GoogleCAProvider = "GoogleCA"
	// GoogleCASProvider uses the Google certificate Authority Service to sign workload certificates
	GoogleCASProvider = "GoogleCAS"
	// GkeWorkloadCertificateProvider uses the GKE workload certificates
	GkeWorkloadCertificateProvider = "GkeWorkloadCertificate"
	// FileRootSystemCACert is a unique resource name signaling that the system CA certificate should be used
	FileRootSystemCACert = "file-root:system"
)
// TODO: For 1.8, make sure MeshConfig is updated with those settings,
// they should be dynamic to allow migrations without restart.
// Both are critical.
var (
	// Require3PToken disables the use of K8S 1P tokens. Note that 1P tokens can be used to request
	// 3P TOKENS. A 1P token is the token automatically mounted by Kubelet and used for authentication with
	// the Apiserver.
	Require3PToken = env.Register("REQUIRE_3P_TOKEN", false,
		"Reject k8s default tokens, without audience. If false, default K8S token will be accepted")
	// TokenAudiences specifies a list of audiences for SDS trustworthy JWT. This is to make sure that the CSR requests
	// contain the JWTs intended for Citadel.
	TokenAudiences = strings.Split(env.Register("TOKEN_AUDIENCES", "istio-ca",
		"A list of comma separated audiences to check in the JWT token before issuing a certificate. "+
			"The token is accepted if it matches with one of the audiences").Get(), ",")
)

const (
	// BearerTokenPrefix is the standard "Bearer " scheme prefix on authorization headers.
	BearerTokenPrefix = "Bearer "
	// K8sTokenPrefix is the "Istio " scheme prefix used to carry a K8S token
	// in the authorization header (see ExtractRequestToken).
	K8sTokenPrefix = "Istio "
	// CertSigner info
	CertSigner = "CertSigner"
	// ImpersonatedIdentity declares the identity we are requesting a certificate on behalf of.
	// This is constrained to only allow identities in CATrustedNodeAccounts, and only to impersonate identities
	// on their node.
	ImpersonatedIdentity = "ImpersonatedIdentity"
)

// ImpersonatedIdentityContextKey is the context key type under which the
// impersonated identity is stored.
type ImpersonatedIdentityContextKey struct{}
// Options provides all of the configuration parameters for secret discovery service
// and CA configuration. Used in both Istiod and Agent.
// TODO: ProxyConfig should have most of those, and be passed to all components
// (as source of truth)
type Options struct {
	// CAEndpoint is the CA endpoint to which node agent sends CSR request.
	CAEndpoint string
	// CAEndpointSAN overrides the ServerName extracted from CAEndpoint.
	CAEndpointSAN string
	// The CA provider name.
	CAProviderName string
	// TrustDomain corresponds to the trust root of a system.
	// https://github.com/spiffe/spiffe/blob/master/standards/SPIFFE-ID.md#21-trust-domain
	TrustDomain string
	// WorkloadRSAKeySize is the size of a private key for a workload certificate.
	WorkloadRSAKeySize int
	// Whether to generate PKCS#8 private keys.
	Pkcs8Keys bool
	// OutputKeyCertToDir is the directory for output the key and certificate
	OutputKeyCertToDir string
	// ProvCert is the directory for client to provide the key and certificate to CA server when authenticating
	// with mTLS. This is not used for workload mTLS communication.
	// NOTE(review): the original comment was truncated here ("... and is") - confirm intended scope.
	ProvCert string
	// ClusterID is the cluster where the agent resides.
	// Normally initialized from ISTIO_META_CLUSTER_ID - after a tortuous journey it
	// makes its way into the ClusterID metadata of Citadel gRPC request to create the cert.
	// Didn't find much doc - but I suspect used for 'central cluster' use cases - so should
	// match the cluster name set in the MC setup.
	ClusterID string
	// The type of Elliptical Signature algorithm to use
	// when generating private keys. Currently only ECDSA is supported.
	ECCSigAlg string
	// The type of curve to use when generating private keys with ECC. Currently only ECDSA is supported.
	ECCCurve string
	// FileMountedCerts indicates whether the proxy is using file
	// mounted certs created by a foreign CA. Refresh is managed by the external
	// CA, by updating the Secret or VM file. We will watch the file for changes
	// or check before the cert expires. This assumes the certs are in the
	// well-known ./etc/certs location.
	FileMountedCerts bool
	// PilotCertProvider is the provider of the Pilot certificate (PILOT_CERT_PROVIDER env)
	// Determines the root CA file to use for connecting to CA gRPC:
	// - istiod
	// - kubernetes
	// - custom
	// - none
	PilotCertProvider string
	// SecretTTL is the requested lifetime (TTL) for generated secrets.
	SecretTTL time.Duration
	// The ratio of cert lifetime to refresh a cert. For example, at 0.10 and 1 hour TTL,
	// we would refresh 6 minutes before expiration.
	SecretRotationGracePeriodRatio float64
	// STS port
	STSPort int
	// authentication provider specific plugins, will exchange the token
	// For example exchange long lived refresh with access tokens.
	// Used by the secret fetcher when signing CSRs.
	// Optional; if not present the token will be used directly
	TokenExchanger TokenExchanger
	// credential fetcher.
	CredFetcher CredFetcher
	// credential identity provider
	CredIdentityProvider string
	// Namespace corresponding to workload
	WorkloadNamespace string
	// Name of the Service Account
	ServiceAccount string
	// XDS auth provider
	XdsAuthProvider string
	// Token manager for the token exchange of XDS
	TokenManager TokenManager
	// Cert signer info
	CertSigner string
	// Delay in reading certificates from file after the change is detected. This is useful in cases
	// where the write operation of key and cert take longer.
	FileDebounceDuration time.Duration
	// Root Cert read from the OS
	CARootPath string
	// The path for an existing certificate chain file
	CertChainFilePath string
	// The path for an existing key file
	KeyFilePath string
	// The path for an existing root certificate bundle
	RootCertFilePath string
}
// TokenManager contains methods for generating token.
type TokenManager interface {
	// GenerateToken takes STS request parameters and generates token. Returns
	// StsResponseParameters in JSON.
	GenerateToken(parameters StsRequestParameters) ([]byte, error)
	// DumpTokenStatus dumps status of all generated tokens and returns status in JSON.
	DumpTokenStatus() ([]byte, error)
	// GetMetadata returns the metadata headers related to the token
	GetMetadata(forCA bool, xdsAuthProvider, token string) (map[string]string, error)
}

// StsRequestParameters stores all STS request attributes defined in
// https://tools.ietf.org/html/draft-ietf-oauth-token-exchange-16#section-2.1
type StsRequestParameters struct {
	// REQUIRED. The value "urn:ietf:params:oauth:grant-type:token- exchange"
	// indicates that a token exchange is being performed.
	GrantType string
	// OPTIONAL. Indicates the location of the target service or resource where
	// the client intends to use the requested security token.
	Resource string
	// OPTIONAL. The logical name of the target service where the client intends
	// to use the requested security token.
	Audience string
	// OPTIONAL. A list of space-delimited, case-sensitive strings, that allow
	// the client to specify the desired Scope of the requested security token in the
	// context of the service or Resource where the token will be used.
	Scope string
	// OPTIONAL. An identifier, for the type of the requested security token.
	RequestedTokenType string
	// REQUIRED. A security token that represents the identity of the party on
	// behalf of whom the request is being made.
	SubjectToken string
	// REQUIRED. An identifier, that indicates the type of the security token in
	// the "subject_token" parameter.
	SubjectTokenType string
	// OPTIONAL. A security token that represents the identity of the acting party.
	ActorToken string
	// OPTIONAL. An identifier, that indicates the type of the security token in the
	// "actor_token" parameter.
	ActorTokenType string
}

// Client interface defines the clients need to implement to talk to CA for CSR.
// The Agent will create a key pair and a CSR, and use an implementation of this
// interface to get back a signed certificate. There is no guarantee that the SAN
// in the request will be returned - server may replace it.
type Client interface {
	// CSRSign sends the PEM-encoded CSR for signing and returns the resulting certificate chain.
	CSRSign(csrPEM []byte, certValidTTLInSec int64) ([]string, error)
	// Close releases resources held by the client.
	Close()
	// GetRootCertBundle retrieves the CA root certs, if the CA publishes an API endpoint for this.
	GetRootCertBundle() ([]string, error)
}
// SecretManager defines secrets management interface which is used by SDS.
type SecretManager interface {
	// GenerateSecret generates new secret for the given resource.
	//
	// The current implementation also watches the generated secret and triggers a callback when it is
	// near expiry. It constructs the SAN based on the token's 'sub' claim, expected to be in
	// the K8S format. No other JWTs are currently supported due to client logic. If JWT is
	// missing/invalid, the resourceName is used.
	GenerateSecret(resourceName string) (*SecretItem, error)
}

// TokenExchanger provides common interfaces so that authentication providers could choose to implement their specific logic.
type TokenExchanger interface {
	// ExchangeToken provides a common interface to exchange an existing token for a new one.
	ExchangeToken(serviceAccountToken string) (string, error)
}

// SecretItem is the cached item in in-memory secret store.
type SecretItem struct {
	CertificateChain []byte
	PrivateKey       []byte
	RootCert         []byte
	// ResourceName passed from envoy SDS discovery request.
	// "ROOTCA" for root cert request, "default" for key/cert request.
	ResourceName string
	// CreatedTime is when the secret was generated; ExpireTime is when its
	// certificate expires.
	CreatedTime time.Time
	ExpireTime  time.Time
}

// CredFetcher fetches workload credentials from the underlying platform.
type CredFetcher interface {
	// GetPlatformCredential fetches workload credential provided by the platform.
	GetPlatformCredential() (string, error)
	// GetIdentityProvider returns the name of the IdentityProvider that can authenticate the workload credential.
	GetIdentityProvider() string
	// Stop releases resources and cleans up.
	Stop()
}
// AuthSource represents where authentication result is derived from.
type AuthSource int

const (
	// AuthSourceClientCertificate identifies a caller authenticated via a TLS client certificate.
	AuthSourceClientCertificate AuthSource = iota
	// AuthSourceIDToken identifies a caller authenticated via a JWT ID token.
	AuthSourceIDToken
)

const (
	// authorizationMeta is the gRPC metadata / HTTP header key carrying caller credentials.
	authorizationMeta = "authorization"
)

// AuthContext carries the transport-level request being authenticated.
// Typically only one of GrpcContext or Request is set.
type AuthContext struct {
	// grpc context
	GrpcContext context.Context
	// http request
	Request *http.Request
}
// RemoteAddress returns the remote address of the request in this AuthContext:
// the gRPC peer address when the request came over gRPC, the HTTP remote
// address otherwise, or "" when neither transport is present.
func (ac *AuthContext) RemoteAddress() string {
	switch {
	case ac.GrpcContext != nil:
		return GetConnectionAddress(ac.GrpcContext)
	case ac.Request != nil:
		return ac.Request.RemoteAddr
	default:
		return ""
	}
}
// Header returns the values of the named header from the request in this
// AuthContext, reading gRPC incoming metadata or HTTP headers as appropriate.
// Returns nil when the header is absent or no request is present.
func (ac *AuthContext) Header(header string) []string {
	switch {
	case ac.GrpcContext != nil:
		if md, ok := metadata.FromIncomingContext(ac.GrpcContext); ok {
			return md.Get(header)
		}
	case ac.Request != nil:
		return ac.Request.Header.Values(header)
	}
	return nil
}
// Caller carries the identity and authentication source of a caller.
type Caller struct {
	// AuthSource records which mechanism (certificate or token) authenticated the caller.
	AuthSource AuthSource
	// Identities holds the authenticated principals of the caller.
	Identities []string
	// KubernetesInfo holds optional Kubernetes pod metadata about the caller.
	KubernetesInfo KubernetesInfo
}
// KubernetesInfo carries Kubernetes-specific metadata extracted from the
// caller, beyond just its SPIFFE identity.
type KubernetesInfo struct {
	PodName           string
	PodNamespace      string
	PodUID            string
	PodServiceAccount string
}

// String renders the pod information in a compact human-readable form.
func (ki KubernetesInfo) String() string {
	return fmt.Sprintf("Pod{Name: %s, Namespace: %s, UID: %s, ServiceAccount: %s}", ki.PodName, ki.PodNamespace, ki.PodUID, ki.PodServiceAccount)
}
// Authenticator determines the caller identity based on request context.
type Authenticator interface {
	// Authenticate extracts and validates the caller's credentials from ctx.
	Authenticate(ctx AuthContext) (*Caller, error)
	// AuthenticatorType returns a short name identifying the implementation.
	AuthenticatorType() string
}
// authenticationManager orchestrates all authenticators to perform authentication.
type authenticationManager struct {
	// Authenticators are tried in order; the first success wins.
	Authenticators []Authenticator
	// authFailMsgs contains list of messages that authenticator wants to record - mainly used for logging.
	authFailMsgs []string
}
// authenticate loops through all the configured Authenticators and returns the
// first Caller produced with at least one identity and no error. Failures from
// each authenticator are accumulated in authFailMsgs for later logging; nil is
// returned when every authenticator fails.
func (am *authenticationManager) authenticate(ctx context.Context) *Caller {
	authReq := AuthContext{GrpcContext: ctx}
	for _, authn := range am.Authenticators {
		caller, err := authn.Authenticate(authReq)
		if err == nil && caller != nil && len(caller.Identities) > 0 {
			securityLog.Debugf("Authentication successful through auth source %v", caller.AuthSource)
			return caller
		}
		am.authFailMsgs = append(am.authFailMsgs, fmt.Sprintf("Authenticator %s: %v", authn.AuthenticatorType(), err))
	}
	return nil
}
// GetConnectionAddress returns the gRPC peer address stored in ctx, or
// "unknown" when no peer information is attached.
func GetConnectionAddress(ctx context.Context) string {
	if peerInfo, ok := peer.FromContext(ctx); ok {
		return peerInfo.Addr.String()
	}
	return "unknown"
}
// FailedMessages joins the per-authenticator failure messages collected during
// authenticate into a single "; "-separated string, for logging.
func (am *authenticationManager) FailedMessages() string {
	return strings.Join(am.authFailMsgs, "; ")
}
// ExtractBearerToken returns the first "Bearer "-prefixed value of the
// authorization metadata in ctx, with the prefix stripped. An error is
// returned when metadata, the header, or a bearer-scheme value is missing.
func ExtractBearerToken(ctx context.Context) (string, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", fmt.Errorf("no metadata is attached")
	}
	values, found := md[authorizationMeta]
	if !found {
		return "", fmt.Errorf("no HTTP authorization header exists")
	}
	for _, v := range values {
		if token, isBearer := strings.CutPrefix(v, BearerTokenPrefix); isBearer {
			return token, nil
		}
	}
	return "", fmt.Errorf("no bearer token exists in HTTP authorization header")
}
// ExtractRequestToken returns the credential carried in the HTTP request's
// authorization header, accepting either the "Bearer " or the "Istio " scheme
// prefix (prefix stripped). An error is returned when the header is missing or
// carries neither scheme.
func ExtractRequestToken(req *http.Request) (string, error) {
	value := req.Header.Get(authorizationMeta)
	if value == "" {
		return "", fmt.Errorf("no HTTP authorization header exists")
	}
	for _, prefix := range []string{BearerTokenPrefix, K8sTokenPrefix} {
		if token, ok := strings.CutPrefix(value, prefix); ok {
			return token, nil
		}
	}
	return "", fmt.Errorf("no bearer token exists in HTTP authorization header")
}
// GetOSRootFilePath returns the first file path detected from a list of known CA certificate file paths.
// If none of the known CA certificate files are found, a warning is printed and an empty string is returned.
func GetOSRootFilePath() string {
	// Candidate CA bundle locations on Linux-like systems.
	// Source of CA File Paths: https://golang.org/src/crypto/x509/root_linux.go
	candidates := []string{
		"/etc/ssl/certs/ca-certificates.crt",                // Debian/Ubuntu/Gentoo etc.
		"/etc/pki/tls/certs/ca-bundle.crt",                  // Fedora/RHEL 6
		"/etc/ssl/ca-bundle.pem",                            // OpenSUSE
		"/etc/pki/tls/cacert.pem",                           // OpenELEC
		"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7
		"/etc/ssl/cert.pem",                                 // Alpine Linux
		"/usr/local/etc/ssl/cert.pem",                       // FreeBSD
		"/etc/ssl/certs/ca-certificates",                    // Talos Linux
	}
	for _, path := range candidates {
		if _, err := os.Stat(path); err == nil {
			istiolog.Debugf("Using OS CA certificate for proxy: %s", path)
			return path
		}
	}
	istiolog.Warn("OS CA Cert could not be found for agent")
	return ""
}
// CheckWorkloadCertificate returns true when all three workload certificate
// files (cert chain, key, and root cert) are present at the provided paths;
// otherwise it returns false.
func CheckWorkloadCertificate(certChainFilePath, keyFilePath, rootCertFilePath string) bool {
	for _, path := range []string{certChainFilePath, keyFilePath, rootCertFilePath} {
		if _, err := os.Stat(path); err != nil {
			return false
		}
	}
	return true
}
// SdsCertificateConfig holds the file paths for a file-based SDS certificate:
// a key/cert pair, a root (CA) certificate, or both.
type SdsCertificateConfig struct {
	CertificatePath   string
	PrivateKeyPath    string
	CaCertificatePath string
}

const (
	// ResourceSeparator splits the certificate and key paths inside a
	// "file-cert:" SDS resource name.
	ResourceSeparator = "~"
)

// GetResourceName converts a SdsCertificateConfig to a string to be used as an SDS resource name
func (s SdsCertificateConfig) GetResourceName() string {
	if !s.IsKeyCertificate() {
		return ""
	}
	return "file-cert:" + s.CertificatePath + ResourceSeparator + s.PrivateKeyPath // Format: file-cert:%s~%s
}

// GetRootResourceName converts a SdsCertificateConfig to a string to be used as an SDS resource name for the root
func (s SdsCertificateConfig) GetRootResourceName() string {
	if !s.IsRootCertificate() {
		return ""
	}
	return "file-root:" + s.CaCertificatePath // Format: file-root:%s
}

// IsRootCertificate returns true if this config represents a root certificate config.
func (s SdsCertificateConfig) IsRootCertificate() bool {
	return s.CaCertificatePath != ""
}

// IsKeyCertificate returns true if this config represents key certificate config.
func (s SdsCertificateConfig) IsKeyCertificate() bool {
	return s.CertificatePath != "" && s.PrivateKeyPath != ""
}
// SdsCertificateConfigFromResourceName converts the provided resource name into a SdsCertificateConfig.
// It accepts "file-cert:<cert>~<key>" and "file-root:<ca>" forms; for any other
// shape, false is returned.
func SdsCertificateConfigFromResourceName(resource string) (SdsCertificateConfig, bool) {
	if files, ok := strings.CutPrefix(resource, "file-cert:"); ok {
		parts := strings.Split(files, ResourceSeparator)
		if len(parts) != 2 {
			return SdsCertificateConfig{}, false
		}
		return SdsCertificateConfig{parts[0], parts[1], ""}, true
	}
	if file, ok := strings.CutPrefix(resource, "file-root:"); ok {
		parts := strings.Split(file, ResourceSeparator)
		if len(parts) != 1 {
			return SdsCertificateConfig{}, false
		}
		return SdsCertificateConfig{"", "", parts[0]}, true
	}
	return SdsCertificateConfig{}, false
}
// SdsCertificateConfigFromResourceNameForOSCACert converts the OS resource
// name into a root-only SdsCertificateConfig; an empty name yields false.
func SdsCertificateConfigFromResourceNameForOSCACert(resource string) (SdsCertificateConfig, bool) {
	if resource == "" {
		return SdsCertificateConfig{}, false
	}
	return SdsCertificateConfig{CaCertificatePath: resource}, true
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sleep
import (
"context"
"time"
)
// UntilContext sleeps for the given duration, or until the context is complete.
// Returns true if the sleep completes the full duration; false if ctx finished
// first.
func UntilContext(ctx context.Context, d time.Duration) bool {
	return Until(ctx.Done(), d)
}
// Until sleeps for the given duration, or until the channel is closed.
// Returns true if the sleep completes the full duration
func Until(ch <-chan struct{}, d time.Duration) bool {
timer := time.NewTimer(d)
select {
case <-ch:
if !timer.Stop() {
<-timer.C
}
return false
case <-timer.C:
return true
}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package slices defines various functions useful with slices of any type.
package slices
import (
"cmp"
"slices" // nolint: depguard
"strings"
"golang.org/x/exp/constraints"
)
// Equal reports whether two slices are equal: the same length and all
// elements equal, compared in increasing index order with the comparison
// stopping at the first unequal pair.
// Floating point NaNs are not considered equal.
func Equal[E comparable](s1, s2 []E) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, v := range s1 {
		if v != s2[i] {
			return false
		}
	}
	return true
}
// EqualUnordered reports whether two slices are equal, ignoring order.
// Duplicate elements are significant: each element must appear the same
// number of times in both slices.
//
// BUG FIX: the previous implementation compared element *sets* of
// equal-length slices, so e.g. [a,a,b] and [a,b,b] compared as equal.
// Occurrence counting makes it a true multiset comparison.
func EqualUnordered[E comparable](s1, s2 []E) bool {
	if len(s1) != len(s2) {
		return false
	}
	counts := make(map[E]int, len(s1))
	for _, v := range s1 {
		counts[v]++
	}
	for _, v := range s2 {
		counts[v]--
		// A negative count means s2 has more of v than s1.
		if counts[v] < 0 {
			return false
		}
	}
	// Lengths are equal and no count went negative, so all counts are zero.
	return true
}
// EqualFunc reports whether two slices are equal using a comparison
// function on each pair of elements. If the lengths are different,
// EqualFunc returns false. Otherwise, elements are compared in increasing
// index order, stopping at the first index for which eq returns false.
func EqualFunc[E1, E2 comparable](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool {
	if len(s1) != len(s2) {
		return false
	}
	for i, v1 := range s1 {
		if !eq(v1, s2[i]) {
			return false
		}
	}
	return true
}
// SortFunc sorts the slice x in ascending order as determined by the less function.
// This sort is not guaranteed to be stable.
// The slice is modified in place but returned.
func SortFunc[E any](x []E, less func(a, b E) int) []E {
	// Zero- and one-element slices are already sorted; skip the call.
	if len(x) > 1 {
		slices.SortFunc(x, less)
	}
	return x
}
// SortBy is a helper to sort a slice by some value. Typically, this would be sorting a struct
// by a single field. If you need to have multiple fields, see the ExampleSort.
func SortBy[E any, A constraints.Ordered](x []E, extract func(a E) A) []E {
	// Nothing to do for zero or one element.
	if len(x) <= 1 {
		return x
	}
	// SortFunc returns its argument, so the result can be returned directly.
	return SortFunc(x, func(a, b E) int {
		return cmp.Compare(extract(a), extract(b))
	})
}
// Sort sorts a slice of any ordered type in ascending order.
// The slice is modified in place but returned.
func Sort[E constraints.Ordered](x []E) []E {
	// Zero- and one-element slices are already sorted.
	if len(x) > 1 {
		slices.Sort(x)
	}
	return x
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
// Cloning a nil slice returns nil (per the standard library's slices.Clone).
func Clone[S ~[]E, E any](s S) S {
	return slices.Clone(s)
}
// Delete removes the element at index i from s, returning the modified slice.
func Delete[S ~[]E, E any](s S, i int) S {
	// Zero the removed slot first so any object it referenced can be
	// garbage collected once the slice shrinks (matters when i is the
	// last index, where slices.Delete copies nothing over it).
	var zero E
	s[i] = zero
	return slices.Delete(s, i, i+1)
}
// Contains reports whether v is present in s.
// Thin wrapper over the standard library's slices.Contains (linear scan).
func Contains[E comparable](s []E, v E) bool {
	return slices.Contains(s, v)
}
// FindFunc finds the first element matching the function, or nil if none do.
// The returned pointer aliases the slice's backing array.
func FindFunc[E any](s []E, f func(E) bool) *E {
	if idx := slices.IndexFunc(s, f); idx >= 0 {
		return &s[idx]
	}
	return nil
}
// Reverse reverses its argument slice in place and returns it.
func Reverse[E any](r []E) []E {
	left, right := 0, len(r)-1
	for left < right {
		r[left], r[right] = r[right], r[left]
		left++
		right--
	}
	return r
}
// FilterInPlace retains all elements in []E that f(E) returns true for.
// The array is *mutated in place* and returned.
// Use Filter to avoid mutation.
func FilterInPlace[E any](s []E, f func(E) bool) []E {
	kept := 0
	for i := range s {
		if f(s[i]) {
			s[kept] = s[i]
			kept++
		}
	}
	// Zero the abandoned tail so objects referenced there can be
	// garbage collected.
	var zero E
	for i := kept; i < len(s); i++ {
		s[i] = zero
	}
	return s[:kept]
}
// Filter retains all elements in []E that f(E) returns true for.
// A new slice is created and returned. Use FilterInPlace to perform in-place.
// The result is never nil, even when nothing matches.
func Filter[E any](s []E, f func(E) bool) []E {
	out := []E{}
	for _, item := range s {
		if !f(item) {
			continue
		}
		out = append(out, item)
	}
	return out
}
// Map runs f() over all elements in s and returns the results, in order.
func Map[E any, O any](s []E, f func(E) O) []O {
	out := make([]O, len(s))
	for i, e := range s {
		out[i] = f(e)
	}
	return out
}
// MapFilter runs f() over all elements in s and returns the dereferenced
// non-nil results, in order.
func MapFilter[E any, O any](s []E, f func(E) *O) []O {
	out := make([]O, 0, len(s))
	for _, e := range s {
		res := f(e)
		if res == nil {
			continue
		}
		out = append(out, *res)
	}
	return out
}
// Reference returns a slice of pointers, one per element of s.
// Each pointer refers to a copy of the element, not into s itself.
func Reference[E any](s []E) []*E {
	out := make([]*E, len(s))
	for i := range s {
		elem := s[i] // deliberate copy: callers must not alias s
		out[i] = &elem
	}
	return out
}
// Dereference returns all non-nil references, dereferenced, in order.
func Dereference[E any](s []*E) []E {
	out := make([]E, 0, len(s))
	for _, ptr := range s {
		if ptr == nil {
			continue
		}
		out = append(out, *ptr)
	}
	return out
}
// Flatten merges a slice of slices into a single slice.
// A nil input yields nil; any non-nil input yields a non-nil result.
func Flatten[E any](s [][]E) []E {
	if s == nil {
		return nil
	}
	out := make([]E, 0)
	for _, inner := range s {
		out = append(out, inner...)
	}
	return out
}
// Group groups a slice by a key, preserving the input order within each group.
func Group[T any, K comparable](data []T, f func(T) K) map[K][]T {
	grouped := make(map[K][]T, len(data))
	for _, item := range data {
		key := f(item)
		grouped[key] = append(grouped[key], item)
	}
	return grouped
}
// GroupUnique groups a slice by a key. Each key must be unique or data will be lost
// (later elements overwrite earlier ones). To allow multiple values per key use Group.
func GroupUnique[T any, K comparable](data []T, f func(T) K) map[K]T {
	byKey := make(map[K]T, len(data))
	for _, item := range data {
		byKey[f(item)] = item
	}
	return byKey
}
// Join concatenates fields with sep between them. The separator comes first
// so the variadic fields read naturally at call sites.
func Join(sep string, fields ...string) string {
	return strings.Join(fields, sep)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spiffe
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"encoding/pem"
"fmt"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
jose "github.com/go-jose/go-jose/v3"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
const (
	// Scheme is the URI scheme of SPIFFE identities.
	Scheme = "spiffe"

	// URIPrefix is the prefix of every SPIFFE URI ("spiffe://").
	URIPrefix = Scheme + "://"
	// URIPrefixLen caches len(URIPrefix) for prefix stripping.
	URIPrefixLen = len(URIPrefix)

	// The default SPIFFE URL value for trust domain
	defaultTrustDomain = constants.DefaultClusterLocalDomain

	// ServiceAccountSegment is the path segment that precedes the service account.
	ServiceAccountSegment = "sa"
	// NamespaceSegment is the path segment that precedes the namespace.
	NamespaceSegment = "ns"
)
var (
	// trustDomain is the currently configured trust domain; guarded by trustDomainMutex.
	trustDomain      = defaultTrustDomain
	trustDomainMutex sync.RWMutex

	// firstRetryBackOffTime is the initial backoff between SPIFFE bundle
	// fetch attempts; it doubles on each retry.
	firstRetryBackOffTime = time.Millisecond * 50

	spiffeLog = log.RegisterScope("spiffe", "SPIFFE library logging")
)
// Identity is a parsed SPIFFE identity of the form
// spiffe://<trust-domain>/ns/<namespace>/sa/<service-account>.
type Identity struct {
	TrustDomain    string
	Namespace      string
	ServiceAccount string
}
// ParseIdentity parses a string of the form
// spiffe://<trust-domain>/ns/<namespace>/sa/<service-account> into an Identity.
// Any other shape yields an error.
func ParseIdentity(s string) (Identity, error) {
	rest, ok := strings.CutPrefix(s, URIPrefix)
	if !ok {
		return Identity{}, fmt.Errorf("identity is not a spiffe format")
	}
	parts := strings.Split(rest, "/")
	// Exactly five segments: td / "ns" / namespace / "sa" / serviceAccount.
	if len(parts) != 5 || parts[1] != NamespaceSegment || parts[3] != ServiceAccountSegment {
		return Identity{}, fmt.Errorf("identity is not a spiffe format")
	}
	return Identity{
		TrustDomain:    parts[0],
		Namespace:      parts[2],
		ServiceAccount: parts[4],
	}, nil
}
// String encodes the identity back into its canonical SPIFFE URI form.
func (i Identity) String() string {
	return URIPrefix + i.TrustDomain + "/ns/" + i.Namespace + "/sa/" + i.ServiceAccount
}
// bundleDoc is the JSON document served by a SPIFFE bundle endpoint:
// a JWKS plus SPIFFE-specific metadata fields.
type bundleDoc struct {
	jose.JSONWebKeySet
	// Sequence is the spiffe_sequence field of the bundle.
	Sequence uint64 `json:"spiffe_sequence,omitempty"`
	// RefreshHint is the suggested refresh interval, in seconds per the SPIFFE
	// bundle spec — TODO confirm; this code never reads it.
	RefreshHint int `json:"spiffe_refresh_hint,omitempty"`
}
// SetTrustDomain sets the trust domain used when generating SPIFFE URIs.
// "@" characters (not valid in a trust domain) are replaced with ".".
func SetTrustDomain(value string) {
	// Replace special characters in spiffe. ReplaceAll is the modern
	// spelling of Replace(..., -1).
	v := strings.ReplaceAll(value, "@", ".")
	trustDomainMutex.Lock()
	defer trustDomainMutex.Unlock()
	trustDomain = v
}
// GetTrustDomain returns the currently configured trust domain.
func GetTrustDomain() string {
	trustDomainMutex.RLock()
	defer trustDomainMutex.RUnlock()
	return trustDomain
}
// GenSpiffeURI returns the formatted uri(SPIFFE format for now) for the certificate.
// Note: the URI is returned even when ns or serviceAccount is empty; callers
// that care must check the error.
func GenSpiffeURI(ns, serviceAccount string) (string, error) {
	var err error
	if ns == "" || serviceAccount == "" {
		err = fmt.Errorf(
			"namespace or service account empty for SPIFFE uri ns=%v serviceAccount=%v", ns, serviceAccount)
	}
	uri := URIPrefix + GetTrustDomain() + "/ns/" + ns + "/sa/" + serviceAccount
	return uri, err
}
// MustGenSpiffeURI returns the formatted uri(SPIFFE format for now) for the certificate and logs if there was an error.
func MustGenSpiffeURI(ns, serviceAccount string) string {
	uri, genErr := GenSpiffeURI(ns, serviceAccount)
	if genErr != nil {
		// Best effort: log at debug level and return the URI anyway.
		spiffeLog.Debug(genErr.Error())
	}
	return uri
}
// ExpandWithTrustDomains expands a given spiffe identities, plus a list of trust domain aliases.
// We ensure the returned list does not contain duplicates; the original input is always retained.
// For example,
// ExpandWithTrustDomains({"spiffe://td1/ns/def/sa/def"}, {"td1", "td2"}) returns
//
//	{"spiffe://td1/ns/def/sa/def", "spiffe://td2/ns/def/sa/def"}.
//
// ExpandWithTrustDomains({"spiffe://td1/ns/def/sa/a", "spiffe://td1/ns/def/sa/b"}, {"td2"}) returns
//
//	{"spiffe://td1/ns/def/sa/a", "spiffe://td2/ns/def/sa/a", "spiffe://td1/ns/def/sa/b", "spiffe://td2/ns/def/sa/b"}.
func ExpandWithTrustDomains(spiffeIdentities sets.String, trustDomainAliases []string) sets.String {
	if len(trustDomainAliases) == 0 {
		return spiffeIdentities
	}
	out := sets.New[string]()
	for id := range spiffeIdentities {
		// The original identity is always retained.
		out.Insert(id)
		if !strings.HasPrefix(id, URIPrefix) {
			// Not a SPIFFE identity — e.g. a DNS name. Nothing to expand.
			continue
		}
		parsed, err := ParseIdentity(id)
		if err != nil {
			spiffeLog.Errorf("Failed to extract SPIFFE trust domain from %v: %v", id, err)
			continue
		}
		// Re-emit the identity once per alias trust domain.
		for _, alias := range trustDomainAliases {
			parsed.TrustDomain = alias
			out.Insert(parsed.String())
		}
	}
	return out
}
// GetTrustDomainFromURISAN extracts the trust domain part from the URI SAN in the X.509 certificate.
func GetTrustDomainFromURISAN(uriSan string) (string, error) {
	identity, err := ParseIdentity(uriSan)
	if err != nil {
		return "", fmt.Errorf("failed to parse URI SAN %s. Error: %v", uriSan, err)
	}
	return identity.TrustDomain, nil
}
// RetrieveSpiffeBundleRootCerts retrieves the trusted CA certificates from a list of SPIFFE bundle endpoints.
// It can use the system cert pool and the supplied certificates to validate the endpoints.
// config maps a trust domain name to its bundle endpoint URL; the result maps
// each trust domain to the certificates of its "x509-svid" bundle entries.
// Each endpoint is retried with exponential backoff until retryTimeout elapses.
func RetrieveSpiffeBundleRootCerts(config map[string]string, caCertPool *x509.CertPool, retryTimeout time.Duration) (
	map[string][]*x509.Certificate, error,
) {
	httpClient := &http.Client{
		Timeout: time.Second * 10,
	}
	ret := map[string][]*x509.Certificate{}
	for trustDomain, endpoint := range config {
		if !strings.HasPrefix(endpoint, "https://") {
			endpoint = "https://" + endpoint
		}
		u, err := url.Parse(endpoint)
		if err != nil {
			return nil, fmt.Errorf("failed to split the SPIFFE bundle URL: %v", err)
		}
		config := &tls.Config{
			ServerName: u.Hostname(),
			RootCAs:    caCertPool,
			MinVersion: tls.VersionTLS12,
		}
		httpClient.Transport = &http.Transport{
			Proxy:           http.ProxyFromEnvironment,
			TLSClientConfig: config,
			DialContext: (&net.Dialer{
				Timeout: time.Second * 10,
			}).DialContext,
			IdleConnTimeout:       90 * time.Second,
			TLSHandshakeTimeout:   10 * time.Second,
			ExpectContinueTimeout: 1 * time.Second,
		}
		retryBackoffTime := firstRetryBackOffTime
		startTime := time.Now()
		var resp *http.Response
		for {
			resp, err = httpClient.Get(endpoint)
			var errMsg string
			if err != nil {
				errMsg = fmt.Sprintf("Calling %s failed with error: %v", endpoint, err)
			} else if resp == nil {
				errMsg = fmt.Sprintf("Calling %s failed with nil response", endpoint)
			} else if resp.StatusCode != http.StatusOK {
				b := make([]byte, 1024)
				n, _ := resp.Body.Read(b)
				// Close the failed response before retrying so the underlying
				// connection is not leaked across retry attempts.
				resp.Body.Close()
				errMsg = fmt.Sprintf("Calling %s failed with unexpected status: %v, fetching bundle: %s",
					endpoint, resp.StatusCode, string(b[:n]))
			} else {
				break
			}
			if startTime.Add(retryTimeout).Before(time.Now()) {
				return nil, fmt.Errorf("exhausted retries to fetch the SPIFFE bundle %s from url %s. Latest error: %v",
					trustDomain, endpoint, errMsg)
			}
			spiffeLog.Warnf("%s, retry in %v", errMsg, retryBackoffTime)
			time.Sleep(retryBackoffTime)
			retryBackoffTime *= 2 // Exponentially increase the retry backoff time.
		}
		doc := new(bundleDoc)
		err = json.NewDecoder(resp.Body).Decode(doc)
		// Close each response as soon as it is decoded: a `defer` here would
		// keep every body (and connection) open until the function returns.
		resp.Body.Close()
		if err != nil {
			return nil, fmt.Errorf("trust domain [%s] at URL [%s] failed to decode bundle: %v", trustDomain, endpoint, err)
		}
		var certs []*x509.Certificate
		for i, key := range doc.Keys {
			if key.Use == "x509-svid" {
				if len(key.Certificates) != 1 {
					return nil, fmt.Errorf("trust domain [%s] at URL [%s] expected 1 certificate in x509-svid entry %d; got %d",
						trustDomain, endpoint, i, len(key.Certificates))
				}
				certs = append(certs, key.Certificates[0])
			}
		}
		if len(certs) == 0 {
			return nil, fmt.Errorf("trust domain [%s] at URL [%s] does not provide a X509 SVID", trustDomain, endpoint)
		}
		ret[trustDomain] = certs
	}
	for trustDomain, certs := range ret {
		spiffeLog.Infof("Loaded SPIFFE trust bundle for: %v, containing %d certs", trustDomain, len(certs))
	}
	return ret, nil
}
// PeerCertVerifier is an instance to verify the peer certificate in the SPIFFE way using the retrieved root certificates.
type PeerCertVerifier struct {
	// generalCertPool aggregates the root certs of every trust domain.
	generalCertPool *x509.CertPool
	// certPools maps a trust domain to the pool of its root certificates.
	certPools map[string]*x509.CertPool
}
// NewPeerCertVerifier returns a new, empty PeerCertVerifier.
// Populate it with AddMapping / AddMappings / AddMappingFromPEM before use.
func NewPeerCertVerifier() *PeerCertVerifier {
	return &PeerCertVerifier{
		generalCertPool: x509.NewCertPool(),
		certPools:       make(map[string]*x509.CertPool),
	}
}
// GetGeneralCertPool returns generalCertPool containing all root certs
// across every trust domain added so far.
func (v *PeerCertVerifier) GetGeneralCertPool() *x509.CertPool {
	return v.generalCertPool
}
// AddMapping adds a new trust domain to certificates mapping to the certPools map.
// The certs are also merged into the general (all-domain) pool.
func (v *PeerCertVerifier) AddMapping(trustDomain string, certs []*x509.Certificate) {
	if v.certPools[trustDomain] == nil {
		v.certPools[trustDomain] = x509.NewCertPool()
	}
	pool := v.certPools[trustDomain]
	for _, cert := range certs {
		pool.AddCert(cert)
		v.generalCertPool.AddCert(cert)
	}
	spiffeLog.Infof("Added %d certs to trust domain %s in peer cert verifier", len(certs), trustDomain)
}
// AddMappingFromPEM adds multiple RootCA's to the spiffe Trust bundle in the trustDomain namespace
func (v *PeerCertVerifier) AddMappingFromPEM(trustDomain string, rootCertBytes []byte) error {
	// Concatenate the DER bytes of every PEM block in the input.
	var der []byte
	block, rest := pem.Decode(rootCertBytes)
	for block != nil {
		der = append(der, block.Bytes...)
		block, rest = pem.Decode(rest)
	}
	rootCAs, err := x509.ParseCertificates(der)
	if err != nil {
		spiffeLog.Errorf("parse certificate from rootPEM got error: %v", err)
		return fmt.Errorf("parse certificate from rootPEM got error: %v", err)
	}
	v.AddMapping(trustDomain, rootCAs)
	return nil
}
// AddMappings merges a trust domain to certs map to the certPools map.
func (v *PeerCertVerifier) AddMappings(certMap map[string][]*x509.Certificate) {
	for trustDomain, certs := range certMap {
		v.AddMapping(trustDomain, certs)
	}
}
// VerifyPeerCert is an implementation of tls.Config.VerifyPeerCertificate.
// It verifies the peer certificate using the root certificates associated with its trust domain.
// The leaf (first) certificate must carry exactly one URI SAN, whose trust
// domain selects the root pool; remaining certs are used as intermediates.
func (v *PeerCertVerifier) VerifyPeerCert(rawCerts [][]byte, _ [][]*x509.Certificate) error {
	if len(rawCerts) == 0 {
		// Peer doesn't present a certificate. Just skip. Other authn methods may be used.
		return nil
	}
	parsed := make([]*x509.Certificate, 0, len(rawCerts))
	for _, raw := range rawCerts {
		cert, err := x509.ParseCertificate(raw)
		if err != nil {
			return err
		}
		parsed = append(parsed, cert)
	}
	peerCert := parsed[0]
	intCertPool := x509.NewCertPool()
	for _, cert := range parsed[1:] {
		intCertPool.AddCert(cert)
	}
	if len(peerCert.URIs) != 1 {
		return fmt.Errorf("peer certificate does not contain 1 URI type SAN, detected %d", len(peerCert.URIs))
	}
	trustDomain, err := GetTrustDomainFromURISAN(peerCert.URIs[0].String())
	if err != nil {
		return err
	}
	rootCertPool, ok := v.certPools[trustDomain]
	if !ok {
		return fmt.Errorf("no cert pool found for trust domain %s", trustDomain)
	}
	_, err = peerCert.Verify(x509.VerifyOptions{
		Roots:         rootCertPool,
		Intermediates: intCertPool,
	})
	return err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.27.1
// protoc v3.18.0
// source: pkg/test/config/mock_config.proto
// Basic config resource consisting
// of a set of key-value pairs
package config
import (
reflect "reflect"
sync "sync"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
)
// Generated guards: these fail compilation if the protoimpl runtime this
// file was generated against is incompatible with the one linked in.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MockConfig is the generated Go type for the config.MockConfig proto
// message (a key plus a list of key/value pairs).
// Generated by protoc-gen-go; do not hand-edit.
type MockConfig struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Key   string        `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Pairs []*ConfigPair `protobuf:"bytes,2,rep,name=pairs,proto3" json:"pairs,omitempty"`
}

func (x *MockConfig) Reset() {
	*x = MockConfig{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_test_config_mock_config_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *MockConfig) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*MockConfig) ProtoMessage() {}

func (x *MockConfig) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_test_config_mock_config_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use MockConfig.ProtoReflect.Descriptor instead.
func (*MockConfig) Descriptor() ([]byte, []int) {
	return file_pkg_test_config_mock_config_proto_rawDescGZIP(), []int{0}
}

// GetKey returns the Key field; nil-receiver safe (generated accessor).
func (x *MockConfig) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetPairs returns the Pairs field; nil-receiver safe (generated accessor).
func (x *MockConfig) GetPairs() []*ConfigPair {
	if x != nil {
		return x.Pairs
	}
	return nil
}
// ConfigPair is the generated Go type for the config.ConfigPair proto
// message (a single key/value pair).
// Generated by protoc-gen-go; do not hand-edit.
type ConfigPair struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Key   string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}

func (x *ConfigPair) Reset() {
	*x = ConfigPair{}
	if protoimpl.UnsafeEnabled {
		mi := &file_pkg_test_config_mock_config_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ConfigPair) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ConfigPair) ProtoMessage() {}

func (x *ConfigPair) ProtoReflect() protoreflect.Message {
	mi := &file_pkg_test_config_mock_config_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ConfigPair.ProtoReflect.Descriptor instead.
func (*ConfigPair) Descriptor() ([]byte, []int) {
	return file_pkg_test_config_mock_config_proto_rawDescGZIP(), []int{1}
}

// GetKey returns the Key field; nil-receiver safe (generated accessor).
func (x *ConfigPair) GetKey() string {
	if x != nil {
		return x.Key
	}
	return ""
}

// GetValue returns the Value field; nil-receiver safe (generated accessor).
func (x *ConfigPair) GetValue() string {
	if x != nil {
		return x.Value
	}
	return ""
}
// File_pkg_test_config_mock_config_proto is the compiled descriptor for this
// proto file, populated by the generated init below.
var File_pkg_test_config_mock_config_proto protoreflect.FileDescriptor

// file_pkg_test_config_mock_config_proto_rawDesc is the serialized file
// descriptor emitted by protoc; do not hand-edit.
var file_pkg_test_config_mock_config_proto_rawDesc = []byte{
	0x0a, 0x21, 0x70, 0x6b, 0x67, 0x2f, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
	0x67, 0x2f, 0x6d, 0x6f, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x48, 0x0a, 0x0a, 0x4d,
	0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x05, 0x70,
	0x61, 0x69, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x6f, 0x6e,
	0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05,
	0x70, 0x61, 0x69, 0x72, 0x73, 0x22, 0x34, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50,
	0x61, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
	0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0a, 0x5a, 0x08, 0x2e,
	0x3b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_pkg_test_config_mock_config_proto_rawDescOnce sync.Once
	file_pkg_test_config_mock_config_proto_rawDescData = file_pkg_test_config_mock_config_proto_rawDesc
)

// file_pkg_test_config_mock_config_proto_rawDescGZIP lazily gzip-compresses
// the raw descriptor, replacing rawDescData with the compressed form once.
func file_pkg_test_config_mock_config_proto_rawDescGZIP() []byte {
	file_pkg_test_config_mock_config_proto_rawDescOnce.Do(func() {
		file_pkg_test_config_mock_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_test_config_mock_config_proto_rawDescData)
	})
	return file_pkg_test_config_mock_config_proto_rawDescData
}

var file_pkg_test_config_mock_config_proto_msgTypes = make([]protoimpl.MessageInfo, 2)

var file_pkg_test_config_mock_config_proto_goTypes = []any{
	(*MockConfig)(nil), // 0: config.MockConfig
	(*ConfigPair)(nil), // 1: config.ConfigPair
}

var file_pkg_test_config_mock_config_proto_depIdxs = []int32{
	1, // 0: config.MockConfig.pairs:type_name -> config.ConfigPair
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}
func init() { file_pkg_test_config_mock_config_proto_init() }

// file_pkg_test_config_mock_config_proto_init registers the message types and
// builds the file descriptor. It is idempotent: later calls return early once
// File_pkg_test_config_mock_config_proto is set. Generated code; do not edit.
func file_pkg_test_config_mock_config_proto_init() {
	if File_pkg_test_config_mock_config_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_pkg_test_config_mock_config_proto_msgTypes[0].Exporter = func(v any, i int) any {
			switch v := v.(*MockConfig); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_pkg_test_config_mock_config_proto_msgTypes[1].Exporter = func(v any, i int) any {
			switch v := v.(*ConfigPair); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_pkg_test_config_mock_config_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_pkg_test_config_mock_config_proto_goTypes,
		DependencyIndexes: file_pkg_test_config_mock_config_proto_depIdxs,
		MessageInfos:      file_pkg_test_config_mock_config_proto_msgTypes,
	}.Build()
	File_pkg_test_config_mock_config_proto = out.File
	// Release build inputs so they can be garbage collected.
	file_pkg_test_config_mock_config_proto_rawDesc = nil
	file_pkg_test_config_mock_config_proto_goTypes = nil
	file_pkg_test_config_mock_config_proto_depIdxs = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package env
import (
"encoding/json"
"fmt"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"istio.io/istio/pkg/log"
)
var (
	// TARGET_OUT environment variable
	// nolint: revive, stylecheck
	TARGET_OUT Variable = "TARGET_OUT"

	// LOCAL_OUT environment variable
	// nolint: revive, stylecheck
	LOCAL_OUT Variable = "LOCAL_OUT"

	// REPO_ROOT environment variable
	// nolint: revive, stylecheck
	REPO_ROOT Variable = "REPO_ROOT"

	// HUB is the Docker hub to be used for images.
	// nolint: revive, stylecheck
	HUB Variable = "HUB"

	// TAG is the Docker tag to be used for images.
	// nolint: revive, stylecheck
	TAG Variable = "TAG"

	// PULL_POLICY is the image pull policy to use when rendering templates.
	// nolint: revive, stylecheck
	PULL_POLICY Variable = "PULL_POLICY"

	// ECHO_IMAGE is the image to use when deploying echo services.
	// nolint: golint, revive, stylecheck
	ECHO_IMAGE Variable = "ECHO_IMAGE"

	// GRPC_ECHO_IMAGE is the image to use for a separate gRPC-only container in echo Pods.
	// nolint: golint, revive, stylecheck
	GRPC_ECHO_IMAGE Variable = "GRPC_ECHO_IMAGE"

	// KUBECONFIG is the list of Kubernetes configuration files. If configuration files are specified on
	// the command-line, that takes precedence.
	// nolint: revive, stylecheck
	KUBECONFIG Variable = "KUBECONFIG"

	// IstioSrc is the location of istio source ($TOP/src/istio.io/istio).
	IstioSrc = REPO_ROOT.ValueOrDefaultFunc(getDefaultIstioSrc)

	// IstioOut is the location of the output directory ($TOP/out).
	IstioOut = verifyFile(TARGET_OUT, TARGET_OUT.ValueOrDefaultFunc(getDefaultIstioOut))

	// LocalOut is the location of the output directory for the OS we are running in,
	// not necessarily the OS we are building for.
	LocalOut = verifyFile(LOCAL_OUT, LOCAL_OUT.ValueOrDefaultFunc(getDefaultIstioOut))

	// OtelCollectorInstallFilePath is the OpenTelemetry installation file.
	OtelCollectorInstallFilePath = path.Join(IstioSrc, getSampleFile("open-telemetry/otel.yaml"))

	// StackdriverInstallFilePath is the stackdriver installation file.
	StackdriverInstallFilePath = path.Join(IstioSrc, getInstallationFile("stackdriver/stackdriver.yaml"))

	// GCEMetadataServerInstallFilePath is the GCE Metadata Server installation file.
	GCEMetadataServerInstallFilePath = path.Join(IstioSrc, getInstallationFile("gcemetadata/gce_metadata_server.yaml"))

	// RegistryRedirectorServerInstallFilePath is the registry redirector installation file.
	RegistryRedirectorServerInstallFilePath = path.Join(IstioSrc, getInstallationFile("registryredirector/registry_redirector_server.yaml"))
)
var (
	// b is the absolute path of this source file, captured at init time.
	_, b, _, _ = runtime.Caller(0)

	// Root folder of this project.
	// This relies on the fact this file is 3 levels up from the root; if this changes, adjust the path below.
	Root = filepath.Join(filepath.Dir(b), "../../..")
)
// getDefaultIstioSrc returns the repository root inferred from this file's
// location, used when REPO_ROOT is not set.
func getDefaultIstioSrc() string {
	return Root
}
// getSampleFile returns the repository-relative path of a file under samples/.
func getSampleFile(p string) string {
	return "samples/" + p
}
// getInstallationFile returns the repository-relative path of a test
// framework component installation file.
func getInstallationFile(p string) string {
	return "pkg/test/framework/components/" + p
}
func getDefaultIstioOut() string {
return fmt.Sprintf("%s/out/%s_%s", IstioSrc, runtime.GOOS, runtime.GOARCH)
}
// verifyFile returns f if it exists on disk; otherwise it logs a warning
// naming the variable v and returns "".
func verifyFile(v Variable, f string) string {
	if fileExists(f) {
		return f
	}
	log.Warnf("unable to resolve %s. Dir %s does not exist", v, f)
	return ""
}
// fileExists reports whether f exists, per CheckFileExists.
func fileExists(f string) bool {
	return CheckFileExists(f) == nil
}
func CheckFileExists(path string) error {
if _, err := os.Stat(path); os.IsNotExist(err) {
return err
}
return nil
}
// ReadDepsSHA looks up the pinned lastStableSHA for the named dependency in
// the repository's istio.deps file. It returns an error if the file cannot
// be read or parsed, or if the dependency is not listed.
func ReadDepsSHA(name string) (string, error) {
	// Shape of one entry in istio.deps (only the fields we need).
	type depsEntry struct {
		Name          string `json:"name"`
		LastStableSHA string `json:"lastStableSHA"`
	}
	raw, err := os.ReadFile(filepath.Join(IstioSrc, "istio.deps"))
	if err != nil {
		return "", err
	}
	var entries []depsEntry
	if err := json.Unmarshal(raw, &entries); err != nil {
		return "", err
	}
	for _, entry := range entries {
		if entry.Name == name {
			return entry.LastStableSHA, nil
		}
	}
	return "", fmt.Errorf("%s not found", name)
}
// ReadVersion returns the contents of the $ROOTDIR/VERSION file, with a
// single trailing newline removed.
func ReadVersion() (string, error) {
	raw, err := os.ReadFile(filepath.Join(IstioSrc, "VERSION"))
	if err != nil {
		return "", err
	}
	return strings.TrimSuffix(string(raw), "\n"), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package env
import "os"
// Variable is a wrapper for an environment variable.
type Variable string
// Name of the environment variable.
func (e Variable) Name() string {
return string(e)
}
// Value of the environment variable.
func (e Variable) Value() string {
return os.Getenv(e.Name())
}
// ValueOrDefault returns the value of the environment variable if it is non-empty. Otherwise returns the value provided.
func (e Variable) ValueOrDefault(defaultValue string) string {
return e.ValueOrDefaultFunc(func() string {
return defaultValue
})
}
// ValueOrDefaultFunc returns the value of the environment variable if it is non-empty. Otherwise returns the value function provided.
func (e Variable) ValueOrDefaultFunc(defaultValueFunc func() string) string {
if value := e.Value(); value != "" {
return value
}
return defaultValueFunc()
}
// Copyright Istio Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"errors"
"fmt"
"os"
"runtime"
"sync"
"testing"
"istio.io/istio/pkg/log"
)
// Compile-time checks that the standard testing types and our errorWrapper
// all satisfy Failer.
var (
	_ Failer = &testing.T{}
	_ Failer = &testing.B{}
	_ Failer = &errorWrapper{}
)
// Failer is an interface to be provided to test functions of the form XXXOrFail. This is a
// substitute for testing.TB, which cannot be implemented outside of the testing
// package. It covers the subset of testing.TB methods this repository uses.
type Failer interface {
	Fail()
	FailNow()
	Fatal(args ...any)
	Fatalf(format string, args ...any)
	Log(args ...any)
	Logf(format string, args ...any)
	TempDir() string
	Helper()
	Cleanup(func())
	Skip(args ...any)
}
// Fuzzer abstracts *testing.F, covering the two methods fuzz helpers need.
type Fuzzer interface {
	Fuzz(ff any)
	Add(args ...any)
}
// errorWrapper is a Failer that can be used to just extract an `error`. This allows mixing
// functions that take in a Failer and those that take an error.
// The function must be called within a goroutine, or calls to Fatal will try to terminate the outer
// test context, which will cause the test to panic. The Wrap function handles this automatically.
type errorWrapper struct {
	mu sync.RWMutex
	// failed holds the first fatal error recorded, if any.
	failed error
	// cleanup is the chained cleanup function; each Cleanup call wraps the previous one.
	cleanup func()
}
// Wrap executes a function with a fake Failer, and returns an error if the test failed. This allows
// calling functions that take a Failer and using them with functions that expect an error, or
// allowing calling functions that would cause a test to immediately fail to instead return an error.
// Wrap handles Cleanup() and short-circuiting of Fatal() just like the real testing.T.
func Wrap(f func(t Failer)) error {
	w := &errorWrapper{}
	// Run f in its own goroutine so that Fatal's runtime.Goexit terminates
	// only that goroutine, not the caller.
	done := make(chan struct{})
	go func() {
		defer close(done)
		f(w)
	}()
	<-done
	return w.ToErrorCleanup()
}
// ToErrorCleanup returns any errors encountered and executes any cleanup actions.
// NOTE(review): the cleanup chain runs while the read lock is held; a cleanup
// function that calls Fatal (which takes the write lock) would deadlock —
// confirm no registered cleanups do so.
func (e *errorWrapper) ToErrorCleanup() error {
	e.mu.RLock()
	defer e.mu.RUnlock()
	if e.cleanup != nil {
		e.cleanup()
	}
	return e.failed
}
// Fail is reported as a fatal error.
func (e *errorWrapper) Fail() {
	e.Fatal("fail called")
}

// FailNow is reported as a fatal error.
func (e *errorWrapper) FailNow() {
	e.Fatal("fail now called")
}

// Fatal records the first failure and terminates the calling goroutine,
// mirroring testing.T's short-circuit behavior.
func (e *errorWrapper) Fatal(args ...any) {
	e.mu.Lock()
	// Only the first failure is kept, like the real testing.T.
	if e.failed == nil {
		e.failed = errors.New(fmt.Sprint(args...))
	}
	e.mu.Unlock()
	runtime.Goexit()
}

// Fatalf is Fatal with Sprintf-style formatting.
func (e *errorWrapper) Fatalf(format string, args ...any) {
	e.Fatal(fmt.Sprintf(format, args...))
}
// Helper is a no-op; call-site tracking is only possible inside the testing package.
func (e *errorWrapper) Helper() {
}

// Skip is treated as fatal, since a skip cannot be represented as a plain error.
func (e *errorWrapper) Skip(args ...any) {
	e.Fatal(args...)
}
// Cleanup registers f to run when ToErrorCleanup is invoked. Functions run in
// LIFO order, matching testing.T.Cleanup; the defer ensures earlier cleanups
// still run even if a later one panics.
func (e *errorWrapper) Cleanup(f func()) {
	e.mu.Lock()
	defer e.mu.Unlock()
	prev := e.cleanup
	e.cleanup = func() {
		if prev != nil {
			defer prev()
		}
		f()
	}
}
func (e *errorWrapper) Log(args ...any) {
log.Info(fmt.Sprint(args...))
}
// Logf writes the formatted message to the global logger.
func (e *errorWrapper) Logf(format string, args ...any) {
	log.Infof(format, args...)
}
// TempDir creates a temporary directory that is removed when cleanups run,
// mirroring (*testing.T).TempDir.
func (e *errorWrapper) TempDir() string {
	tempDir, err := os.MkdirTemp("", "test")
	if err != nil {
		// Previously the error was silently swallowed and "" returned, leaving
		// callers with an unusable path. Match testing.T.TempDir, which fails
		// the test when the directory cannot be created.
		e.Fatalf("TempDir: %v", err)
	}
	e.Cleanup(func() {
		os.RemoveAll(tempDir)
	})
	return tempDir
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"encoding/json"
)
// JSONEquals fails the test when a and b are not equal JSON documents.
// Protobuf JSON output is not byte-stable by design
// (https://github.com/golang/protobuf/issues/1373), so both inputs are
// compacted to a canonical form before comparing.
func JSONEquals(t Failer, a, b string) {
	t.Helper()
	var ca, cb bytes.Buffer
	if err := json.Compact(&ca, []byte(a)); err != nil {
		t.Fatal(err)
	}
	if err := json.Compact(&cb, []byte(b)); err != nil {
		t.Fatal(err)
	}
	if got, want := ca.String(), cb.String(); got != want {
		t.Fatalf("got %v, want %v", got, want)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"context"
"os"
"go.uber.org/atomic"
)
// SetForTest overwrites *vv with v for the duration of a test, restoring the
// original value when the test completes.
func SetForTest[T any](t Failer, vv *T, v T) {
	prev := *vv
	*vv = v
	t.Cleanup(func() { *vv = prev })
}
// SetEnvForTest sets environment variable k to v for the duration of a test.
// On cleanup the previous value is restored, or the variable is unset if it did
// not exist before.
func SetEnvForTest(t Failer, k, v string) {
	prev, hadPrev := os.LookupEnv(k)
	if err := os.Setenv(k, v); err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() {
		var err error
		if hadPrev {
			err = os.Setenv(k, prev)
		} else {
			err = os.Unsetenv(k)
		}
		if err != nil {
			t.Fatal(err)
		}
	})
}
// SetAtomicBoolForTest atomically sets *vv for the duration of a test and
// restores the prior value when the test completes.
func SetAtomicBoolForTest(t Failer, vv *atomic.Bool, v bool) {
	prev := vv.Load()
	vv.Store(v)
	t.Cleanup(func() { vv.Store(prev) })
}
// NewStop returns a stop channel that is closed automatically when the test completes.
func NewStop(t Failer) chan struct{} {
	stop := make(chan struct{})
	t.Cleanup(func() { close(stop) })
	return stop
}
// NewContext returns a context that is canceled automatically when the test completes.
func NewContext(t Failer) context.Context {
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)
	return ctx
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assert
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/protobuf/testing/protocmp"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/retry"
)
// compareErrors treats two errors as equal when both are nil, or when both are
// non-nil and carry the same message.
var compareErrors = cmp.Comparer(func(x, y error) bool {
	if x == nil || y == nil {
		return x == nil && y == nil
	}
	return x.Error() == y.Error()
})
// cmpOptioner can be implemented to provide custom options that should be used when comparing a type.
// Warning: this is not recursive, unfortunately. So a type `A{B}` cannot rely on `B` implementing this to customize comparing `B`.
type cmpOptioner interface {
	// CmpOpts returns the extra comparison options to apply for this type.
	CmpOpts() []cmp.Option
}
// opts builds the comparison options for a value: the package defaults, plus any
// type-specific options when the type (or, for a slice, its element type)
// implements cmpOptioner.
func opts[T any](a T) []cmp.Option {
	withCustom := func(o cmpOptioner) []cmp.Option {
		all := append([]cmp.Option{}, cmpOpts...)
		return append(all, o.CmpOpts()...)
	}
	if o, ok := any(a).(cmpOptioner); ok {
		return withCustom(o)
	}
	// When T is a slice (ex: []A), look at the element type (A) instead.
	if t := reflect.TypeOf(a); t != nil && t.Kind() == reflect.Slice {
		elem := reflect.New(t.Elem()).Elem().Interface()
		if o, ok := elem.(cmpOptioner); ok {
			return withCustom(o)
		}
	}
	return cmpOpts
}
var cmpOpts = []cmp.Option{protocmp.Transform(), cmpopts.EquateEmpty(), compareErrors}
// Compare compares two objects and returns and error if they are not the same.
func Compare[T any](a, b T) error {
if !cmp.Equal(a, b, opts(a)...) {
return fmt.Errorf("found diff: %v\nLeft: %v\nRight: %v", cmp.Diff(a, b, opts(a)...), a, b)
}
return nil
}
// Equal fails the test when a and b differ under the package comparison options.
// Optional context strings are prefixed to the failure message.
func Equal[T any](t test.Failer, a, b T, context ...string) {
	t.Helper()
	o := opts(a)
	if cmp.Equal(a, b, o...) {
		return
	}
	cs := ""
	if len(context) > 0 {
		cs = " " + strings.Join(context, ", ") + ":"
	}
	t.Fatalf("found diff:%s %v\nLeft: %v\nRight: %v", cs, cmp.Diff(a, b, o...), a, b)
}
// EventuallyEqual repeatedly calls the fetch function until the result matches the expectation.
// Fails the test with the final diff if no match is seen before the retry deadline.
func EventuallyEqual[T any](t test.Failer, fetch func() T, expected T, retryOpts ...retry.Option) {
	t.Helper()
	var a T
	// Unit tests typically need shorter default; opts can override though
	ro := []retry.Option{retry.Timeout(time.Second * 2), retry.BackoffDelay(time.Millisecond * 2)}
	ro = append(ro, retryOpts...)
	err := retry.UntilSuccess(func() error {
		a = fetch()
		if !cmp.Equal(a, expected, opts(expected)...) {
			return fmt.Errorf("not equal")
		}
		return nil
	}, ro...)
	if err != nil {
		// a holds the last fetched value, so the diff reflects the final attempt.
		t.Fatalf("found diff: %v\nGot: %v\nWant: %v", cmp.Diff(a, expected, opts(expected)...), a, expected)
	}
}
// Error asserts that err is non-nil, failing the test otherwise.
func Error(t test.Failer, err error) {
	t.Helper()
	if err != nil {
		return
	}
	t.Fatal("expected error but got nil")
}
// NoError asserts that err is nil, failing the test otherwise.
func NoError(t test.Failer, err error) {
	t.Helper()
	if err == nil {
		return
	}
	t.Fatalf("expected no error but got: %v", err)
}
// ChannelHasItem asserts that c delivers an element within 5s and returns it.
func ChannelHasItem[T any](t test.Failer, c <-chan T) T {
	t.Helper()
	timeout := time.After(time.Second * 5)
	select {
	case item := <-c:
		return item
	case <-timeout:
		t.Fatalf("failed to receive event after 5s")
	}
	// Unreachable: Fatalf aborts above, but the compiler needs a return.
	return ptr.Empty[T]()
}
// ChannelIsEmpty asserts that no element arrives on c for at least 20ms.
func ChannelIsEmpty[T any](t test.Failer, c <-chan T) {
	t.Helper()
	select {
	case got := <-c:
		t.Fatalf("channel had element, expected empty: %v", got)
	case <-time.After(time.Millisecond * 20):
		// Quiet for the full window: treated as empty.
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package assert
import (
"fmt"
"sync"
"time"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/retry"
)
// Tracker records events as they occur and lets tests assert on their
// presence and ordering.
type Tracker[T comparable] struct {
	// t is the test context used to report assertion failures.
	t test.Failer
	// mu guards events against concurrent Record/Wait calls.
	mu sync.Mutex
	// events is the queue of recorded, not-yet-consumed events.
	events []T
}
// NewTracker builds a tracker which records events that occur, bound to t for
// reporting failures.
func NewTracker[T comparable](t test.Failer) *Tracker[T] {
	return &Tracker[T]{t: t}
}
// Record appends an event to the tracker for later consumption by the Wait* helpers.
func (t *Tracker[T]) Record(event T) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.events = append(t.events, event)
}
// Empty asserts that no unconsumed events remain in the tracker.
func (t *Tracker[T]) Empty() {
	t.t.Helper()
	t.mu.Lock()
	defer t.mu.Unlock()
	if len(t.events) > 0 {
		t.t.Fatalf("unexpected events: %v", t.events)
	}
}
// WaitOrdered waits for each event to happen, in the given order, consuming them
// from the queue. Fails if the head of the queue is ever a different event, or if
// any events remain afterwards.
func (t *Tracker[T]) WaitOrdered(events ...T) {
	t.t.Helper()
	for _, event := range events {
		var err error
		retry.UntilSuccessOrFail(t.t, func() error {
			t.mu.Lock()
			defer t.mu.Unlock()
			if len(t.events) == 0 {
				return fmt.Errorf("no events")
			}
			if t.events[0] != event {
				// A mismatched head can never become a match, so record the
				// failure in err and return nil to exit the retry loop early.
				err = fmt.Errorf("got events %v, want %v", t.events, event)
				return nil
			}
			// clear the event: zero the slot before reslicing so the backing
			// array does not retain a reference to the consumed value.
			t.events[0] = ptr.Empty[T]()
			t.events = t.events[1:]
			return nil
		}, retry.Timeout(time.Second), retry.BackoffDelay(time.Millisecond))
		if err != nil {
			t.t.Fatal(err)
		}
	}
	t.Empty()
}
// WaitUnordered waits for all the given events to happen, in any order, consuming
// them from the queue. Fails if an event arrives that is not in the expected set,
// or if any events remain afterwards.
func (t *Tracker[T]) WaitUnordered(events ...T) {
	t.t.Helper()
	// want is the set of events still outstanding.
	want := map[T]struct{}{}
	for _, e := range events {
		want[e] = struct{}{}
	}
	var err error
	retry.UntilSuccessOrFail(t.t, func() error {
		t.mu.Lock()
		defer t.mu.Unlock()
		if len(t.events) == 0 {
			return fmt.Errorf("no events (want %v)", want)
		}
		got := t.events[0]
		if _, f := want[got]; !f {
			// An unexpected head can never match, so record the failure in err
			// and return nil to exit the retry loop early.
			err = fmt.Errorf("got events %v, want %v", t.events, want)
			return nil
		}
		// clear the event: zero the slot before reslicing so the backing array
		// does not retain a reference to the consumed value.
		t.events[0] = ptr.Empty[T]()
		t.events = t.events[1:]
		delete(want, got)
		if len(want) > 0 {
			return fmt.Errorf("still waiting for %v", want)
		}
		return nil
	}, retry.Timeout(time.Second), retry.BackoffDelay(time.Millisecond))
	if err != nil {
		t.t.Fatal(err)
	}
	t.Empty()
}
// WaitCompare waits for a single event to happen and asserts it satisfies f,
// consuming it from the queue. Fails if the event does not match, or if any
// events remain afterwards.
func (t *Tracker[T]) WaitCompare(f func(T) bool) {
	t.t.Helper()
	var err error
	retry.UntilSuccessOrFail(t.t, func() error {
		t.mu.Lock()
		defer t.mu.Unlock()
		if len(t.events) == 0 {
			return fmt.Errorf("no events")
		}
		got := t.events[0]
		if !f(got) {
			// A non-matching head can never match later, so record the failure
			// in err and return nil to exit the retry loop early.
			err = fmt.Errorf("got events %v, which does not match criteria", t.events)
			return nil
		}
		// clear the event: zero the slot before reslicing so the backing array
		// does not retain a reference to the consumed value.
		t.events[0] = ptr.Empty[T]()
		t.events = t.events[1:]
		return nil
	}, retry.Timeout(time.Second), retry.BackoffDelay(time.Millisecond))
	if err != nil {
		t.t.Fatal(err)
	}
	t.Empty()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package file
import (
"archive/tar"
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/mitchellh/go-homedir"
io2 "github.com/AdamKorcz/bugdetectors/io"
"istio.io/istio/pkg/test"
)
// AsBytes is a simple wrapper around os.ReadFile provided for completeness.
func AsBytes(filename string) ([]byte, error) {
	return os.ReadFile(filename)
}
// AsBytesOrFail reads the file and fails the test on any error.
func AsBytesOrFail(t test.Failer, filename string) []byte {
	t.Helper()
	data, err := AsBytes(filename)
	if err != nil {
		t.Fatal(err)
	}
	return data
}
// MustAsBytes reads the file and panics on any error.
func MustAsBytes(filename string) []byte {
	data, err := AsBytes(filename)
	if err != nil {
		panic(err)
	}
	return data
}
// AsStringArray reads each of the given files and returns their contents as
// strings, in the same order.
func AsStringArray(files ...string) ([]string, error) {
	out := make([]string, 0, len(files))
	for _, path := range files {
		data, err := AsBytes(path)
		if err != nil {
			return nil, err
		}
		out = append(out, string(data))
	}
	return out, nil
}
// AsStringArrayOrFail calls AsStringArray and fails the test on any error.
func AsStringArrayOrFail(t test.Failer, files ...string) []string {
	t.Helper()
	out, err := AsStringArray(files...)
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// AsString reads the file and returns its contents as a string.
func AsString(filename string) (string, error) {
	data, err := AsBytes(filename)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// AsStringOrFail calls AsBytesOrFail and then converts to string.
func AsStringOrFail(t test.Failer, filename string) string {
	t.Helper()
	return string(AsBytesOrFail(t, filename))
}
// MustAsString calls MustAsBytes and then converts to string.
func MustAsString(filename string) string {
	return string(MustAsBytes(filename))
}
// NormalizePath expands the homedir (~) and returns an error if the file doesn't exist.
// An empty input returns "" with no error.
func NormalizePath(originalPath string) (string, error) {
	if originalPath == "" {
		return "", nil
	}
	// trim leading/trailing spaces from the path and if it uses the homedir ~, expand it.
	var err error
	out := strings.TrimSpace(originalPath)
	out, err = homedir.Expand(out)
	if err != nil {
		return "", err
	}
	// Verify that the file exists.
	// NOTE(review): only IsNotExist is treated as an error here; other Stat
	// failures (e.g. permission errors) fall through and return the path as
	// valid — confirm that is intended.
	if _, err := os.Stat(out); os.IsNotExist(err) {
		return "", fmt.Errorf("failed normalizing file %s: %v", originalPath, err)
	}
	return out, nil
}
// ReadTarFile reads a tar archive from filePath and returns the contents of the
// entry whose name matches the file's base name with its extension stripped
// (e.g. "foo.tar" -> entry "foo"). Returns an error if no such entry exists.
func ReadTarFile(filePath string) (string, error) {
	b, err := os.ReadFile(filePath)
	if err != nil {
		return "", err
	}
	tr := tar.NewReader(bytes.NewBuffer(b))
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // End of archive
		}
		if err != nil {
			return "", err
		}
		// Only the entry named after the archive itself is extracted.
		if hdr.Name != strings.TrimSuffix(filepath.Base(filePath), filepath.Ext(filePath)) {
			continue
		}
		// NOTE(review): io2.ReadAll is a fuzz-instrumentation wrapper
		// (github.com/AdamKorcz/bugdetectors) injected around io.ReadAll; this
		// should presumably be plain io.ReadAll(tr) in production code — confirm
		// and remove the instrumentation (and its import) together.
		contents, err := io2.ReadAll(tr, "/src/istio/pkg/test/util/file/file.go:137:20 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
		if err != nil {
			return "", err
		}
		return string(contents), nil
	}
	return "", fmt.Errorf("file not found %v", filePath)
}
// ReadDir returns the names of all files in the given directory. This is not recursive.
// The base path is appended; for example, ReadDir("dir") -> ["dir/file1", "dir/folder1"]
func ReadDir(filePath string, extensions ...string) ([]string, error) {
dir, err := os.ReadDir(filePath)
if err != nil {
return nil, err
}
res := []string{}
for _, d := range dir {
matched := len(extensions) == 0 // If none are set, match anything
for _, ext := range extensions {
if filepath.Ext(d.Name()) == ext {
matched = true
break
}
}
if matched {
res = append(res, filepath.Join(filePath, d.Name()))
}
}
return res, nil
}
// ReadDirOrFail is ReadDir, failing the test on any error.
func ReadDirOrFail(t test.Failer, filePath string, extensions ...string) []string {
	t.Helper()
	out, err := ReadDir(filePath, extensions...)
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// WriteOrFail writes contents to filePath (mode 0777 before umask), failing the
// test on any error.
func WriteOrFail(t test.Failer, filePath string, contents []byte) {
	t.Helper()
	if err := os.WriteFile(filePath, contents, os.ModePerm); err != nil {
		t.Fatal(err)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package retry
import (
"errors"
"fmt"
"time"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/test"
)
// scope is the logger used to report individual retry attempts.
var scope = log.RegisterScope("retry", "logs for retries")

const (
	// DefaultTimeout is the default timeout for the entire retry operation.
	DefaultTimeout = time.Second * 30
	// DefaultDelay is the default delay between successive retry attempts.
	DefaultDelay = time.Millisecond * 10
	// DefaultConverge is the default converge: a single success completes the operation.
	DefaultConverge = 1
)

// defaultConfig is the starting configuration, adjusted by Options.
var defaultConfig = config{
	timeout: DefaultTimeout,
	delay: DefaultDelay,
	delayMax: DefaultDelay * 16, // mirrors BackoffDelay's hardcoded 16x cap
	converge: DefaultConverge,
}

// config holds the knobs for a single retry operation.
type config struct {
	error string // custom failure message used by Until (see Message)
	timeout time.Duration // give up after this long overall
	delay time.Duration // initial delay between attempts; doubles each attempt
	delayMax time.Duration // upper bound for the doubling delay
	converge int // consecutive successes required to complete
	maxAttempts int // optional cap on attempts; 0 means no cap
}
// Option configures a retry operation.
type Option func(cfg *config)
// Timeout sets the timeout for the entire retry operation.
func Timeout(timeout time.Duration) Option {
	return func(cfg *config) {
		cfg.timeout = timeout
	}
}
// Delay sets a fixed delay between successive retry attempts (delayMax is set
// equal to delay, so no backoff occurs).
func Delay(delay time.Duration) Option {
	return func(cfg *config) {
		cfg.delay = delay
		cfg.delayMax = delay
	}
}
// BackoffDelay sets an exponentially-growing delay starting at delay and capped
// at 16x delay.
func BackoffDelay(delay time.Duration) Option {
	return func(cfg *config) {
		cfg.delay = delay
		// Currently, hardcode to 16 backoffs. We can make it configurable if needed
		cfg.delayMax = delay * 16
	}
}
// Converge sets the number of successes in a row needed to count a success.
// This is useful to avoid the case where tests like `coin.Flip() == HEADS` will always
// return success due to random variance.
func Converge(successes int) Option {
	return func(cfg *config) {
		cfg.converge = successes
	}
}
// Message defines a more detailed error message to use when failing (consumed
// by Until via getErrorMessage).
func Message(errorMessage string) Option {
	return func(cfg *config) {
		cfg.error = errorMessage
	}
}
// MaxAttempts allows defining a maximum number of attempts. If unset, only timeout is considered.
func MaxAttempts(attempts int) Option {
	return func(cfg *config) {
		cfg.maxAttempts = attempts
	}
}
// RetriableFunc is a function that can be retried. It returns the final result,
// whether the operation has completed, and any error from this attempt.
type RetriableFunc func() (result any, completed bool, err error)
// UntilSuccess retries fn until it returns nil, or the retry operation times out.
func UntilSuccess(fn func() error, options ...Option) error {
	wrapped := func() (any, bool, error) {
		if err := fn(); err != nil {
			return nil, false, err
		}
		return nil, true, nil
	}
	_, err := UntilComplete(wrapped, options...)
	return err
}
// UntilSuccessOrFail is UntilSuccess, failing t if an error is ultimately returned.
func UntilSuccessOrFail(t test.Failer, fn func() error, options ...Option) {
	t.Helper()
	if err := UntilSuccess(fn, options...); err != nil {
		t.Fatalf("retry.UntilSuccessOrFail: %v", err)
	}
}
var ErrConditionNotMet = errors.New("expected condition not met")
// Until retries fn until it reports true, or the retry operation times out.
func Until(fn func() bool, options ...Option) error {
	check := func() error {
		if fn() {
			return nil
		}
		return getErrorMessage(options)
	}
	return UntilSuccess(check, options...)
}
// UntilOrFail is Until, failing t if an error is ultimately returned.
func UntilOrFail(t test.Failer, fn func() bool, options ...Option) {
	t.Helper()
	if err := Until(fn, options...); err != nil {
		t.Fatalf("retry.UntilOrFail: %v", err)
	}
}
// getErrorMessage resolves the failure error for Until: the configured Message
// when one was provided, otherwise ErrConditionNotMet.
func getErrorMessage(options []Option) error {
	cfg := defaultConfig
	for _, o := range options {
		o(&cfg)
	}
	if cfg.error != "" {
		return errors.New(cfg.error)
	}
	return ErrConditionNotMet
}
// UntilComplete retries the given function, until there is a timeout, or until the function indicates that it has completed.
// Once complete, the returned value and error are returned. With Converge(n),
// n consecutive completed+successful attempts are required before returning.
func UntilComplete(fn RetriableFunc, options ...Option) (any, error) {
	cfg := defaultConfig
	for _, option := range options {
		option(&cfg)
	}
	successes := 0
	attempts := 0
	var lasterr error
	to := time.After(cfg.timeout)
	delay := cfg.delay
	for {
		if cfg.maxAttempts > 0 && attempts >= cfg.maxAttempts {
			return nil, fmt.Errorf("hit max attempts %d attempts (last error: %v)", attempts, lasterr)
		}
		// Non-blocking timeout check before attempting.
		select {
		case <-to:
			return nil, fmt.Errorf("timeout while waiting after %d attempts (last error: %v)", attempts, lasterr)
		default:
		}
		result, completed, err := fn()
		attempts++
		if completed {
			// A successful completed attempt extends the streak; an error resets it.
			if err == nil {
				successes++
			} else {
				successes = 0
			}
			if successes >= cfg.converge {
				return result, err
			}
			// Skip delay if we have a success
			// NOTE(review): a completed attempt with a non-nil error also takes
			// this path — it retries immediately without delay and without
			// updating lasterr. Confirm the busy-retry is intended.
			continue
		}
		// Not completed: the converge streak is broken.
		successes = 0
		if err != nil {
			scope.Debugf("encountered an error on attempt %d: %v", attempts, err)
			lasterr = err
		}
		// Wait for the backoff delay (doubling, capped at delayMax) or the timeout.
		select {
		case <-to:
			convergeStr := ""
			if cfg.converge > 1 {
				convergeStr = fmt.Sprintf(", %d/%d successes", successes, cfg.converge)
			}
			return nil, fmt.Errorf("timeout while waiting after %d attempts%s (last error: %v)", attempts, convergeStr, lasterr)
		case <-time.After(delay):
			delay *= 2
			if delay > cfg.delayMax {
				delay = cfg.delayMax
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tmpl
import (
"fmt"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/file"
)
// Evaluate parses tpl and executes it with the given data, returning the rendered text.
func Evaluate(tpl string, data any) (string, error) {
	parsed, err := Parse(tpl)
	if err != nil {
		return "", err
	}
	return Execute(parsed, data)
}
// EvaluateFile reads the template from filePath and evaluates it with data.
func EvaluateFile(filePath string, data any) (string, error) {
	contents, err := file.AsString(filePath)
	if err != nil {
		return "", err
	}
	return Evaluate(contents, data)
}
// EvaluateOrFail is Evaluate, failing the test on any error.
func EvaluateOrFail(t test.Failer, tpl string, data any) string {
	t.Helper()
	out, err := Evaluate(tpl, data)
	if err != nil {
		t.Fatalf("tmpl.EvaluateOrFail: %v", err)
	}
	return out
}
// EvaluateFileOrFail is EvaluateFile, failing the test on any error.
func EvaluateFileOrFail(t test.Failer, filePath string, data any) string {
	t.Helper()
	out, err := EvaluateFile(filePath, data)
	if err != nil {
		t.Fatalf("tmpl.EvaluateFileOrFail: %v", err)
	}
	return out
}
// MustEvaluate is Evaluate, panicking on any error.
func MustEvaluate(tpl string, data any) string {
	out, err := Evaluate(tpl, data)
	if err != nil {
		panic(fmt.Sprintf("tmpl.MustEvaluate: %v", err))
	}
	return out
}
// MustEvaluateFile is EvaluateFile, panicking on any error.
func MustEvaluateFile(filePath string, data any) string {
	s, err := EvaluateFile(filePath, data)
	if err != nil {
		// The message previously said "tmpl.MustEvaluate", misattributing the
		// panic to the wrong function.
		panic(fmt.Sprintf("tmpl.MustEvaluateFile: %v", err))
	}
	return s
}
// EvaluateAll evaluates each template against the same data, returning the
// rendered results in order.
func EvaluateAll(data any, templates ...string) ([]string, error) {
	out := make([]string, 0, len(templates))
	for _, tpl := range templates {
		rendered, err := Evaluate(tpl, data)
		if err != nil {
			return nil, err
		}
		out = append(out, rendered)
	}
	return out, nil
}
// EvaluateAllFiles reads each file and evaluates its contents as a template
// against the same data.
func EvaluateAllFiles(data any, filePaths ...string) ([]string, error) {
	contents, err := file.AsStringArray(filePaths...)
	if err != nil {
		return nil, err
	}
	return EvaluateAll(data, contents...)
}
// MustEvaluateAll is EvaluateAll, panicking on any error.
func MustEvaluateAll(data any, templates ...string) []string {
	res, err := EvaluateAll(data, templates...)
	if err != nil {
		panic(fmt.Sprintf("tmpl.MustEvaluateAll: %v", err))
	}
	return res
}
// EvaluateAllOrFail is EvaluateAll, failing t on any error.
func EvaluateAllOrFail(t test.Failer, data any, templates ...string) []string {
	t.Helper()
	res, err := EvaluateAll(data, templates...)
	if err != nil {
		t.Fatal(err)
	}
	return res
}
// EvaluateAllFilesOrFail is EvaluateAllFiles, failing t on any error.
func EvaluateAllFilesOrFail(t test.Failer, data any, filePaths ...string) []string {
	t.Helper()
	res, err := EvaluateAllFiles(data, filePaths...)
	if err != nil {
		t.Fatal(err)
	}
	return res
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tmpl
import (
"bytes"
"text/template"
"istio.io/istio/pkg/test"
)
// Execute the template with the given parameters.
func Execute(t *template.Template, data any) (string, error) {
var b bytes.Buffer
if err := t.Execute(&b, data); err != nil {
return "", err
}
return b.String(), nil
}
// ExecuteOrFail is Execute, failing the test on any error.
func ExecuteOrFail(t test.Failer, t2 *template.Template, data any) string {
	t.Helper()
	out, err := Execute(t2, data)
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tmpl
import (
"fmt"
"text/template"
"github.com/Masterminds/sprig/v3"
"istio.io/istio/pkg/test"
)
// Parse parses the given template content, with the sprig function library attached.
func Parse(tpl string) (*template.Template, error) {
	return template.New("test template").Funcs(sprig.TxtFuncMap()).Parse(tpl)
}
// ParseOrFail is Parse, failing the test on any error.
func ParseOrFail(t test.Failer, tpl string) *template.Template {
	t.Helper()
	out, err := Parse(tpl)
	if err != nil {
		t.Fatalf("tmpl.ParseOrFail: %v", err)
	}
	return out
}
// MustParse is Parse, panicking on any error.
func MustParse(tpl string) *template.Template {
	out, err := Parse(tpl)
	if err != nil {
		panic(fmt.Sprintf("tmpl.MustParse: %v", err))
	}
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yml
import (
"fmt"
"reflect"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"sigs.k8s.io/yaml"
"istio.io/istio/pkg/test"
"istio.io/istio/pkg/test/util/tmpl"
)
// ApplyNamespace sets the namespace on every resource in yamlText that does not
// already have one, and returns the rejoined multi-document text.
func ApplyNamespace(yamlText, ns string) (string, error) {
	chunks := SplitString(yamlText)
	out := make([]string, 0, len(chunks))
	for _, c := range chunks {
		withNs, err := applyNamespace(c, ns)
		if err != nil {
			return "", err
		}
		out = append(out, withNs)
	}
	return JoinString(out...), nil
}
// ApplyPullSecret applies the given pull secret to a Deployment yaml by
// strategic-merge-patching spec.template.spec.imagePullSecrets.
func ApplyPullSecret(deploymentYaml string, pullSecret string) (string, error) {
	var deploymentMerge appsv1.Deployment
	// Convert the base yaml to JSON, as required by StrategicMergePatch.
	mainYaml, err := yaml.YAMLToJSON([]byte(deploymentYaml))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in base: %s\n%s", err, mainYaml)
	}
	patchYaml := tmpl.MustEvaluate(`
spec:
  template:
    spec:
      imagePullSecrets:
      - name: {{.pullSecret}}
`, map[string]string{"pullSecret": pullSecret})
	overlayYaml, err := yaml.YAMLToJSON([]byte(patchYaml))
	if err != nil {
		return "", fmt.Errorf("yamlToJSON error in overlay: %s\n%s", err, overlayYaml)
	}
	// deploymentMerge supplies the patch metadata (merge keys) for the Deployment type.
	merged, err := strategicpatch.StrategicMergePatch(mainYaml, overlayYaml, &deploymentMerge)
	if err != nil {
		return "", fmt.Errorf("json merge error (%s) for base object: \n%s\n override object: \n%s", err, mainYaml, overlayYaml)
	}
	resYaml, err := yaml.JSONToYAML(merged)
	if err != nil {
		return "", fmt.Errorf("jsonToYAML error (%s) for merged object: \n%s", err, merged)
	}
	return string(resYaml), nil
}
// MustApplyNamespace is ApplyNamespace, failing the test on any error.
func MustApplyNamespace(t test.Failer, yamlText, ns string) string {
	out, err := ApplyNamespace(yamlText, ns)
	if err != nil {
		t.Fatalf("ApplyNamespace: %v for text %v", err, yamlText)
	}
	return out
}
// ApplyAnnotation sets annotation k=v in the metadata of the given yaml
// document and returns the re-marshaled text.
func ApplyAnnotation(yamlText, k, v string) (string, error) {
	m := make(map[string]any)
	if err := yaml.Unmarshal([]byte(yamlText), &m); err != nil {
		return "", err
	}
	meta, err := ensureChildMap(m, "metadata")
	if err != nil {
		return "", err
	}
	// Re-attach in case ensureChildMap created a fresh map without storing it
	// back into m; otherwise the annotation added below would be lost.
	m["metadata"] = meta
	// yaml.Unmarshal produces map[string]any for nested maps, so an annotations
	// block parsed from the input is NOT a map[string]string; the previous
	// unconditional map[string]string type assertion panicked in that case.
	switch an := meta["annotations"].(type) {
	case map[string]any:
		an[k] = v
	case map[string]string:
		an[k] = v
	case nil:
		meta["annotations"] = map[string]string{k: v}
	default:
		return "", fmt.Errorf("annotations field is not a map: %v", reflect.TypeOf(an))
	}
	by, err := yaml.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(by), nil
}
// applyNamespace sets metadata.namespace to ns on a single yaml document,
// unless a non-empty namespace is already present (then the text is returned
// unchanged).
func applyNamespace(yamlText, ns string) (string, error) {
	m := make(map[string]any)
	if err := yaml.Unmarshal([]byte(yamlText), &m); err != nil {
		return "", err
	}
	meta, err := ensureChildMap(m, "metadata")
	if err != nil {
		return "", err
	}
	if meta["namespace"] != nil && meta["namespace"] != "" {
		return yamlText, nil
	}
	meta["namespace"] = ns
	// Re-attach in case ensureChildMap created a fresh map without storing it
	// back into m; otherwise the namespace set above would be silently dropped
	// from the marshaled output when the document had no metadata block.
	m["metadata"] = meta
	by, err := yaml.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(by), nil
}
// ensureChildMap returns m[name] as a map, creating and attaching an empty one
// when absent. Returns an error if the existing value is not a map.
func ensureChildMap(m map[string]any, name string) (map[string]any, error) {
	c, ok := m[name]
	if !ok {
		c = make(map[string]any)
		// Attach the new child so mutations made by the caller are visible when
		// m is later marshaled; previously the created map was returned but
		// never stored, silently dropping any values written into it.
		m[name] = c
	}
	cm, ok := c.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("child %q field is not a map: %v", name, reflect.TypeOf(c))
	}
	return cm, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yml
import (
"fmt"
"os"
"path"
"strings"
"sync"
)
// Cache tracks the life-cycle of Yaml based resources. It stores single-part Yaml files on disk and updates the
// files as needed, as the resources change.
type Cache struct {
	// mu guards all fields below against concurrent use.
	mu sync.Mutex
	// discriminator is a monotonically increasing counter appended to generated
	// file names to keep them unique.
	discriminator int64
	// resources maps each tracked resource key to its on-disk state.
	resources map[CacheKey]*resourceState
	// dir is the directory the yaml files are written into.
	dir string
}
// CacheKey is a key representing a tracked Yaml based resource, identified by
// group/kind/namespace/name.
type CacheKey struct {
	group string
	kind string
	namespace string
	name string
}
// resourceState holds the parsed yaml part and the on-disk file it was written to.
type resourceState struct {
	part Part
	file string
}
// NewCache returns a new Cache instance that writes files into dir.
func NewCache(dir string) *Cache {
	return &Cache{
		resources: make(map[CacheKey]*resourceState),
		dir: dir,
	}
}
// Apply parses yamlText, writes each document to its own file on disk, and
// records it in the cache. A re-applied resource has its previous file removed
// and replaced by a freshly named one. Returns the cache keys of all parsed
// resources. (The previous stale comment about generating a "diffgram" did not
// match the implementation; the resource is simply rewritten.)
func (c *Cache) Apply(yamlText string) ([]CacheKey, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	parts, err := Parse(yamlText)
	if err != nil {
		return nil, err
	}
	// Note: the original code also built a newKeys set here, but it was never
	// read; the dead code has been removed.
	var result []CacheKey
	for _, p := range parts {
		key := toKey(p.Descriptor)
		result = append(result, key)
		state, found := c.resources[key]
		if found {
			// Remove the stale on-disk copy before writing the new contents.
			if err = c.deleteFile(state.file); err != nil {
				return nil, err
			}
		} else {
			state = &resourceState{}
			c.resources[key] = state
		}
		state.file = c.generateFileName(key)
		state.part = p
		if err = c.writeFile(state.file, p.Contents); err != nil {
			return nil, err
		}
	}
	return result, nil
}
// Delete the resources from the given yamlText
func (c *Cache) Delete(yamlText string) error {
c.mu.Lock()
defer c.mu.Unlock()
parts, err := Parse(yamlText)
if err != nil {
return err
}
for _, p := range parts {
key := toKey(p.Descriptor)
state, found := c.resources[key]
if found {
if err = c.deleteFile(state.file); err != nil {
return err
}
delete(c.resources, key)
}
}
return nil
}
// AllKeys returns all resource keys in the tracker.
func (c *Cache) AllKeys() []CacheKey {
	c.mu.Lock()
	defer c.mu.Unlock()

	var keys []CacheKey
	for key := range c.resources {
		keys = append(keys, key)
	}
	return keys
}

// Clear removes all tracked yaml content, deleting the backing files.
func (c *Cache) Clear() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	for _, state := range c.resources {
		if err := c.deleteFile(state.file); err != nil {
			return err
		}
	}
	c.resources = map[CacheKey]*resourceState{}
	return nil
}

// GetFileFor returns the file that keeps the on-disk state for the given key,
// or "" if the key is not tracked.
func (c *Cache) GetFileFor(k CacheKey) string {
	c.mu.Lock()
	defer c.mu.Unlock()

	if state, ok := c.resources[k]; ok {
		return state.file
	}
	return ""
}

// writeFile stores contents in the given file.
func (c *Cache) writeFile(file string, contents string) error {
	return os.WriteFile(file, []byte(contents), os.ModePerm)
}

// deleteFile removes the given file from disk.
func (c *Cache) deleteFile(file string) error {
	return os.Remove(file)
}

// generateFileName produces a unique file name for the given key by combining
// the sanitized key fields with a monotonically increasing discriminator.
func (c *Cache) generateFileName(key CacheKey) string {
	c.discriminator++
	name := fmt.Sprintf("%s_%s_%s_%s-%d.yaml",
		sanitize(key.group), sanitize(key.kind), sanitize(key.namespace), sanitize(key.name), c.discriminator)
	return path.Join(c.dir, name)
}
// sanitize makes the given value safe for use in a file name by stripping
// path separators ('/') and replacing dots with underscores.
func sanitize(c string) string {
	// strings.ReplaceAll is the modern equivalent of strings.Replace(..., -1).
	c = strings.ReplaceAll(c, "/", "")
	return strings.ReplaceAll(c, ".", "_")
}
// toKey converts a parsed Descriptor into the CacheKey used for tracking.
func toKey(d Descriptor) CacheKey {
	key := CacheKey{}
	key.group = d.Group
	key.kind = d.Kind
	key.namespace = d.Metadata.Namespace
	key.name = d.Metadata.Name
	return key
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yml
import (
"fmt"
"os"
"strings"
"istio.io/istio/pkg/test"
)
// docType is the file-name suffix applied to a generated partition file.
type docType string

const (
	// namespacesAndCRDs is the suffix for the partition that receives the
	// "Namespace" documents (see splitContentsToFiles).
	namespacesAndCRDs docType = "namespaces_and_crds"
	// misc is the suffix for the partition that receives the remaining
	// (CustomResourceDefinition) documents.
	misc docType = "misc"
)

// FileWriter write YAML content to files.
type FileWriter interface {
	// WriteYAML writes the given YAML content to one or more YAML files.
	WriteYAML(filenamePrefix string, contents ...string) ([]string, error)
	// WriteYAMLOrFail calls WriteYAML and fails the test if an error occurs.
	WriteYAMLOrFail(t test.Failer, filenamePrefix string, contents ...string) []string
}

// writerImpl is the default FileWriter, backed by a working directory.
type writerImpl struct {
	// workDir is the directory that files are written into.
	workDir string
}
// NewFileWriter creates a new FileWriter that stores files under workDir.
func NewFileWriter(workDir string) FileWriter {
	return &writerImpl{workDir: workDir}
}

// WriteYAML writes the given YAML content to one or more YAML files.
func (w *writerImpl) WriteYAML(filenamePrefix string, contents ...string) ([]string, error) {
	merged := JoinString(contents...)
	files, err := splitContentsToFiles(w.workDir, merged, filenamePrefix)
	if err != nil {
		return nil, err
	}
	if len(files) > 0 {
		return append([]string{}, files...), nil
	}
	// Everything fit into a single document; write it to one temp file.
	f, err := writeContentsToTempFile(w.workDir, merged)
	if err != nil {
		return nil, err
	}
	return []string{f}, nil
}
// WriteYAMLOrFail calls WriteYAML and fails the test if an error occurs.
func (w *writerImpl) WriteYAMLOrFail(t test.Failer, filenamePrefix string, contents ...string) []string {
	t.Helper()
	out, err := w.WriteYAML(filenamePrefix, contents...)
	if err != nil {
		t.Fatal(err)
	}
	return out
}
// writeContentsToTempFile writes contents to a newly created temp file under
// workDir and returns its name. On any failure, the partially written file is
// removed and an empty name is returned.
func writeContentsToTempFile(workDir, contents string) (filename string, err error) {
	defer func() {
		// Clean up the partial file if anything below failed.
		if err != nil && filename != "" {
			_ = os.Remove(filename)
			filename = ""
		}
	}()

	f, cerr := os.CreateTemp(workDir, yamlToFilename(contents)+".*.yaml")
	if cerr != nil {
		err = cerr
		return
	}
	defer f.Close()

	filename = f.Name()
	_, err = f.WriteString(contents)
	return
}
// yamlToFilename derives a human-readable file name from the resource kinds
// present in the given multi-part YAML document.
func yamlToFilename(contents string) string {
	spl := SplitYamlByKind(contents)
	delete(spl, "")
	types := []string{}
	for k := range spl {
		types = append(types, k)
	}
	switch len(types) {
	case 0:
		return "empty"
	case 1:
		// For a single kind, include the resource name when one is available.
		// BUG FIX: the condition was inverted (len(m) == 0), which would have
		// indexed m[0] on an empty slice and panicked.
		m := GetMetadata(contents)
		if len(m) > 0 {
			return fmt.Sprintf("%s.%s", types[0], m[0].Name)
		}
		return types[0]
	case 2, 3, 4:
		return strings.Join(types, "-")
	default:
		// Cap the name at the first four kinds to keep file names bounded.
		return strings.Join(types[:4], "-") + "-more"
	}
}
// splitContentsToFiles partitions content into a namespaces/CRDs document and
// a misc document, writing each to its own temp file. If either partition is
// empty it returns an empty list, indicating that the original content should
// be used as-is.
func splitContentsToFiles(workDir, content, filenamePrefix string) ([]string, error) {
	byKind := SplitYamlByKind(content)
	// Note: locals renamed to avoid shadowing the package-level docType consts.
	nsDoc := &yamlDoc{
		docType: namespacesAndCRDs,
		content: byKind["Namespace"],
	}
	miscDoc := &yamlDoc{
		docType: misc,
		content: byKind["CustomResourceDefinition"],
	}

	// If all elements were put into a single doc just return an empty list, indicating that the original
	// content should be used.
	docs := []*yamlDoc{nsDoc, miscDoc}
	for _, doc := range docs {
		if doc.content == "" {
			return []string{}, nil
		}
	}

	files := make([]string, 0, len(docs))
	for _, doc := range docs {
		f, err := doc.toTempFile(workDir, filenamePrefix)
		if err != nil {
			return nil, err
		}
		files = append(files, f)
	}
	return files, nil
}
// yamlDoc is a partition of YAML content destined for a single file.
type yamlDoc struct {
	content string
	docType docType
}

// toTempFile writes the document to a new temp file under workDir and returns
// the file's name.
func (d *yamlDoc) toTempFile(workDir, fileNamePrefix string) (string, error) {
	pattern := fmt.Sprintf("%s_%s.yaml", fileNamePrefix, d.docType)
	f, err := os.CreateTemp(workDir, pattern)
	if err != nil {
		return "", err
	}
	defer func() { _ = f.Close() }()

	if _, err := f.WriteString(d.content); err != nil {
		return "", err
	}
	return f.Name(), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yml
import (
"encoding/json"
"fmt"
"strings"
"sigs.k8s.io/yaml"
)
// Metadata metadata for a kubernetes resource.
type Metadata struct {
	// Name is the resource's metadata.name field.
	Name string `json:"name"`
	// Namespace is the resource's metadata.namespace field.
	Namespace string `json:"namespace"`
}

// Descriptor a descriptor for a kubernetes resource.
type Descriptor struct {
	Kind string `json:"kind"`
	// Group is populated by ParseDescriptor from the "group/version" form of
	// apiVersion; it is empty for core-group resources.
	Group string `json:"group"`
	// APIVersion holds only the version portion after ParseDescriptor runs.
	APIVersion string `json:"apiVersion"`
	Metadata Metadata `json:"metadata"`
}

// Part is a single-part yaml source, along with its descriptor.
type Part struct {
	// Contents is the raw YAML text of this part.
	Contents string
	// Descriptor describes the resource declared by Contents.
	Descriptor Descriptor
}
// Parse parses the given multi-part yaml text, and returns as Parts.
func Parse(yamlText string) ([]Part, error) {
	pieces := SplitString(yamlText)
	parts := make([]Part, 0, len(pieces))
	for _, piece := range pieces {
		if piece == "" {
			continue
		}
		d, err := ParseDescriptor(piece)
		if err != nil {
			return nil, err
		}
		parts = append(parts, Part{
			Contents:   piece,
			Descriptor: d,
		})
	}
	return parts, nil
}

// ParseDescriptor parses the given single-part yaml and generates the descriptor.
func ParseDescriptor(yamlText string) (Descriptor, error) {
	jsonText, err := yaml.YAMLToJSON([]byte(yamlText))
	if err != nil {
		return Descriptor{}, fmt.Errorf("failed converting YAML to JSON: %v", err)
	}
	var d Descriptor
	if err := json.Unmarshal(jsonText, &d); err != nil {
		return Descriptor{}, fmt.Errorf("failed parsing descriptor: %v", err)
	}

	// apiVersion is either "version" (core group) or "group/version".
	segments := strings.Split(d.APIVersion, "/")
	switch len(segments) {
	case 1:
		d.APIVersion = segments[0]
	case 2:
		d.Group = segments[0]
		d.APIVersion = segments[1]
	default:
		return Descriptor{}, fmt.Errorf("unexpected apiGroup: %q", d.APIVersion)
	}
	return d, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package yml
import (
"regexp"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/yaml"
)
const (
	// joinSeparator is inserted between parts when joining a multi-part YAML
	// document (see JoinString).
	joinSeparator = "\n---\n"
)

// Split where the '---' appears at the very beginning of a line. This will avoid
// accidentally splitting in cases where yaml resources contain nested yaml (which
// is indented).
var splitRegex = regexp.MustCompile(`(^|\n)---`)
// SplitYamlByKind splits the given YAML into parts indexed by kind.
func SplitYamlByKind(content string) map[string]string {
	result := map[string]string{}
	for _, cfg := range SplitString(content) {
		var typeMeta metav1.TypeMeta
		if err := yaml.Unmarshal([]byte(cfg), &typeMeta); err != nil {
			// Ignore invalid parts. This most commonly happens when it's empty or contains only comments.
			continue
		}
		result[typeMeta.Kind] = JoinString(result[typeMeta.Kind], cfg)
	}
	return result
}
// GetMetadata parses the given multi-part YAML and returns the ObjectMeta of
// each well-formed part, skipping parts that fail to parse.
// (The previous doc comment was copy-pasted from SplitYamlByKind.)
func GetMetadata(content string) []metav1.ObjectMeta {
	cfgs := SplitString(content)
	result := []metav1.ObjectMeta{}
	for _, cfg := range cfgs {
		var m metav1.ObjectMeta
		if e := yaml.Unmarshal([]byte(cfg), &m); e != nil {
			// Ignore invalid parts. This most commonly happens when it's empty or contains only comments.
			continue
		}
		result = append(result, m)
	}
	return result
}
// SplitString splits the given yaml doc if it's multipart document.
func SplitString(yamlText string) []string {
	out := make([]string, 0)
	for _, chunk := range splitRegex.Split(yamlText, -1) {
		if trimmed := strings.TrimSpace(chunk); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}

// JoinString joins the given yaml parts into a single multipart document.
func JoinString(parts ...string) string {
	// Each part may itself be a multi-document; split and trim every part
	// before joining with the document separator.
	var pieces []string
	for _, part := range parts {
		pieces = append(pieces, SplitString(part)...)
	}
	return strings.Join(pieces, joinSeparator)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package concurrent
import (
"time"
"istio.io/istio/pkg/util/sets"
)
// Debouncer merges bursts of events read from a channel and delivers them as
// combined sets to a push function once the stream quiesces.
type Debouncer[T comparable] struct{}

// Run reads events from ch until stopCh is closed, debouncing them: a push is
// triggered only after debounceMinInterval of quiet, or once events have been
// pending for debounceMaxInterval. At most one pushFn call is in flight at a
// time; events arriving during a push are merged into the next set.
func (d *Debouncer[T]) Run(ch chan T, stopCh <-chan struct{}, debounceMinInterval, debounceMaxInterval time.Duration, pushFn func(sets.Set[T])) {
	var timeChan <-chan time.Time
	var startDebounce time.Time
	var lastConfigUpdateTime time.Time

	pushCounter := 0
	debouncedEvents := 0

	// Keeps track of the push requests. If updates are debounce they will be merged.
	combinedEvents := sets.New[T]()

	// free is true while no pushFn call is in flight.
	free := true
	freeCh := make(chan struct{}, 1)

	push := func(events sets.Set[T], debouncedEvents int, startDebounce time.Time) {
		pushFn(events)
		freeCh <- struct{}{}
	}

	pushWorker := func() {
		eventDelay := time.Since(startDebounce)
		quietTime := time.Since(lastConfigUpdateTime)
		// it has been too long or quiet enough
		if eventDelay >= debounceMaxInterval || quietTime >= debounceMinInterval {
			// Only push when there is something to deliver. The previous
			// check (combinedEvents != nil) was always true, so a stale timer
			// firing after a push could invoke pushFn with an empty set.
			if len(combinedEvents) > 0 {
				pushCounter++
				free = false
				go push(combinedEvents, debouncedEvents, startDebounce)
				combinedEvents = sets.New[T]()
				debouncedEvents = 0
			}
		} else {
			// Not quiet long enough yet; re-arm the timer for the remainder.
			timeChan = time.After(debounceMinInterval - quietTime)
		}
	}

	for {
		select {
		case <-freeCh:
			free = true
			pushWorker()
		case r := <-ch:
			lastConfigUpdateTime = time.Now()
			if debouncedEvents == 0 {
				// First event of a new burst: start the debounce window.
				timeChan = time.After(debounceMinInterval)
				startDebounce = lastConfigUpdateTime
			}
			debouncedEvents++
			combinedEvents = combinedEvents.Insert(r)
		case <-timeChan:
			if free {
				pushWorker()
			}
		case <-stopCh:
			return
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gogoprotomarshal
import (
"strings"
"github.com/gogo/protobuf/jsonpb" // nolint: depguard
"github.com/gogo/protobuf/proto" // nolint: depguard
"istio.io/istio/pkg/log"
)
// ApplyJSON unmarshals a JSON string into a proto message. Unknown fields are allowed
func ApplyJSON(js string, pb proto.Message) error {
	reader := strings.NewReader(js)
	m := jsonpb.Unmarshaler{}
	err := m.Unmarshal(reader, pb)
	if err == nil {
		return nil
	}
	log.Debugf("Failed to decode proto: %q. Trying decode with AllowUnknownFields=true", err)
	// Retry from the start, tolerating fields the message doesn't declare.
	m.AllowUnknownFields = true
	reader.Reset(js)
	return m.Unmarshal(reader, pb)
}

// ApplyJSONStrict unmarshals a JSON string into a proto message.
func ApplyJSONStrict(js string, pb proto.Message) error {
	m := jsonpb.Unmarshaler{}
	return m.Unmarshal(strings.NewReader(js), pb)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hash
import (
"encoding/hex"
"github.com/cespare/xxhash/v2"
)
// Hash is a streaming hasher backed by xxhash.
type Hash interface {
	// Write adds p to the running hash. The underlying hasher's error is
	// always nil, so only the byte count is returned.
	Write(p []byte) (n int)
	// WriteString adds s to the running hash.
	WriteString(s string) (n int)
	// Sum returns the current digest as a hex-encoded string.
	Sum() string
	// Sum64 returns the current digest as a uint64.
	Sum64() uint64
}

// instance implements Hash on top of an xxhash digest.
type instance struct {
	hash *xxhash.Digest
}

// Compile-time check that instance satisfies Hash.
var _ Hash = &instance{}
// New returns an empty Hash.
func New() Hash {
	return &instance{hash: xxhash.New()}
}

// Write wraps the underlying Write call. xxhash's Write always returns a nil
// error, so only the byte count is surfaced to simplify caller error handling.
func (i *instance) Write(p []byte) (n int) {
	n, _ = i.hash.Write(p)
	return n
}

// WriteString wraps the underlying WriteString call; as with Write, the error
// is always nil, so only the byte count is surfaced.
func (i *instance) WriteString(s string) (n int) {
	n, _ = i.hash.WriteString(s)
	return n
}

// Sum64 returns the current digest as a uint64.
func (i *instance) Sum64() uint64 {
	return i.hash.Sum64()
}

// Sum returns the current digest, hex-encoded.
func (i *instance) Sum() string {
	return hex.EncodeToString(i.hash.Sum(nil))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package identifier
// Undefined is the sentinel value for an unspecified identifier.
const Undefined = ""

// IsSameOrEmpty reports whether a and b match, treating an undefined value on
// either side as a wildcard.
func IsSameOrEmpty(a, b string) bool {
	if a == Undefined || b == Undefined {
		return true
	}
	return a == b
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istiomultierror
import (
"fmt"
"strings"
"github.com/hashicorp/go-multierror"
)
// MultiErrorFormat provides a format for multierrors. This matches the default format, but if there
// is only one error we will not expand to multiple lines.
func MultiErrorFormat() multierror.ErrorFormatFunc {
	return func(es []error) string {
		if len(es) == 1 {
			return es[0].Error()
		}
		points := make([]string, len(es))
		for i := range es {
			points[i] = fmt.Sprintf("* %s", es[i])
		}
		return fmt.Sprintf(
			"%d errors occurred:\n\t%s\n\n",
			len(es), strings.Join(points, "\n\t"))
	}
}

// New returns a multierror.Error pre-configured with MultiErrorFormat.
func New() *multierror.Error {
	return &multierror.Error{ErrorFormat: MultiErrorFormat()}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"net/netip"
"istio.io/istio/pkg/log"
)
// IsValidIPAddress Tell whether the given IP address is valid or not
func IsValidIPAddress(ip string) bool {
	addr, err := netip.ParseAddr(ip)
	return err == nil && addr.IsValid()
}

// IsIPv6Address returns if ip is IPv6.
func IsIPv6Address(ip string) bool {
	addr, err := netip.ParseAddr(ip)
	return err == nil && addr.Is6()
}

// IsIPv4Address returns if ip is IPv4.
func IsIPv4Address(ip string) bool {
	addr, err := netip.ParseAddr(ip)
	return err == nil && addr.Is4()
}
// IPsSplitV4V6 returns two slice of ipv4 and ipv6 string slice, dropping
// entries that fail to parse.
func IPsSplitV4V6(ips []string) (ipv4 []string, ipv6 []string) {
	// Delegate parsing/partitioning, then render back to strings.
	v4, v6 := ParseIPsSplitToV4V6(ips)
	for _, addr := range v4 {
		ipv4 = append(ipv4, addr.String())
	}
	for _, addr := range v6 {
		ipv6 = append(ipv6, addr.String())
	}
	return ipv4, ipv6
}

// ParseIPsSplitToV4V6 returns two slice of ipv4 and ipv6 netip.Addr, dropping
// entries that fail to parse.
func ParseIPsSplitToV4V6(ips []string) (ipv4 []netip.Addr, ipv6 []netip.Addr) {
	for _, s := range ips {
		addr, err := netip.ParseAddr(s)
		switch {
		case err != nil:
			log.Debugf("ignoring un-parsable IP address: %v", err)
		case addr.Is4():
			ipv4 = append(ipv4, addr)
		case addr.Is6():
			ipv6 = append(ipv6, addr)
		default:
			log.Debugf("ignoring un-parsable IP address: %v", addr)
		}
	}
	return ipv4, ipv6
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package protomarshal provides operations to marshal and unmarshal protobuf objects.
// Unlike the rest of this repo, which uses the new google.golang.org/protobuf API, this package
// explicitly uses the legacy jsonpb package. This is due to a number of compatibility concerns with the new API:
// * https://github.com/golang/protobuf/issues/1374
// * https://github.com/golang/protobuf/issues/1373
package protomarshal
import (
"bytes"
"encoding/json"
"errors"
"strings"
"github.com/golang/protobuf/jsonpb" // nolint: depguard
customBytes "github.com/AdamKorcz/bugdetectors/bytes"
legacyproto "github.com/golang/protobuf/proto" // nolint: staticcheck
"google.golang.org/protobuf/encoding/protojson" // nolint: depguard
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"sigs.k8s.io/yaml"
"istio.io/istio/pkg/log"
)
var (
	// unmarshaler tolerates fields that the target message does not declare.
	unmarshaler = jsonpb.Unmarshaler{AllowUnknownFields: true}
	// strictUnmarshaler rejects unknown fields.
	strictUnmarshaler = jsonpb.Unmarshaler{}
)
// Unmarshal parses JSON bytes into m, rejecting unknown fields.
func Unmarshal(b []byte, m proto.Message) error {
	return strictUnmarshaler.Unmarshal(bytes.NewReader(b), legacyproto.MessageV1(m))
}

// UnmarshalString parses a JSON string into m, rejecting unknown fields.
func UnmarshalString(s string, m proto.Message) error {
	return Unmarshal([]byte(s), m)
}

// UnmarshalAllowUnknown parses JSON bytes into m, ignoring unknown fields.
func UnmarshalAllowUnknown(b []byte, m proto.Message) error {
	return unmarshaler.Unmarshal(bytes.NewReader(b), legacyproto.MessageV1(m))
}

// UnmarshalAllowUnknownWithAnyResolver is like UnmarshalAllowUnknown, but uses
// the supplied resolver for google.protobuf.Any fields.
func UnmarshalAllowUnknownWithAnyResolver(anyResolver jsonpb.AnyResolver, b []byte, m proto.Message) error {
	u := &jsonpb.Unmarshaler{
		AllowUnknownFields: true,
		AnyResolver:        anyResolver,
	}
	return u.Unmarshal(bytes.NewReader(b), legacyproto.MessageV1(m))
}

// UnmarshalWithGlobalTypesResolver parses JSON bytes into m using protojson
// and the global type registry.
func UnmarshalWithGlobalTypesResolver(b []byte, m proto.Message) error {
	return protojson.Unmarshal(b, m)
}
// ToJSON marshals a proto to canonical JSON
func ToJSON(msg proto.Message) (string, error) {
	return ToJSONWithIndent(msg, "")
}

// Marshal marshals a proto to canonical JSON
func Marshal(msg proto.Message) ([]byte, error) {
	s, err := ToJSONWithIndent(msg, "")
	if err != nil {
		return nil, err
	}
	return []byte(s), nil
}

// MarshalIndent marshals a proto to canonical JSON with indentation
func MarshalIndent(msg proto.Message, indent string) ([]byte, error) {
	s, err := ToJSONWithIndent(msg, indent)
	if err != nil {
		return nil, err
	}
	return []byte(s), nil
}

// MarshalIndentWithGlobalTypesResolver marshals a proto to canonical JSON with indentation
// and multiline while using generic types resolver
func MarshalIndentWithGlobalTypesResolver(msg proto.Message, indent string) ([]byte, error) {
	opts := protojson.MarshalOptions{
		Multiline: true,
		Indent:    indent,
	}
	return opts.Marshal(msg)
}
// MarshalProtoNames marshals a proto to canonical JSON original protobuf names
func MarshalProtoNames(msg proto.Message) ([]byte, error) {
	if msg == nil {
		return nil, errors.New("unexpected nil message")
	}
	// Marshal from proto to json bytes
	m := jsonpb.Marshaler{OrigName: true}
	buf := &bytes.Buffer{}
	err := m.Marshal(buf, legacyproto.MessageV1(msg))
	if err != nil {
		return nil, err
	}
	// NOTE(review): customBytes.CheckLen is fuzzing instrumentation from
	// github.com/AdamKorcz/bugdetectors that appears to have leaked into this
	// source (note the injected file/line string argument). It presumably
	// passes buf.Bytes() through after a length check; this line should
	// almost certainly be `return buf.Bytes(), nil`, with the instrumentation
	// call and its import removed together — confirm before changing.
	return customBytes.CheckLen(buf.Bytes(),
		"/src/istio/pkg/util/protomarshal/protomarshal.go:111:9 (May be slightly inaccurate) NEW_LINEbuf.Bytes()"), nil
}
// ToJSONWithIndent marshals a proto to canonical JSON with pretty printed string
func ToJSONWithIndent(msg proto.Message, indent string) (string, error) {
	return ToJSONWithOptions(msg, indent, false)
}

// ToJSONWithOptions marshals a proto to canonical JSON with options to indent and
// print enums' int values
func ToJSONWithOptions(msg proto.Message, indent string, enumsAsInts bool) (string, error) {
	if msg == nil {
		return "", errors.New("unexpected nil message")
	}
	// Marshal from proto to json bytes
	marshaler := jsonpb.Marshaler{Indent: indent, EnumsAsInts: enumsAsInts}
	return marshaler.MarshalToString(legacyproto.MessageV1(msg))
}

// ToYAML marshals a proto to canonical YAML
func ToYAML(msg proto.Message) (string, error) {
	js, err := ToJSON(msg)
	if err != nil {
		return "", err
	}
	yml, err := yaml.JSONToYAML([]byte(js))
	return string(yml), err
}
// ToJSONMap converts a proto message to a generic map using canonical JSON encoding
// JSON encoding is specified here: https://developers.google.com/protocol-buffers/docs/proto3#json
func ToJSONMap(msg proto.Message) (map[string]any, error) {
	js, err := ToJSON(msg)
	if err != nil {
		return nil, err
	}
	// Decode the canonical JSON into a generic map.
	var data map[string]any
	if err := json.Unmarshal([]byte(js), &data); err != nil {
		return nil, err
	}
	return data, nil
}
// ApplyJSON unmarshals a JSON string into a proto message.
func ApplyJSON(js string, pb proto.Message) error {
	reader := strings.NewReader(js)
	m := jsonpb.Unmarshaler{}
	err := m.Unmarshal(reader, legacyproto.MessageV1(pb))
	if err == nil {
		return nil
	}
	log.Debugf("Failed to decode proto: %q. Trying decode with AllowUnknownFields=true", err)
	// Retry from the start, tolerating unknown fields.
	m.AllowUnknownFields = true
	reader.Reset(js)
	return m.Unmarshal(reader, legacyproto.MessageV1(pb))
}

// ApplyJSONStrict unmarshals a JSON string into a proto message.
func ApplyJSONStrict(js string, pb proto.Message) error {
	m := jsonpb.Unmarshaler{}
	return m.Unmarshal(strings.NewReader(js), legacyproto.MessageV1(pb))
}

// ApplyYAML unmarshals a YAML string into a proto message.
// Unknown fields are allowed.
func ApplyYAML(yml string, pb proto.Message) error {
	js, err := yaml.YAMLToJSON([]byte(yml))
	if err != nil {
		return err
	}
	return ApplyJSON(string(js), pb)
}

// ApplyYAMLStrict unmarshals a YAML string into a proto message.
// Unknown fields are not allowed.
func ApplyYAMLStrict(yml string, pb proto.Message) error {
	js, err := yaml.YAMLToJSON([]byte(yml))
	if err != nil {
		return err
	}
	return ApplyJSONStrict(string(js), pb)
}
// ShallowCopy resets dst and copies every populated field of src into it.
// Panics if dst and src are different message types.
func ShallowCopy(dst, src proto.Message) {
	dstRefl := dst.ProtoReflect()
	srcRefl := src.ProtoReflect()
	if dstRefl.Type() != srcRefl.Type() {
		panic("mismatching type")
	}
	proto.Reset(dst)
	srcRefl.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		dstRefl.Set(fd, v)
		return true
	})
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sets
import (
"fmt"
"golang.org/x/exp/constraints"
"istio.io/istio/pkg/slices"
)
// Set is a hash set of comparable elements, stored as map keys.
type Set[T comparable] map[T]struct{}

// String is a Set of strings.
type String = Set[string]
// NewWithLength returns an empty Set with the given capacity.
// It's only a hint, not a limitation.
func NewWithLength[T comparable](l int) Set[T] {
	return make(Set[T], l)
}

// New creates a new Set with the given items.
func New[T comparable](items ...T) Set[T] {
	return NewWithLength[T](len(items)).InsertAll(items...)
}

// Insert a single item to this Set.
func (s Set[T]) Insert(item T) Set[T] {
	s[item] = struct{}{}
	return s
}

// InsertAll adds the items to this Set.
func (s Set[T]) InsertAll(items ...T) Set[T] {
	for i := range items {
		s[items[i]] = struct{}{}
	}
	return s
}

// Delete removes an item from the set.
func (s Set[T]) Delete(item T) Set[T] {
	delete(s, item)
	return s
}

// DeleteAll removes items from the set.
func (s Set[T]) DeleteAll(items ...T) Set[T] {
	for i := range items {
		delete(s, items[i])
	}
	return s
}
// Merge a set of objects that are in s2 into s
// For example:
// s = {a1, a2, a3}
// s2 = {a3, a4, a5}
// s.Merge(s2) = {a1, a2, a3, a4, a5}
func (s Set[T]) Merge(s2 Set[T]) Set[T] {
	for item := range s2 {
		s.Insert(item)
	}
	return s
}

// Copy this set.
func (s Set[T]) Copy() Set[T] {
	out := NewWithLength[T](len(s))
	for item := range s {
		out.Insert(item)
	}
	return out
}

// Union returns a set of objects that are in s or s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Union(s2) = s2.Union(s) = {a1, a2, a3, a4, a5}
func (s Set[T]) Union(s2 Set[T]) Set[T] {
	out := s.Copy()
	out.Merge(s2)
	return out
}

// Difference returns a set of objects that are not in s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Difference(s2) = {a3}
// s2.Difference(s) = {a4, a5}
func (s Set[T]) Difference(s2 Set[T]) Set[T] {
	out := New[T]()
	for item := range s {
		if _, dup := s2[item]; !dup {
			out.Insert(item)
		}
	}
	return out
}
// Diff takes a pair of Sets, and returns the elements that occur only on the left and right set.
func (s Set[T]) Diff(other Set[T]) (left []T, right []T) {
	for item := range s {
		if !other.Contains(item) {
			left = append(left, item)
		}
	}
	for item := range other {
		if !s.Contains(item) {
			right = append(right, item)
		}
	}
	return left, right
}

// Intersection returns a set of objects that are common between s and s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s.Intersection(s2) = {a1, a2}
func (s Set[T]) Intersection(s2 Set[T]) Set[T] {
	out := New[T]()
	for item := range s {
		if s2.Contains(item) {
			out.Insert(item)
		}
	}
	return out
}

// SupersetOf returns true if s contains all elements of s2
// For example:
// s = {a1, a2, a3}
// s2 = {a1, a2, a3, a4, a5}
// s.SupersetOf(s2) = false
// s2.SupersetOf(s) = true
func (s Set[T]) SupersetOf(s2 Set[T]) bool {
	switch {
	case s2 == nil:
		return true
	case len(s2) > len(s):
		// s2 has more elements than s, so it cannot be a subset.
		return false
	}
	for item := range s2 {
		if !s.Contains(item) {
			return false
		}
	}
	return true
}
// UnsortedList returns the slice with contents in random order.
func (s Set[T]) UnsortedList() []T {
	out := make([]T, 0, len(s))
	for item := range s {
		out = append(out, item)
	}
	return out
}

// SortedList returns the slice with contents sorted.
func SortedList[T constraints.Ordered](s Set[T]) []T {
	out := s.UnsortedList()
	slices.Sort(out)
	return out
}

// InsertContains inserts the item into the set and returns if it was already present.
// Example:
//
// if !set.InsertContains(item) {
// fmt.Println("Added item for the first time", item)
// }
func (s Set[T]) InsertContains(item T) bool {
	if _, present := s[item]; present {
		return true
	}
	s[item] = struct{}{}
	return false
}

// Contains returns whether the given item is in the set.
func (s Set[T]) Contains(item T) bool {
	_, found := s[item]
	return found
}

// ContainsAll is alias of SupersetOf
// returns true if s contains all elements of s2
func (s Set[T]) ContainsAll(s2 Set[T]) bool {
	return s.SupersetOf(s2)
}

// Equals checks whether the given set is equal to the current set.
func (s Set[T]) Equals(other Set[T]) bool {
	if len(s) != len(other) {
		return false
	}
	for item := range s {
		if !other.Contains(item) {
			return false
		}
	}
	return true
}

// Len returns the number of elements in this Set.
func (s Set[T]) Len() int {
	return len(s)
}

// IsEmpty indicates whether the set is the empty set.
func (s Set[T]) IsEmpty() bool {
	return s.Len() == 0
}

// String returns a string representation of the set.
// Be aware that the order of elements is random so the string representation may vary.
// Use it only for debugging and logging.
func (s Set[T]) String() string {
	return fmt.Sprintf("%v", s.UnsortedList())
}
// InsertOrNew inserts v into the set stored at m[k], creating a fresh
// single-element set when the key is absent.
// Works well with DeleteCleanupLast.
// Example:
//
//	InsertOrNew(m, key, value)
func InsertOrNew[K comparable, T comparable](m map[K]Set[T], k K, v T) {
	if existing, ok := m[k]; ok {
		existing.Insert(v)
	} else {
		m[k] = New(v)
	}
}
// DeleteCleanupLast removes v from the set stored at m[k], and removes the
// map key entirely once its set becomes empty.
// Works well with InsertOrNew.
// Example:
//
//	sets.DeleteCleanupLast(m, key, value)
func DeleteCleanupLast[K comparable, T comparable](m map[K]Set[T], k K, v T) {
	remaining := m[k].Delete(v)
	if remaining.IsEmpty() {
		delete(m, k)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package strcase
import (
"bytes"
"strings"
)
// CamelCase converts the string into camel case string, e.g. "foo_bar"
// becomes "FooBar". Words are delimited by '_' or '-' separators, by
// upper-case letters, or by digits. A leading separator is replaced with
// 'X' so the result still begins with a capital letter.
func CamelCase(s string) string {
	if s == "" {
		return ""
	}
	t := make([]byte, 0, 32)
	i := 0
	if isWordSeparator(s[0]) {
		// Need a capital letter; drop the '_'.
		t = append(t, 'X')
		i++
	}
	// Invariant: if the next letter is lower case, it must be converted
	// to upper case.
	// That is, we process a word at a time, where words are marked by _, - or
	// upper case letter. Digits are treated as words.
	for ; i < len(s); i++ {
		c := s[i]
		if isWordSeparator(c) {
			// Skip the separator and capitalize the next letter.
			continue
		}
		if isASCIIDigit(c) {
			t = append(t, c)
			continue
		}
		// Assume we have a letter now - if not, it's a bogus identifier.
		// The next word is a sequence of characters that must start upper case.
		if isASCIILower(c) {
			c ^= ' ' // Make it a capital letter by flipping the ASCII case bit.
		}
		t = append(t, c) // Guaranteed not lower case.
		// Accept lower case sequence that follows (note: advances the outer
		// loop index so the run is consumed exactly once).
		for i+1 < len(s) && isASCIILower(s[i+1]) {
			i++
			t = append(t, s[i])
		}
	}
	return string(t)
}
// CamelCaseWithSeparator splits n by sep, camel-cases each piece, and
// concatenates the results (the separator itself is dropped).
func CamelCaseWithSeparator(n string, sep string) string {
	parts := strings.Split(n, sep)
	for i := range parts {
		parts[i] = CamelCase(parts[i])
	}
	return strings.Join(parts, "")
}
// CamelCaseToKebabCase converts "MyName" to "my-name". A few acronym-heavy
// legacy type names are special-cased to keep their historical kebab form.
func CamelCaseToKebabCase(s string) string {
	switch s {
	case "HTTPAPISpec":
		return "http-api-spec"
	case "HTTPRoute":
		return "http-route"
	case "HTTPAPISpecBinding":
		return "http-api-spec-binding"
	}
	// General rule: each upper-case letter starts a new dash-separated,
	// lower-cased segment (except at the very beginning).
	var b bytes.Buffer
	for i := 0; i < len(s); i++ {
		c := s[i]
		if c < 'A' || c > 'Z' {
			b.WriteByte(c)
			continue
		}
		if i > 0 {
			b.WriteByte('-')
		}
		b.WriteByte(c + ('a' - 'A'))
	}
	return b.String()
}
// isWordSeparator reports whether c is a word-separator byte ('_' or '-').
func isWordSeparator(c byte) bool {
	switch c {
	case '_', '-':
		return true
	default:
		return false
	}
}
// isASCIILower reports whether c is an ASCII lower-case letter ('a'..'z').
func isASCIILower(c byte) bool {
	return c >= 'a' && c <= 'z'
}
// isASCIIDigit reports whether c is an ASCII decimal digit ('0'..'9').
func isASCIIDigit(c byte) bool {
	return c >= '0' && c <= '9'
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import (
"encoding/json"
"errors"
"fmt"
"sort"
"strings"
"github.com/spf13/cobra"
"sigs.k8s.io/yaml"
)
// Version holds info for client and control plane versions
type Version struct {
	// ClientVersion is the build metadata of the local binary.
	ClientVersion *BuildInfo `json:"clientVersion,omitempty" yaml:"clientVersion,omitempty"`
	// MeshVersion lists the versions of the control plane components.
	MeshVersion *MeshInfo `json:"meshVersion,omitempty" yaml:"meshVersion,omitempty"`
	// DataPlaneVersion lists the versions reported by data plane proxies.
	DataPlaneVersion *[]ProxyInfo `json:"dataPlaneVersion,omitempty" yaml:"dataPlaneVersion,omitempty"`
}
// GetRemoteVersionFunc is the function prototype to be passed to CobraOptions so that it is
// called when invoking `cmd version`
type (
	// GetRemoteVersionFunc retrieves the control plane component versions.
	GetRemoteVersionFunc func() (*MeshInfo, error)
	// GetProxyVersionFunc retrieves the data plane proxy versions.
	GetProxyVersionFunc func() (*[]ProxyInfo, error)
)
// CobraOptions holds options to be passed to `CobraCommandWithOptions`
type CobraOptions struct {
	// GetRemoteVersion is the function to be invoked to retrieve remote versions for
	// Istio components. Optional. If not set, the 'version' subcommand will not attempt
	// to connect to a remote side, and CLI flags such as '--remote' will be hidden.
	GetRemoteVersion GetRemoteVersionFunc
	// GetProxyVersions is the function to be invoked to retrieve data plane
	// proxy versions. Optional; only consulted when '--remote' is set.
	GetProxyVersions GetProxyVersionFunc
}
// CobraCommand returns a command used to print version information.
// It is equivalent to CobraCommandWithOptions with all options unset,
// so only client-side version info is reported.
func CobraCommand() *cobra.Command {
	return CobraCommandWithOptions(CobraOptions{})
}
// CobraCommandWithOptions returns a command used to print version information.
// It accepts an CobraOptions argument that might modify its behavior
func CobraCommandWithOptions(options CobraOptions) *cobra.Command {
	var (
		short         bool       // --short: print only version strings
		output        string     // --output: "", "yaml", or "json"
		remote        bool       // --remote: also query control/data plane
		version       Version    // accumulated version report
		remoteVersion *MeshInfo  // control plane versions, if fetched
		serverErr     error      // error from the remote version lookup
	)
	cmd := &cobra.Command{
		Use:   "version",
		Short: "Prints out build version information",
		RunE: func(cmd *cobra.Command, args []string) error {
			if output != "" && output != "yaml" && output != "json" {
				return errors.New(`--output must be 'yaml' or 'json'`)
			}
			version.ClientVersion = &Info
			// Query the control plane only when a fetcher was provided and
			// the user asked for remote info.
			if options.GetRemoteVersion != nil && remote {
				remoteVersion, serverErr = options.GetRemoteVersion()
				if serverErr != nil {
					return serverErr
				}
				version.MeshVersion = remoteVersion
			}
			// Proxy version lookup is best-effort; its error is ignored.
			if options.GetProxyVersions != nil && remote {
				version.DataPlaneVersion, _ = options.GetProxyVersions()
			}
			switch output {
			case "":
				if short {
					if remoteVersion != nil {
						// Collapse identical component versions into one line.
						remoteVersion = coalesceVersions(remoteVersion)
						_, _ = fmt.Fprintf(cmd.OutOrStdout(), "client version: %s\n", version.ClientVersion.Version)
						for _, remote := range *remoteVersion {
							_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s version: %s\n", remote.Component, remote.Info.Version)
						}
					} else {
						_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s\n", version.ClientVersion.Version)
					}
					if version.DataPlaneVersion != nil {
						_, _ = fmt.Fprintf(cmd.OutOrStdout(), "data plane version: %s\n", renderProxyVersions(version.DataPlaneVersion))
					}
				} else {
					// Long (default) form: dump full build info per component.
					if remoteVersion != nil {
						_, _ = fmt.Fprintf(cmd.OutOrStdout(), "client version: %s\n", version.ClientVersion.LongForm())
						for _, remote := range *remoteVersion {
							_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s version: %s\n", remote.Component, remote.Info.LongForm())
						}
					} else {
						_, _ = fmt.Fprintf(cmd.OutOrStdout(), "%s\n", version.ClientVersion.LongForm())
					}
					if version.DataPlaneVersion != nil {
						for _, proxy := range *version.DataPlaneVersion {
							_, _ = fmt.Fprintf(cmd.OutOrStdout(), "data plane version: %#v\n", proxy)
						}
					}
				}
			case "yaml":
				if marshaled, err := yaml.Marshal(&version); err == nil {
					_, _ = fmt.Fprintln(cmd.OutOrStdout(), string(marshaled))
				}
			case "json":
				if marshaled, err := json.MarshalIndent(&version, "", " "); err == nil {
					_, _ = fmt.Fprintln(cmd.OutOrStdout(), string(marshaled))
				}
			}
			return nil
		},
	}
	cmd.Flags().BoolVarP(&short, "short", "s", false, "Use --short=false to generate full version information")
	cmd.Flags().StringVarP(&output, "output", "o", "", "One of 'yaml' or 'json'.")
	// The --remote flag only makes sense when a remote fetcher is configured.
	if options.GetRemoteVersion != nil {
		cmd.Flags().BoolVar(&remote, "remote", false, "Use --remote=false to suppress control plane check")
	}
	return cmd
}
func coalesceVersions(remoteVersion *MeshInfo) *MeshInfo {
if identicalVersions(*remoteVersion) {
return &MeshInfo{
ServerInfo{
Component: "control plane",
Info: (*remoteVersion)[0].Info,
},
}
}
return remoteVersion
}
// identicalVersions reports whether every component in remoteVersion
// reports the same Version string.
func identicalVersions(remoteVersion MeshInfo) bool {
	// Robustness: an empty or single-element list is trivially identical;
	// the guard also prevents an index-out-of-range panic on empty input.
	if len(remoteVersion) == 0 {
		return true
	}
	exemplar := remoteVersion[0].Info
	for i := 1; i < len(remoteVersion); i++ {
		candidate := remoteVersion[i].Info
		// Note that we don't compare GitTag, GitRevision, BuildStatus,
		// or DockerHub because released Istio versions may use the same version tag
		// but differ in those fields.
		if exemplar.Version != candidate.Version {
			return false
		}
	}
	return true
}
// renderProxyVersions produces a human-readable summary of an array of
// sidecar Istio versions, e.g. "1.19.0 (2 proxies), 1.20.1 (1 proxies)".
// Returns "none" when no proxies are connected.
func renderProxyVersions(pinfos *[]ProxyInfo) string {
	if len(*pinfos) == 0 {
		return "none"
	}
	// Group proxy IDs by the Istio version they report.
	byVersion := make(map[string][]string)
	for _, pi := range *pinfos {
		byVersion[pi.IstioVersion] = append(byVersion[pi.IstioVersion], pi.ID)
	}
	// Sort the version keys so output is deterministic.
	versions := make([]string, 0, len(byVersion))
	for v := range byVersion {
		versions = append(versions, v)
	}
	sort.Strings(versions)
	parts := make([]string, 0, len(versions))
	for _, v := range versions {
		parts = append(parts, fmt.Sprintf("%s (%d proxies)", v, len(byVersion[v])))
	}
	return strings.Join(parts, ", ")
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package version
import "istio.io/istio/pkg/monitoring"
var (
	// gitTagKey labels the build metric with the component's git tag.
	gitTagKey = monitoring.CreateLabel("tag")
	// componentTagKey labels the build metric with the component name.
	componentTagKey = monitoring.CreateLabel("component")
	// istioBuildTag exposes per-component build info as a metric so rollouts
	// can be tracked by scraping.
	istioBuildTag = monitoring.NewGauge(
		"istio_build",
		"Istio component build info",
	)
)
// RecordComponentBuildTag sets the value for a metric that will be used to track component build tags for
// tracking rollouts, etc. The metric is labeled with the build's git tag
// and the supplied component name.
func (b BuildInfo) RecordComponentBuildTag(component string) {
	istioBuildTag.With(gitTagKey.Value(b.GitTag), componentTagKey.Value(component)).Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package version provides build version information.
package version
import (
"fmt"
"runtime"
"strings"
)
// The following fields are populated at build time using -ldflags -X.
// Note that DATE is omitted for reproducible builds
// (each defaults to "unknown" for builds without the ldflags injection).
var (
	buildVersion     = "unknown"
	buildGitRevision = "unknown"
	buildStatus      = "unknown"
	buildTag         = "unknown"
	buildHub         = "unknown"
	buildOS          = "unknown"
	buildArch        = "unknown"
)
// BuildInfo describes version information about the binary build.
type BuildInfo struct {
	// Version is the Istio release version (e.g. a semver tag).
	Version string `json:"version"`
	// GitRevision is the git commit the binary was built from.
	GitRevision string `json:"revision"`
	// GolangVersion is the Go toolchain version used for the build.
	GolangVersion string `json:"golang_version"`
	// BuildStatus indicates whether the working tree was Clean or Modified.
	BuildStatus string `json:"status"`
	// GitTag is the nearest git tag at build time.
	GitTag string `json:"tag"`
}
// ServerInfo contains the version for a single control plane component
type ServerInfo struct {
	// Component is the component name (e.g. "pilot").
	Component string
	// Revision is the revision label of the component's deployment.
	Revision string
	// Info is the component's build version information.
	Info BuildInfo
}
// MeshInfo contains the versions for all Istio control plane components
type MeshInfo []ServerInfo
// NodeType decides the responsibility of the proxy serves in the mesh
type NodeType string

// ToUserFacingNodeType maps an internal proxy type string to the name
// shown to users: "router" is presented as "gateway"; every other value
// is passed through unchanged.
func ToUserFacingNodeType(t string) NodeType {
	if t == "router" {
		return "gateway"
	}
	return NodeType(t)
}
// ProxyInfo contains the version for a single data plane component
type ProxyInfo struct {
	// ID is the proxy's node identifier.
	ID string
	// IstioVersion is the Istio version the proxy reports.
	IstioVersion string
	// Type is the user-facing proxy role (see ToUserFacingNodeType).
	Type NodeType
}
// DockerBuildInfo contains and exposes Hub: buildHub, Tag: buildVersion, OS: buildOS, and Arch: buildArch
type DockerBuildInfo struct {
	Hub  string
	Tag  string
	OS   string
	Arch string
}
// NewBuildInfoFromOldString creates a BuildInfo struct based on the output
// of previous Istio components '-- version' output.
// Each non-empty line must look like "Key: value"; known keys are copied
// into the result and unknown keys are skipped. A line without a colon is
// an error.
func NewBuildInfoFromOldString(oldOutput string) (BuildInfo, error) {
	res := BuildInfo{}
	for _, line := range strings.Split(oldOutput, "\n") {
		if strings.TrimSpace(line) == "" {
			continue
		}
		// Split into key and value at the first ':'. Note: SplitN with a
		// positive count never returns nil, so no nil check is needed
		// (the previous `fields != nil` guard was always true).
		fields := strings.SplitN(line, ":", 2)
		if len(fields) != 2 {
			return BuildInfo{}, fmt.Errorf("invalid BuildInfo input, field '%s' is not valid", fields[0])
		}
		value := strings.TrimSpace(fields[1])
		switch fields[0] {
		case "Version":
			res.Version = value
		case "GitRevision":
			res.GitRevision = value
		case "GolangVersion":
			res.GolangVersion = value
		case "BuildStatus":
			res.BuildStatus = value
		case "GitTag":
			res.GitTag = value
		default:
			// Skip unknown fields, as older versions may report other fields
			continue
		}
	}
	return res, nil
}
var (
	// Info exports the build version information.
	Info BuildInfo
	// DockerInfo exports the Docker image coordinates this binary was built for.
	DockerInfo DockerBuildInfo
)
// String produces a single-line version info
//
// This looks like:
//
// ```
// <version>-<git revision>-<build status>
// ```
func (b BuildInfo) String() string {
	// Plain concatenation of the three string fields, joined by '-';
	// equivalent to fmt.Sprintf("%v-%v-%v", ...) for string values.
	return b.Version + "-" + b.GitRevision + "-" + b.BuildStatus
}
// LongForm returns a dump of the Info struct.
// This looks like the Go-syntax representation produced by %#v, e.g.:
//
//	version.BuildInfo{Version:"1.0.0", GitRevision:"abc123", ...}
func (b BuildInfo) LongForm() string {
	return fmt.Sprintf("%#v", b)
}
// init populates the exported Info and DockerInfo values from the
// package-level variables injected at link time via -ldflags -X,
// plus the runtime's Go version.
func init() {
	Info = BuildInfo{
		Version:       buildVersion,
		GitRevision:   buildGitRevision,
		GolangVersion: runtime.Version(),
		BuildStatus:   buildStatus,
		GitTag:        buildTag,
	}
	DockerInfo = DockerBuildInfo{
		Hub:  buildHub,
		Tag:  buildVersion,
		OS:   buildOS,
		Arch: buildArch,
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wasm
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
"fmt"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/google/go-containerregistry/pkg/name"
extensions "istio.io/api/extensions/v1alpha1"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/util/sets"
)
// wasmLog is the logging scope for all Wasm module cache operations.
var wasmLog = log.RegisterScope("wasm", "")

const (
	// oci URL prefix
	ociURLPrefix = "oci://"
	// sha256 scheme prefix
	sha256SchemePrefix = "sha256:"
)
// Cache models a Wasm module cache.
type Cache interface {
	// Get returns the local file path of the Wasm module referenced by url,
	// downloading and caching it first when not already present.
	Get(url string, opts GetOptions) (string, error)
	// Cleanup stops background cache maintenance.
	Cleanup()
}
// LocalFileCache for downloaded Wasm modules. Currently it stores the Wasm module as local file.
type LocalFileCache struct {
	// Map from Wasm module checksum to cache entry.
	modules map[moduleKey]*cacheEntry
	// Map from tagged URL to checksum
	checksums map[string]*checksumEntry
	// http fetcher fetches Wasm module with HTTP get.
	httpFetcher *HTTPFetcher
	// directory path used to store Wasm module.
	dir string
	// mux is needed because stale Wasm module files will be purged periodically.
	mux sync.Mutex
	// option sets for configuring the cache.
	cacheOptions
	// stopChan currently is only used by test
	stopChan chan struct{}
}
// Compile-time check that LocalFileCache satisfies the Cache interface.
var _ Cache = &LocalFileCache{}

// checksumEntry records the most recently observed checksum for a mutable
// (tagged or HTTP) download URL.
type checksumEntry struct {
	checksum string
	// Keeps the resource version per each resource for dealing with multiple resources which pointing the same image.
	resourceVersionByResource map[string]string
}
// moduleKey identifies a concrete Wasm module: a checksum-neutral name
// plus the module's content checksum.
type moduleKey struct {
	// Identifier for the module. It should be neutral for the checksum.
	// e.g.) oci://docker.io/test@sha256:0123456789 is not allowed.
	// oci://docker.io/test:latest (tagged form) is allowed.
	name     string
	checksum string
}
// cacheKey is the full lookup key for a cache operation: the module
// identity plus the download URL and the requesting WasmPlugin resource.
type cacheKey struct {
	moduleKey
	downloadURL string
	// Resource name of WasmPlugin resource. This should be a fully-qualified name.
	resourceName string
	// Resource version of WasmPlugin resource. Even though PullPolicy is Always,
	// if there is no change of resource state, a cached entry is used instead of pulling newly.
	resourceVersion string
}
// cacheEntry contains information about a Wasm module cache entry.
type cacheEntry struct {
	// File path to the downloaded wasm modules.
	modulePath string
	// Last time that this local Wasm module is referenced.
	last time.Time
	// set of URLs referencing this entry; used by purge to clean up the
	// URL-to-checksum map when the module is evicted.
	referencingURLs sets.String
}
// cacheOptions bundles the user-facing Options with derived internal flags.
type cacheOptions struct {
	Options
	// allowAllInsecureRegistries is true when InsecureRegistries contains "*".
	allowAllInsecureRegistries bool
}
// sanitize returns a copy of o in which every unset option is replaced by
// its default value, and precomputes the wildcard insecure-registry flag.
func (o cacheOptions) sanitize() cacheOptions {
	out := cacheOptions{Options: defaultOptions()}
	if o.InsecureRegistries != nil {
		out.InsecureRegistries = o.InsecureRegistries
	}
	// "*" means any registry may be contacted insecurely.
	out.allowAllInsecureRegistries = out.InsecureRegistries.Contains("*")
	if o.PurgeInterval != 0 {
		out.PurgeInterval = o.PurgeInterval
	}
	if o.ModuleExpiry != 0 {
		out.ModuleExpiry = o.ModuleExpiry
	}
	if o.HTTPRequestTimeout != 0 {
		out.HTTPRequestTimeout = o.HTTPRequestTimeout
	}
	if o.HTTPRequestMaxRetries != 0 {
		out.HTTPRequestMaxRetries = o.HTTPRequestMaxRetries
	}
	return out
}
// allowInsecure reports whether host may be fetched without TLS verification,
// either via the "*" wildcard or an explicit registry entry.
func (o cacheOptions) allowInsecure(host string) bool {
	if o.allowAllInsecureRegistries {
		return true
	}
	return o.InsecureRegistries.Contains(host)
}
// NewLocalFileCache create a new Wasm module cache which downloads and stores Wasm module files locally.
// It starts a background goroutine that periodically purges stale modules;
// call Cleanup to stop it.
func NewLocalFileCache(dir string, options Options) *LocalFileCache {
	wasmLog.Debugf("LocalFileCache is created with the option\n%#v", options)
	cacheOptions := cacheOptions{Options: options}
	cache := &LocalFileCache{
		httpFetcher:  NewHTTPFetcher(options.HTTPRequestTimeout, options.HTTPRequestMaxRetries),
		modules:      make(map[moduleKey]*cacheEntry),
		checksums:    make(map[string]*checksumEntry),
		dir:          dir,
		cacheOptions: cacheOptions.sanitize(),
		stopChan:     make(chan struct{}),
	}
	// Launch the purge loop directly; the previous anonymous-function
	// wrapper (`go func() { cache.purge() }()`) added nothing.
	go cache.purge()
	return cache
}
// moduleNameFromURL derives a checksum-neutral module identifier from a
// download URL: for parseable OCI references the tag/digest is stripped,
// otherwise the URL itself is used verbatim.
func moduleNameFromURL(fullURLStr string) string {
	if !strings.HasPrefix(fullURLStr, ociURLPrefix) {
		return fullURLStr
	}
	ref, err := name.ParseReference(fullURLStr[len(ociURLPrefix):])
	if err != nil {
		return fullURLStr
	}
	// remove tag or sha
	return ociURLPrefix + ref.Context().Name()
}
// shouldIgnoreResourceVersion reports whether a cached module may be reused
// even when the WasmPlugin resource version changed.
func shouldIgnoreResourceVersion(pullPolicy extensions.PullPolicy, u *url.URL) bool {
	switch pullPolicy {
	case extensions.PullPolicy_Always:
		// When Always, pull a wasm module when the resource version is changed.
		return false
	case extensions.PullPolicy_IfNotPresent:
		// When IfNotPresent, use the cached one regardless of the resource version.
		return true
	default:
		// Default is IfNotPresent except OCI images tagged with `latest`,
		// which behave like Always (mirrors Kubernetes image pull semantics).
		return u.Scheme != "oci" || !strings.HasSuffix(u.Path, ":latest")
	}
}
// getModulePath returns the file path at which the module identified by
// mkey is stored, creating its per-module directory when necessary.
// Layout: <baseDir>/<sha256(name)>/<checksum>.wasm.
func getModulePath(baseDir string, mkey moduleKey) (string, error) {
	// Hash the module name so it is always a filesystem-safe directory name.
	digest := sha256.Sum256([]byte(mkey.name))
	moduleDir := filepath.Join(baseDir, hex.EncodeToString(digest[:]))
	if err := os.Mkdir(moduleDir, 0o755); err != nil && !os.IsExist(err) {
		return "", err
	}
	return filepath.Join(moduleDir, mkey.checksum+".wasm"), nil
}
// Get returns the path to the local Wasm module file for downloadURL,
// fetching and caching the module first if it is not already present.
func (c *LocalFileCache) Get(downloadURL string, opts GetOptions) (string, error) {
	// Construct Wasm cache key with downloading URL and provided checksum of the module.
	key := cacheKey{
		downloadURL: downloadURL,
		moduleKey: moduleKey{
			name:     moduleNameFromURL(downloadURL),
			checksum: opts.Checksum,
		},
		resourceName:    opts.ResourceName,
		resourceVersion: opts.ResourceVersion,
	}
	entry, err := c.getOrFetch(key, opts)
	if err != nil {
		return "", err
	}
	// err is nil here, so this returns (path, nil).
	return entry.modulePath, err
}
// getOrFetch returns the cache entry for key, downloading the module from
// its URL (http(s) or oci scheme) on a cache miss. Any caller-provided
// checksum is verified against the downloaded content before caching.
func (c *LocalFileCache) getOrFetch(key cacheKey, opts GetOptions) (*cacheEntry, error) {
	u, err := url.Parse(key.downloadURL)
	if err != nil {
		return nil, fmt.Errorf("fail to parse Wasm module fetch url: %s, error: %v", key.downloadURL, err)
	}
	// First check if the cache entry is already downloaded and policy does not require to pull always.
	ce, checksum := c.getEntry(key, shouldIgnoreResourceVersion(opts.PullPolicy, u))
	if ce != nil {
		return ce, nil
	}
	// getEntry may have resolved a checksum (e.g. from a digest or a prior
	// pull) even on a miss; adopt it for the fetch below.
	key.checksum = checksum
	// Fetch the image now as it is not available in cache.
	var b []byte             // Byte array of Wasm binary.
	var dChecksum string     // Hex-Encoded sha256 checksum of binary.
	var binaryFetcher func() ([]byte, error)
	insecure := c.allowInsecure(u.Host)
	ctx, cancel := context.WithTimeout(context.Background(), opts.RequestTimeout)
	defer cancel()
	switch u.Scheme {
	case "http", "https":
		// Download the Wasm module with http fetcher.
		b, err = c.httpFetcher.Fetch(ctx, key.downloadURL, insecure)
		if err != nil {
			wasmRemoteFetchCount.With(resultTag.Value(downloadFailure)).Increment()
			return nil, err
		}
		// Get sha256 checksum and check if it is the same as provided one.
		sha := sha256.Sum256(b)
		dChecksum = hex.EncodeToString(sha[:])
	case "oci":
		imgFetcherOps := ImageFetcherOption{
			Insecure: insecure,
		}
		if opts.PullSecret != nil {
			imgFetcherOps.PullSecret = opts.PullSecret
		}
		wasmLog.Debugf("fetching oci image from %s with options: %v", key.downloadURL, imgFetcherOps)
		// For OCI only the manifest is resolved here; the binary itself is
		// pulled lazily via binaryFetcher after the checksum check below.
		fetcher := NewImageFetcher(ctx, imgFetcherOps)
		binaryFetcher, dChecksum, err = fetcher.PrepareFetch(u.Host + u.Path)
		if err != nil {
			wasmRemoteFetchCount.With(resultTag.Value(manifestFailure)).Increment()
			return nil, fmt.Errorf("could not fetch Wasm OCI image: %v", err)
		}
	default:
		return nil, fmt.Errorf("unsupported Wasm module downloading URL scheme: %v", u.Scheme)
	}
	if key.checksum == "" {
		key.checksum = dChecksum
		// check again if the cache is having the checksum.
		if ce, _ := c.getEntry(key, true); ce != nil {
			return ce, nil
		}
	} else if dChecksum != key.checksum {
		wasmRemoteFetchCount.With(resultTag.Value(checksumMismatch)).Increment()
		return nil, fmt.Errorf("module downloaded from %v has checksum %v, which does not match: %v", key.downloadURL, dChecksum, key.checksum)
	}
	// OCI path: the checksum matched, so actually download the binary now.
	if binaryFetcher != nil {
		b, err = binaryFetcher()
		if err != nil {
			wasmRemoteFetchCount.With(resultTag.Value(downloadFailure)).Increment()
			return nil, fmt.Errorf("could not fetch Wasm binary: %v", err)
		}
	}
	if !isValidWasmBinary(b) {
		wasmRemoteFetchCount.With(resultTag.Value(fetchFailure)).Increment()
		return nil, fmt.Errorf("fetched Wasm binary from %s is invalid", key.downloadURL)
	}
	wasmRemoteFetchCount.With(resultTag.Value(fetchSuccess)).Increment()
	key.checksum = dChecksum
	return c.addEntry(key, b)
}
// Cleanup closes background Wasm module purge routine.
// Closing stopChan signals the purge goroutine started by
// NewLocalFileCache to exit.
func (c *LocalFileCache) Cleanup() {
	close(c.stopChan)
}
// updateChecksum records key's checksum and resource version in the
// URL-indexed checksum map and reports whether it did so. Only mutable
// references need tracking: http/https URLs and OCI tags. Digest-pinned
// OCI URLs (containing '@') are immutable, so nothing is recorded.
func (c *LocalFileCache) updateChecksum(key cacheKey) bool {
	digestPinnedOCI := strings.HasPrefix(key.downloadURL, ociURLPrefix) && strings.Contains(key.downloadURL, "@")
	if digestPinnedOCI {
		return false
	}
	ce := c.checksums[key.downloadURL]
	if ce == nil {
		ce = &checksumEntry{
			resourceVersionByResource: make(map[string]string),
		}
		c.checksums[key.downloadURL] = ce
	}
	ce.checksum = key.checksum
	ce.resourceVersionByResource[key.resourceName] = key.resourceVersion
	return true
}
// addEntry adds a wasmModule to cache with cacheKey, writes the module to the local file system,
// and returns the created entry. Must not be called with c.mux held (it
// locks internally).
func (c *LocalFileCache) addEntry(key cacheKey, wasmModule []byte) (*cacheEntry, error) {
	c.mux.Lock()
	defer c.mux.Unlock()
	// Track the URL-to-checksum mapping for mutable references.
	needChecksumUpdate := c.updateChecksum(key)
	// Check if the module has already been added. If so, avoid writing the file again.
	if ce, ok := c.modules[key.moduleKey]; ok {
		// Update last touched time.
		ce.last = time.Now()
		if needChecksumUpdate {
			ce.referencingURLs.Insert(key.downloadURL)
		}
		return ce, nil
	}
	modulePath, err := getModulePath(c.dir, key.moduleKey)
	if err != nil {
		return nil, err
	}
	// Materialize the Wasm module into a local file. Use checksum as name of the module.
	if err := os.WriteFile(modulePath, wasmModule, 0o644); err != nil {
		return nil, err
	}
	ce := cacheEntry{
		modulePath:      modulePath,
		last:            time.Now(),
		referencingURLs: sets.New[string](),
	}
	if needChecksumUpdate {
		ce.referencingURLs.Insert(key.downloadURL)
	}
	c.modules[key.moduleKey] = &ce
	// Export the current cache size as a metric.
	wasmCacheEntries.Record(float64(len(c.modules)))
	return &ce, nil
}
// getEntry finds a cached module, and returns the found cache entry (nil on
// miss) and the checksum resolved for key. Must not be called with c.mux
// held; the hit/miss metric is recorded after the lock is released.
func (c *LocalFileCache) getEntry(key cacheKey, ignoreResourceVersion bool) (*cacheEntry, string) {
	cacheHit := false
	c.mux.Lock()
	defer func() {
		c.mux.Unlock()
		wasmCacheLookupCount.With(hitTag.Value(strconv.FormatBool(cacheHit))).Increment()
	}()
	// No caller-provided checksum: try to extract one from a digest-pinned
	// OCI URL first.
	if len(key.checksum) == 0 && strings.HasPrefix(key.downloadURL, ociURLPrefix) {
		if d, err := name.NewDigest(key.downloadURL[len(ociURLPrefix):]); err == nil {
			// If there is no checksum and the digest is suffixed in URL, use the digest.
			dstr := d.DigestStr()
			if strings.HasPrefix(dstr, sha256SchemePrefix) {
				key.checksum = dstr[len(sha256SchemePrefix):]
			}
			// For other digest scheme, give up to use cache.
		}
	}
	if len(key.checksum) == 0 {
		// If no checksum, try the checksum cache.
		// If the image was pulled before, there should be a checksum of the most recently pulled image.
		if ce, found := c.checksums[key.downloadURL]; found {
			// Reuse the recorded checksum only if the resource version is
			// unchanged, or if the pull policy lets us ignore it.
			if ignoreResourceVersion || key.resourceVersion == ce.resourceVersionByResource[key.resourceName] {
				// update checksum
				key.checksum = ce.checksum
			}
			// update resource version here
			ce.resourceVersionByResource[key.resourceName] = key.resourceVersion
		}
	}
	if ce, ok := c.modules[key.moduleKey]; ok {
		// Update last touched time.
		ce.last = time.Now()
		cacheHit = true
		// Refresh the URL-to-checksum bookkeeping for this hit.
		c.updateChecksum(key)
		return ce, key.checksum
	}
	return nil, key.checksum
}
// purge periodically cleans up the stale Wasm modules local file and the cache map.
// It runs until stopChan is closed (see Cleanup), scanning once per
// PurgeInterval.
func (c *LocalFileCache) purge() {
	ticker := time.NewTicker(c.PurgeInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.mux.Lock()
			for k, m := range c.modules {
				if !m.expired(c.ModuleExpiry) {
					continue
				}
				// The module has not be touched for expiry duration, delete it from the map as well as the local dir.
				if err := os.Remove(m.modulePath); err != nil {
					wasmLog.Errorf("failed to purge Wasm module %v: %v", m.modulePath, err)
				} else {
					// Drop every URL-to-checksum entry that referenced the module.
					for downloadURL := range m.referencingURLs {
						delete(c.checksums, downloadURL)
					}
					delete(c.modules, k)
					wasmLog.Debugf("successfully removed stale Wasm module %v", m.modulePath)
				}
			}
			wasmCacheEntries.Record(float64(len(c.modules)))
			c.mux.Unlock()
		case <-c.stopChan:
			// Currently this will only happen in test.
			return
		}
	}
}
// expired reports whether the module has not been touched for longer than
// the given expiry duration.
func (ce *cacheEntry) expired(expiry time.Duration) bool {
	// time.Since(t) is the idiomatic equivalent of time.Now().Sub(t).
	return time.Since(ce.last) > expiry
}
// wasmMagicNumber is the 4-byte magic prefix ("\0asm") of every Wasm binary.
var wasmMagicNumber = []byte{0x00, 0x61, 0x73, 0x6d}

// isValidWasmBinary reports whether in starts with a complete Wasm file
// header: the magic number followed by a 4-byte version (8 bytes total).
func isValidWasmBinary(in []byte) bool {
	if len(in) < 8 {
		return false
	}
	return bytes.Equal(in[:4], wasmMagicNumber)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wasm
import (
"fmt"
"strings"
"sync"
"time"
udpa "github.com/cncf/xds/go/udpa/type/v1"
core "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
httprbac "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/rbac/v3"
httpwasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/http/wasm/v3"
networkrbac "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/rbac/v3"
networkwasm "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/wasm/v3"
wasmextensions "github.com/envoyproxy/go-control-plane/envoy/extensions/wasm/v3"
"github.com/envoyproxy/go-control-plane/pkg/conversion"
"github.com/hashicorp/go-multierror"
anypb "google.golang.org/protobuf/types/known/anypb"
extensions "istio.io/api/extensions/v1alpha1"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/util/protoconv"
"istio.io/istio/pkg/bootstrap"
"istio.io/istio/pkg/config/xds"
)
var (
	// allowHTTPTypedConfig is a pre-marshaled allow-all HTTP RBAC filter
	// config, used as the fail-open fallback for HTTP Wasm filters.
	allowHTTPTypedConfig = protoconv.MessageToAny(&httprbac.RBAC{})
	// allowNetworkTypedConfig is the analogous fallback for network filters.
	allowNetworkTypedConfig = protoconv.MessageToAny(&networkrbac.RBAC{})
)
// createHTTPAllowAllFilter wraps the allow-all HTTP RBAC config in a typed
// extension config carrying the given filter name.
func createHTTPAllowAllFilter(name string) (*anypb.Any, error) {
	return anypb.New(&core.TypedExtensionConfig{
		Name:        name,
		TypedConfig: allowHTTPTypedConfig,
	})
}
// createNetworkAllowAllFilter wraps the allow-all network RBAC config in a
// typed extension config carrying the given filter name.
func createNetworkAllowAllFilter(name string) (*anypb.Any, error) {
	return anypb.New(&core.TypedExtensionConfig{
		Name:        name,
		TypedConfig: allowNetworkTypedConfig,
	})
}
// MaybeConvertWasmExtensionConfig converts any presence of module remote download to local file.
// It downloads the Wasm module and stores the module locally in the file system.
// All resources are processed concurrently; resources[i] is rewritten in
// place on success. Non-Wasm resources are left untouched.
func MaybeConvertWasmExtensionConfig(resources []*anypb.Any, cache Cache) error {
	var wg sync.WaitGroup
	numResources := len(resources)
	// One error slot per resource, so the goroutines never contend.
	convertErrs := make([]error, numResources)
	wg.Add(numResources)
	startTime := time.Now()
	defer func() {
		wasmConfigConversionDuration.Record(float64(time.Since(startTime).Milliseconds()))
	}()
	for i := 0; i < numResources; i++ {
		// i is passed by value, so each goroutine sees its own index.
		go func(i int) {
			defer wg.Done()
			extConfig, wasmHTTPConfig, wasmNetworkConfig, err := tryUnmarshal(resources[i])
			if err != nil {
				wasmConfigConversionCount.
					With(resultTag.Value(unmarshalFailure)).
					Increment()
				convertErrs[i] = err
				return
			}
			if extConfig == nil || (wasmHTTPConfig == nil && wasmNetworkConfig == nil) {
				// If there is no config, it is not wasm config.
				// Let's bypass the ECDS resource.
				wasmConfigConversionCount.
					With(resultTag.Value(noRemoteLoad)).
					Increment()
				return
			}
			if wasmHTTPConfig != nil {
				newExtensionConfig, err := convertHTTPWasmConfigFromRemoteToLocal(extConfig, wasmHTTPConfig, cache)
				if err != nil {
					// fail_open=false: surface the error (Nack).
					if !wasmHTTPConfig.GetConfig().GetFailOpen() {
						convertErrs[i] = err
						return
					}
					// Use NOOP filter because the download failed.
					newExtensionConfig, err = createHTTPAllowAllFilter(extConfig.GetName())
					if err != nil {
						// If the fallback is failing, send the Nack regardless of fail_open.
						err = fmt.Errorf("failed to create allow-all filter as a fallback of %s Wasm Module: %w", extConfig.GetName(), err)
						convertErrs[i] = err
						return
					}
				}
				resources[i] = newExtensionConfig
			} else {
				newExtensionConfig, err := convertNetworkWasmConfigFromRemoteToLocal(extConfig, wasmNetworkConfig, cache)
				if err != nil {
					// fail_open=false: surface the error (Nack).
					if !wasmNetworkConfig.GetConfig().GetFailOpen() {
						convertErrs[i] = err
						return
					}
					// Use NOOP filter because the download failed.
					newExtensionConfig, err = createNetworkAllowAllFilter(extConfig.GetName())
					if err != nil {
						// If the fallback is failing, send the Nack regardless of fail_open.
						err = fmt.Errorf("failed to create allow-all filter as a fallback of %s Wasm Module: %w", extConfig.GetName(), err)
						convertErrs[i] = err
						return
					}
				}
				resources[i] = newExtensionConfig
			}
		}(i)
	}
	wg.Wait()
	err := multierror.Append(nil, convertErrs...).ErrorOrNil()
	if err != nil {
		wasmLog.Errorf("convert the wasm config: %v", err)
	}
	return err
}
// tryUnmarshal returns the typed extension config and the HTTP or network Wasm
// config obtained by unmarshaling `resource`, if `resource` is a Wasm config
// that loads its module from a remote site.
// It returns `nil` for the typed extension config and both wasm configs when
// the resource is not a remote-loaded Wasm filter (local module, non-Wasm
// filter) — that is not an error.
//
// Fix: the two StructToMessage error messages in the TypedStruct case were
// swapped (the HTTP branch reported "Wasm Network filter" and vice versa).
func tryUnmarshal(resource *anypb.Any) (*core.TypedExtensionConfig, *httpwasm.Wasm, *networkwasm.Wasm, error) {
	ec := &core.TypedExtensionConfig{}
	wasmHTTPFilterConfig := &httpwasm.Wasm{}
	wasmNetworkFilterConfig := &networkwasm.Wasm{}
	wasmNetwork := false
	if err := resource.UnmarshalTo(ec); err != nil {
		return nil, nil, nil, fmt.Errorf("failed to unmarshal extension config resource: %w", err)
	}

	// Wasm filter can be configured using typed struct and Wasm filter type
	switch {
	case ec.GetTypedConfig() == nil:
		return nil, nil, nil, fmt.Errorf("typed extension config %+v does not contain any typed config", ec)
	case ec.GetTypedConfig().TypeUrl == xds.WasmHTTPFilterType:
		if err := ec.GetTypedConfig().UnmarshalTo(wasmHTTPFilterConfig); err != nil {
			return nil, nil, nil, fmt.Errorf("failed to unmarshal extension config resource into Wasm HTTP filter: %w", err)
		}
	case ec.GetTypedConfig().TypeUrl == xds.WasmNetworkFilterType:
		wasmNetwork = true
		if err := ec.GetTypedConfig().UnmarshalTo(wasmNetworkFilterConfig); err != nil {
			return nil, nil, nil, fmt.Errorf("failed to unmarshal extension config resource into Wasm Network filter: %w", err)
		}
	case ec.GetTypedConfig().TypeUrl == xds.TypedStructType:
		typedStruct := &udpa.TypedStruct{}
		wasmTypedConfig := ec.GetTypedConfig()
		if err := wasmTypedConfig.UnmarshalTo(typedStruct); err != nil {
			return nil, nil, nil, fmt.Errorf("failed to unmarshal typed config for wasm filter: %w", err)
		}
		if typedStruct.TypeUrl == xds.WasmHTTPFilterType {
			if err := conversion.StructToMessage(typedStruct.Value, wasmHTTPFilterConfig); err != nil {
				// Bug fix: previously reported "Wasm Network filter" here.
				return nil, nil, nil, fmt.Errorf("failed to convert extension config struct %+v to Wasm HTTP filter", typedStruct)
			}
		} else if typedStruct.TypeUrl == xds.WasmNetworkFilterType {
			wasmNetwork = true
			if err := conversion.StructToMessage(typedStruct.Value, wasmNetworkFilterConfig); err != nil {
				// Bug fix: previously reported "Wasm HTTP filter" here.
				return nil, nil, nil, fmt.Errorf("failed to convert extension config struct %+v to Wasm Network filter", typedStruct)
			}
		} else {
			// This is not a Wasm filter.
			wasmLog.Debugf("typed extension config %+v does not contain wasm http filter", typedStruct)
			return nil, nil, nil, nil
		}
	default:
		// This is not a Wasm filter.
		wasmLog.Debugf("cannot find typed config or typed struct in %+v", ec)
		return nil, nil, nil, nil
	}

	// At this point either wasmNetworkFilterConfig or wasmHTTPFilterConfig has been unmarshaled.
	if wasmNetwork {
		if wasmNetworkFilterConfig.Config.GetVmConfig().GetCode().GetRemote() == nil {
			if wasmNetworkFilterConfig.Config.GetVmConfig().GetCode().GetLocal() == nil {
				return nil, nil, nil, fmt.Errorf("no remote and local load found in Wasm Network filter %+v", wasmNetworkFilterConfig)
			}
			// This has a local Wasm. Let's bypass it.
			wasmLog.Debugf("no remote load found in Wasm Network filter %+v", wasmNetworkFilterConfig)
			return nil, nil, nil, nil
		}
		return ec, nil, wasmNetworkFilterConfig, nil
	}
	if wasmHTTPFilterConfig.Config.GetVmConfig().GetCode().GetRemote() == nil {
		if wasmHTTPFilterConfig.Config.GetVmConfig().GetCode().GetLocal() == nil {
			return nil, nil, nil, fmt.Errorf("no remote and local load found in Wasm HTTP filter %+v", wasmHTTPFilterConfig)
		}
		// This has a local Wasm. Let's bypass it.
		wasmLog.Debugf("no remote load found in Wasm HTTP filter %+v", wasmHTTPFilterConfig)
		return nil, nil, nil, nil
	}
	return ec, wasmHTTPFilterConfig, nil, nil
}
// convertHTTPWasmConfigFromRemoteToLocal downloads the remote module referenced
// by the HTTP Wasm filter via cache, rewrites the filter to load from the local
// file, and re-marshals the updated TypedExtensionConfig into an Any.
// A conversion-result metric is recorded on every exit path.
func convertHTTPWasmConfigFromRemoteToLocal(ec *core.TypedExtensionConfig, wasmHTTPFilterConfig *httpwasm.Wasm, cache Cache) (*anypb.Any, error) {
	status := conversionSuccess
	defer func() {
		wasmConfigConversionCount.With(resultTag.Value(status)).Increment()
	}()

	// ec.Name is resourceName.
	// https://github.com/istio/istio/blob/9ea7ad532a9cc58a3564143d41ac89a61aaa8058/pilot/pkg/networking/core/v1alpha3/extension/wasmplugin.go#L103
	if err := rewriteVMConfig(ec.Name, wasmHTTPFilterConfig.Config.GetVmConfig(), &status, cache, wasmHTTPFilterConfig.Config.Name); err != nil {
		return nil, err
	}

	filterAny, err := anypb.New(wasmHTTPFilterConfig)
	if err != nil {
		status = marshalFailure
		return nil, fmt.Errorf("failed to marshal new wasm HTTP filter %+v to protobuf Any: %w", wasmHTTPFilterConfig, err)
	}
	ec.TypedConfig = filterAny
	wasmLog.Debugf("new extension config resource %+v", ec)

	converted, err := anypb.New(ec)
	if err != nil {
		status = marshalFailure
		return nil, fmt.Errorf("failed to marshal new extension config resource: %w", err)
	}
	// At this point, we are certain that wasm module has been downloaded and config is rewritten.
	// ECDS will be rewritten successfully.
	return converted, nil
}
// convertNetworkWasmConfigFromRemoteToLocal downloads the remote module
// referenced by the network Wasm filter via cache, rewrites the filter to load
// from the local file, and re-marshals the updated TypedExtensionConfig into
// an Any. A conversion-result metric is recorded on every exit path.
func convertNetworkWasmConfigFromRemoteToLocal(ec *core.TypedExtensionConfig, wasmNetworkFilterConfig *networkwasm.Wasm, cache Cache) (*anypb.Any, error) {
	status := conversionSuccess
	defer func() {
		wasmConfigConversionCount.With(resultTag.Value(status)).Increment()
	}()

	// ec.Name is resourceName.
	// https://github.com/istio/istio/blob/9ea7ad532a9cc58a3564143d41ac89a61aaa8058/pilot/pkg/networking/core/v1alpha3/extension/wasmplugin.go#L103
	if err := rewriteVMConfig(ec.Name, wasmNetworkFilterConfig.Config.GetVmConfig(), &status, cache, wasmNetworkFilterConfig.Config.Name); err != nil {
		return nil, err
	}

	filterAny, err := anypb.New(wasmNetworkFilterConfig)
	if err != nil {
		status = marshalFailure
		return nil, fmt.Errorf("failed to marshal new wasm Network filter %+v to protobuf Any: %w", wasmNetworkFilterConfig, err)
	}
	ec.TypedConfig = filterAny
	wasmLog.Debugf("new extension config resource %+v", ec)

	converted, err := anypb.New(ec)
	if err != nil {
		status = marshalFailure
		return nil, fmt.Errorf("failed to marshal new extension config resource: %w", err)
	}
	// At this point, we are certain that wasm module has been downloaded and config is rewritten.
	// ECDS will be rewritten successfully.
	return converted, nil
}
// rewriteVMConfig downloads the remote Wasm module referenced by vm and
// rewrites vm.Code in place from a remote fetch into a local file reference.
// Pull secret, pull policy, and resource version are read from the VM
// environment variables, and Istio-internal (ISTIO_META-prefixed) entries are
// stripped so they are not leaked to Envoy or the extension runtime.
// On failure, *status is set to a failure category for metrics reporting.
func rewriteVMConfig(resourceName string, vm *wasmextensions.VmConfig, status *string, cache Cache, configName string) error {
	envs := vm.GetEnvironmentVariables()
	var pullSecret []byte
	pullPolicy := extensions.PullPolicy_UNSPECIFIED_POLICY
	resourceVersion := ""
	if envs != nil {
		// An empty secret value is treated as an error rather than anonymous access.
		if sec, found := envs.KeyValues[model.WasmSecretEnv]; found {
			if sec == "" {
				*status = fetchFailure
				return fmt.Errorf("cannot fetch Wasm module %v: missing image pulling secret", configName)
			}
			pullSecret = []byte(sec)
		}
		// Unrecognized policy names silently keep UNSPECIFIED_POLICY.
		if ps, found := envs.KeyValues[model.WasmPolicyEnv]; found {
			if p, found := extensions.PullPolicy_value[ps]; found {
				pullPolicy = extensions.PullPolicy(p)
			}
		}
		resourceVersion = envs.KeyValues[model.WasmResourceVersionEnv]
		// Strip all internal env variables(with ISTIO_META) from VM env variable.
		// These env variables are added by Istio control plane and meant to be consumed by the
		// agent for image pulling control should not be leaked to Envoy or the Wasm extension runtime.
		for k := range envs.KeyValues {
			if strings.HasPrefix(k, bootstrap.IstioMetaPrefix) {
				delete(envs.KeyValues, k)
			}
		}
		// Drop now-empty env containers so the config pushed to Envoy stays minimal.
		if len(envs.KeyValues) == 0 {
			if len(envs.HostEnvKeys) == 0 {
				vm.EnvironmentVariables = nil
			} else {
				envs.KeyValues = nil
			}
		}
	}
	remote := vm.GetCode().GetRemote()
	httpURI := remote.GetHttpUri()
	if httpURI == nil {
		*status = missRemoteFetchHint
		return fmt.Errorf("wasm remote fetch %+v does not have httpUri specified for config %s", remote, configName)
	}
	// checksum sent by istiod can be "nil" if not set by user - magic value used to avoid unmarshaling errors
	if remote.Sha256 == "nil" {
		remote.Sha256 = ""
	}
	// Default timeout, without this, if a user does not specify a timeout in the config, it fails with deadline exceeded
	// while building transport in go container.
	timeout := time.Second * 5
	if remote.GetHttpUri().Timeout != nil {
		// This is always 30s, because the timeout is set by the control plane when converted to WasmPluginWrapper.
		// see buildDataSource() in pilot/pkg/model/extensions.go
		timeout = remote.GetHttpUri().Timeout.AsDuration()
	}
	// Fetch (or reuse from cache) the module; f is a local filesystem path.
	f, err := cache.Get(httpURI.GetUri(), GetOptions{
		Checksum:        remote.Sha256,
		ResourceName:    resourceName,
		ResourceVersion: resourceVersion,
		RequestTimeout:  timeout,
		PullSecret:      pullSecret,
		PullPolicy:      pullPolicy,
	})
	if err != nil {
		*status = fetchFailure
		return fmt.Errorf("cannot fetch Wasm module %v: %w", remote.GetHttpUri().GetUri(), err)
	}
	// Rewrite remote fetch to local file.
	vm.Code = &core.AsyncDataSource{
		Specifier: &core.AsyncDataSource_Local{
			Local: &core.DataSource{
				Specifier: &core.DataSource_Filename{
					Filename: f,
				},
			},
		},
	}
	return nil
}
package wasm
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"fmt"
"io"
"net/http"
"time"
"istio.io/istio/pkg/backoff"
lengthchecker "github.com/AdamKorcz/bugdetectors/other"
io2 "github.com/AdamKorcz/bugdetectors/io"
)
var (
	// tarMagicNumber is the POSIX tar magic "ustar", stored at offset 257 of a tar header.
	tarMagicNumber = []byte{0x75, 0x73, 0x74, 0x61, 0x72}
	// gzMagicNumber is the two-byte gzip stream magic.
	gzMagicNumber = []byte{0x1f, 0x8b}
)
// HTTPFetcher downloads Wasm modules over HTTP(S) with bounded retries.
type HTTPFetcher struct {
	// client verifies TLS certificates; insecureClient skips verification.
	client          *http.Client
	insecureClient  *http.Client
	// initialBackoff seeds the exponential backoff between retry attempts.
	initialBackoff  time.Duration
	// requestMaxRetry bounds the number of download attempts in Fetch.
	requestMaxRetry int
}
// NewHTTPFetcher creates a new HTTP fetcher. A zero requestTimeout falls back
// to 5 seconds; requestMaxRetry bounds download attempts in Fetch.
func NewHTTPFetcher(requestTimeout time.Duration, requestMaxRetry int) *HTTPFetcher {
	if requestTimeout == 0 {
		requestTimeout = 5 * time.Second
	}
	// The insecure client reuses the default transport settings but disables
	// TLS certificate verification.
	insecureTransport := http.DefaultTransport.(*http.Transport).Clone()
	insecureTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	fetcher := &HTTPFetcher{
		initialBackoff:  time.Millisecond * 500,
		requestMaxRetry: requestMaxRetry,
	}
	fetcher.client = &http.Client{Timeout: requestTimeout}
	fetcher.insecureClient = &http.Client{
		Timeout:   requestTimeout,
		Transport: insecureTransport,
	}
	return fetcher
}
// Fetch downloads a Wasm module from url, retrying transient transport errors
// and retryable HTTP status codes with exponential backoff, up to
// f.requestMaxRetry attempts. On success the payload is unwrapped from any
// gzip/tar packaging. allowInsecure selects the TLS-verification-free client.
func (f *HTTPFetcher) Fetch(ctx context.Context, url string, allowInsecure bool) ([]byte, error) {
	c := f.client
	if allowInsecure {
		c = f.insecureClient
	}
	attempts := 0
	o := backoff.DefaultOption()
	o.InitialInterval = f.initialBackoff
	b := backoff.NewExponentialBackOff(o)
	var lastError error
	for attempts < f.requestMaxRetry {
		attempts++
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			// A malformed request is not retryable.
			wasmLog.Debugf("wasm module download request failed: %v", err)
			return nil, err
		}
		resp, err := c.Do(req)
		if err != nil {
			lastError = err
			wasmLog.Debugf("wasm module download request failed: %v", err)
			// Stop immediately when the context was canceled or timed out.
			if ctx.Err() != nil {
				return nil, fmt.Errorf("wasm module download failed after %v attempts, last error: %v", attempts, lastError)
			}
			time.Sleep(b.NextBackOff())
			continue
		}
		if resp.StatusCode == http.StatusOK {
			// Body reads are capped at 256 MiB to bound memory use.
			// NOTE(review): io2.ReadAll appears to be fuzz-harness instrumentation
			// wrapping io.ReadAll — confirm before porting this code elsewhere.
			body, err := io2.ReadAll(io.LimitReader(resp.Body, 1024*1024*256), "/src/istio/pkg/wasm/httpfetcher.go:81:17 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
			if err != nil {
				return nil, err
			}
			err = resp.Body.Close()
			if err != nil {
				wasmLog.Infof("wasm server connection is not closed: %v", err)
			}
			return unboxIfPossible(body), err
		}
		lastError = fmt.Errorf("wasm module download request failed: status code %v", resp.StatusCode)
		if retryable(resp.StatusCode) {
			// Drain (bounded) and log the error body, then back off and retry.
			body, err := io2.ReadAll(io.LimitReader(resp.Body, 1024*1024*256), "/src/istio/pkg/wasm/httpfetcher.go:94:17 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
			if err != nil {
				return nil, err
			}
			wasmLog.Debugf("wasm module download failed: status code %v, body %v", resp.StatusCode, string(body))
			err = resp.Body.Close()
			if err != nil {
				wasmLog.Infof("wasm server connection is not closed: %v", err)
			}
			time.Sleep(b.NextBackOff())
			continue
		}
		// Non-retryable status: close the body and give up.
		err = resp.Body.Close()
		if err != nil {
			wasmLog.Infof("wasm server connection is not closed: %v", err)
		}
		break
	}
	return nil, fmt.Errorf("wasm module download failed after %v attempts, last error: %v", attempts, lastError)
}
// retryable reports whether an HTTP status code from a module download is
// worth retrying: any 5xx, except those that indicate a permanent condition.
func retryable(code int) bool {
	if code < 500 {
		return false
	}
	switch code {
	case http.StatusNotImplemented,
		http.StatusHTTPVersionNotSupported,
		http.StatusNetworkAuthenticationRequired:
		return false
	}
	return true
}
// isPosixTar reports whether b looks like a POSIX (ustar) tar archive:
// the five-byte magic "ustar" stored at offset 257 of the header, with at
// least one byte following it.
func isPosixTar(b []byte) bool {
	if len(b) <= 262 {
		return false
	}
	return bytes.Equal(b[257:262], []byte("ustar"))
}
// getFirstFileFromTar returns the contents of the first entry of the tar
// archive in b, or nil if b cannot be read as a tar archive or the entry
// cannot be fully read. Reads from the archive are capped at 256 MiB.
func getFirstFileFromTar(b []byte) []byte {
	buf := bytes.NewBuffer(b)
	tr := tar.NewReader(io.LimitReader(buf, 1024*1024*256))
	h, err := tr.Next()
	if err != nil {
		return nil
	}
	// NOTE(review): lengthchecker.CheckLength appears to be fuzz-harness
	// instrumentation guarding the allocation sized from the (attacker
	// controllable) header — confirm before porting this code elsewhere.
	ret := make([]byte, lengthchecker.CheckLength(h.Size))
	_, err = io.ReadFull(tr, ret)
	if err != nil {
		return nil
	}
	return ret
}
// isGZ reports whether b begins with the gzip magic bytes (0x1f 0x8b) and
// carries at least one byte beyond them.
func isGZ(b []byte) bool {
	if len(b) <= 2 {
		return false
	}
	return b[0] == 0x1f && b[1] == 0x8b
}
// getFileFromGZ gunzips b and returns the decompressed bytes, or nil if b is
// not a readable gzip stream.
func getFileFromGZ(b []byte) []byte {
	buf := bytes.NewBuffer(b)
	zr, err := gzip.NewReader(buf)
	if err != nil {
		return nil
	}
	// NOTE(review): io2.ReadAll appears to be fuzz-harness instrumentation
	// wrapping io.ReadAll — confirm before porting this code elsewhere.
	ret, err := io2.ReadAll(zr, "/src/istio/pkg/wasm/httpfetcher.go:156:14 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
	if err != nil {
		return nil
	}
	return ret
}
// unboxIfPossible repeatedly unwraps gzip and tar envelopes around origin
// until a valid Wasm binary is found. If any unwrap step fails, or the
// content is not recognized as Wasm/gzip/tar, the original bytes are
// returned unchanged.
func unboxIfPossible(origin []byte) []byte {
	current := origin
	for {
		switch {
		case isValidWasmBinary(current):
			return current
		case isGZ(current):
			unzipped := getFileFromGZ(current)
			if unzipped == nil {
				return origin
			}
			current = unzipped
		case isPosixTar(current):
			first := getFirstFileFromTar(current)
			if first == nil {
				return origin
			}
			current = first
		default:
			return origin
		}
	}
}
package wasm
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"crypto/tls"
"errors"
"fmt"
"io"
"net/http"
"path/filepath"
"strings"
"github.com/docker/cli/cli/config/configfile"
lengthchecker "github.com/AdamKorcz/bugdetectors/other"
io2 "github.com/AdamKorcz/bugdetectors/io"
dtypes "github.com/docker/cli/cli/config/types"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/types"
"github.com/hashicorp/go-multierror"
)
// ImageFetcherOption configures how a Wasm OCI image is pulled.
type ImageFetcherOption struct {
	// PullSecret is a docker config JSON used for registry authentication;
	// nil means the default keychain is used instead.
	PullSecret []byte
	// Insecure disables TLS certificate verification for the registry.
	Insecure bool
}
// useDefaultKeyChain reports whether no pull secret was supplied, in which
// case the ambient default keychain is used for registry authentication.
func (o *ImageFetcherOption) useDefaultKeyChain() bool {
	return o.PullSecret == nil
}
// String renders the option for logging; pull secret content is redacted.
func (o ImageFetcherOption) String() string {
	if o.PullSecret != nil {
		return fmt.Sprintf("{Insecure: %v, PullSecret: <redacted>}", o.Insecure)
	}
	return fmt.Sprintf("{Insecure: %v}", o.Insecure)
}
// ImageFetcher pulls Wasm OCI images from a registry using the configured
// remote options (auth keychain, transport, context).
type ImageFetcher struct {
	// fetchOpts are applied to every go-containerregistry remote call.
	fetchOpts []remote.Option
}
// NewImageFetcher builds an ImageFetcher whose remote options carry the
// authentication keychain, an (optionally insecure) transport, and ctx.
func NewImageFetcher(ctx context.Context, opt ImageFetcherOption) *ImageFetcher {
	var keychain authn.Keychain
	if opt.useDefaultKeyChain() {
		keychain = authn.DefaultKeychain
	} else {
		keychain = &wasmKeyChain{data: opt.PullSecret}
	}
	opts := make([]remote.Option, 0, 2)
	opts = append(opts, remote.WithAuthFromKeychain(keychain))
	if opt.Insecure {
		// Clone the default transport and disable certificate verification.
		insecure := remote.DefaultTransport.(*http.Transport).Clone()
		insecure.TLSClientConfig = &tls.Config{
			InsecureSkipVerify: opt.Insecure,
		}
		opts = append(opts, remote.WithTransport(insecure))
	}
	return &ImageFetcher{
		fetchOpts: append(opts, remote.WithContext(ctx)),
	}
}
// PrepareFetch parses url as an OCI image reference, fetches its manifest,
// and returns a deferred binaryFetcher that extracts the Wasm binary, plus
// the image's actual digest (hex). If the registry answers TLS with a plain
// HTTP response, the fetch is retried once with an insecure reference.
func (o *ImageFetcher) PrepareFetch(url string) (binaryFetcher func() ([]byte, error), actualDigest string, err error) {
	ref, err := name.ParseReference(url)
	if err != nil {
		err = fmt.Errorf("could not parse url in image reference: %v", err)
		return
	}
	wasmLog.Infof("fetching image %s from registry %s with tag %s", ref.Context().RepositoryStr(),
		ref.Context().RegistryStr(), ref.Identifier())
	desc, err := remote.Get(ref, o.fetchOpts...)
	// Plain-HTTP registry fallback: re-parse the reference as insecure and retry once.
	if err != nil && strings.Contains(err.Error(), "server gave HTTP response") {
		wasmLog.Infof("fetching image with plain text from %s", url)
		ref, err = name.ParseReference(url, name.Insecure)
		if err == nil {
			desc, err = remote.Get(ref, o.fetchOpts...)
		}
	}
	if err != nil {
		err = fmt.Errorf("could not fetch manifest: %v", err)
		return
	}
	img, err := desc.Image()
	if err != nil {
		err = fmt.Errorf("could not fetch image: %v", err)
		return
	}
	// NOTE(review): the digest error is deliberately discarded; on failure
	// actualDigest is the zero hash's hex — confirm callers tolerate that.
	d, _ := img.Digest()
	actualDigest = d.Hex
	binaryFetcher = func() ([]byte, error) {
		manifest, err := img.Manifest()
		if err != nil {
			return nil, fmt.Errorf("could not retrieve manifest: %v", err)
		}
		// Docker-format images have a dedicated extraction path.
		if manifest.MediaType == types.DockerManifestSchema2 {
			ret, err := extractDockerImage(img)
			if err != nil {
				return nil, fmt.Errorf("could not extract Wasm file from the image as Docker container %v", err)
			}
			return ret, nil
		}
		// Otherwise try the two OCI layouts in order: the compat variant
		// first, then the wasm artifact variant; report both errors on failure.
		ret, errCompat := extractOCIStandardImage(img)
		if errCompat == nil {
			return ret, nil
		}
		ret, errOCI := extractOCIArtifactImage(img)
		if errOCI == nil {
			return ret, nil
		}
		return nil, fmt.Errorf("the given image is in invalid format as an OCI image: %v",
			multierror.Append(err,
				fmt.Errorf("could not parse as compat variant: %v", errCompat),
				fmt.Errorf("could not parse as oci variant: %v", errOCI),
			),
		)
	}
	return
}
// extractDockerImage extracts the Wasm binary from a Docker-format image:
// the plugin.wasm file is expected inside the last layer, which must carry
// the Docker layer media type (a gzipped tar).
func extractDockerImage(img v1.Image) ([]byte, error) {
	layers, err := img.Layers()
	if err != nil {
		return nil, fmt.Errorf("could not fetch layers: %v", err)
	}
	if len(layers) == 0 {
		return nil, errors.New("number of layers must be greater than zero")
	}
	// Only the topmost (last) layer is inspected.
	last := layers[len(layers)-1]
	mediaType, err := last.MediaType()
	if err != nil {
		return nil, fmt.Errorf("could not get media type: %v", err)
	}
	if mediaType != types.DockerLayer {
		return nil, fmt.Errorf("invalid media type %s (expect %s)", mediaType, types.DockerLayer)
	}
	content, err := last.Compressed()
	if err != nil {
		return nil, fmt.Errorf("could not get layer content: %v", err)
	}
	defer content.Close()
	binary, err := extractWasmPluginBinary(content)
	if err != nil {
		return nil, fmt.Errorf("could not extract wasm binary: %v", err)
	}
	return binary, nil
}
// extractOCIStandardImage extracts the Wasm binary from an OCI-standard
// ("compat" variant) image: the plugin.wasm file is expected inside the last
// layer, which must carry the OCI layer media type (a gzipped tar).
// Consistency fix: use errors.New for the constant message and the natural
// comparison order, matching extractDockerImage.
func extractOCIStandardImage(img v1.Image) ([]byte, error) {
	layers, err := img.Layers()
	if err != nil {
		return nil, fmt.Errorf("could not fetch layers: %v", err)
	}
	if len(layers) == 0 {
		return nil, errors.New("number of layers must be greater than zero")
	}
	// Only the topmost (last) layer is inspected.
	layer := layers[len(layers)-1]
	mt, err := layer.MediaType()
	if err != nil {
		return nil, fmt.Errorf("could not get media type: %v", err)
	}
	if mt != types.OCILayer {
		return nil, fmt.Errorf("invalid media type %s (expect %s)", mt, types.OCILayer)
	}
	r, err := layer.Compressed()
	if err != nil {
		return nil, fmt.Errorf("could not get layer content: %v", err)
	}
	defer r.Close()
	ret, err := extractWasmPluginBinary(r)
	if err != nil {
		return nil, fmt.Errorf("could not extract wasm binary: %v", err)
	}
	return ret, nil
}
// extractWasmPluginBinary scans a gzipped tar stream for a file named
// plugin.wasm (in any directory) and returns its contents.
// Fix: the original allocated a buffer sized from the header for EVERY tar
// entry, even non-matching ones; the allocation now happens only for the
// matching entry.
func extractWasmPluginBinary(r io.Reader) ([]byte, error) {
	gr, err := gzip.NewReader(r)
	if err != nil {
		return nil, fmt.Errorf("failed to parse layer as tar.gz: %v", err)
	}

	// The only expected file name in the archive.
	const wasmPluginFileName = "plugin.wasm"
	// Cap the decompressed stream at 256 MiB to guard against decompression bombs.
	tr := tar.NewReader(io.LimitReader(gr, 1024*1024*256))
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return nil, err
		}
		if filepath.Base(h.Name) != wasmPluginFileName {
			continue
		}
		// NOTE(review): lengthchecker.CheckLength appears to be fuzz-harness
		// instrumentation guarding this header-sized allocation.
		ret := make([]byte, lengthchecker.CheckLength(h.Size))
		if _, err := io.ReadFull(tr, ret); err != nil {
			return nil, fmt.Errorf("failed to read %s: %v", wasmPluginFileName, err)
		}
		return ret, nil
	}
	return nil, fmt.Errorf("%s not found in the archive", wasmPluginFileName)
}
// extractOCIArtifactImage extracts the Wasm binary from an OCI artifact
// ("wasm variant") image: exactly two layers are expected, one of which must
// carry the wasm content media type and hold the binary directly.
func extractOCIArtifactImage(img v1.Image) ([]byte, error) {
	layers, err := img.Layers()
	if err != nil {
		return nil, fmt.Errorf("could not fetch layers: %v", err)
	}
	// The image must consist of the wasm layer plus a config layer.
	if len(layers) != 2 {
		return nil, fmt.Errorf("number of layers must be 2 but got %d", len(layers))
	}
	const wasmLayerMediaType = "application/vnd.module.wasm.content.layer.v1+wasm"
	// Find the layer carrying the wasm content media type.
	var layer v1.Layer
	for _, l := range layers {
		mt, err := l.MediaType()
		if err != nil {
			return nil, fmt.Errorf("could not retrieve the media type: %v", err)
		}
		if mt == wasmLayerMediaType {
			layer = l
			break
		}
	}
	if layer == nil {
		return nil, fmt.Errorf("could not find the layer of type %s", wasmLayerMediaType)
	}
	r, err := layer.Compressed()
	if err != nil {
		return nil, fmt.Errorf("could not get layer content: %v", err)
	}
	defer r.Close()
	// NOTE(review): io2.ReadAll appears to be fuzz-harness instrumentation
	// wrapping io.ReadAll — confirm before porting this code elsewhere.
	ret, err := io2.ReadAll(r, "/src/istio/pkg/wasm/imagefetcher.go:266:14 (May be slightly inaccurate) NEW_LINEio.ReadAll", true)
	if err != nil {
		return nil, fmt.Errorf("could not extract wasm binary: %v", err)
	}
	return ret, nil
}
// wasmKeyChain is an authn.Keychain backed by an in-memory docker config JSON
// (the pull secret supplied in ImageFetcherOption).
type wasmKeyChain struct {
	// data holds the raw docker config JSON bytes.
	data []byte
}
// Resolve implements authn.Keychain: it parses k.data as a docker config file
// and returns the authenticator for the target registry, or anonymous auth
// when no matching credentials are configured.
func (k *wasmKeyChain) Resolve(target authn.Resource) (authn.Authenticator, error) {
	// The literal JSON "null" is rejected outright.
	if bytes.Equal(k.data, []byte("null")) {
		return nil, fmt.Errorf("")
	}
	reader := bytes.NewReader(k.data)
	cf := configfile.ConfigFile{}
	if err := cf.LoadFromReader(reader); err != nil {
		return nil, err
	}
	key := target.RegistryStr()
	// Docker Hub credentials are stored under the special default auth key.
	if key == name.DefaultRegistry {
		key = authn.DefaultAuthKey
	}
	cfg, err := cf.GetAuthConfig(key)
	if err != nil {
		return nil, err
	}
	// An empty auth entry means no credentials for this registry: fall back
	// to anonymous access.
	empty := dtypes.AuthConfig{}
	if cfg == empty {
		return authn.Anonymous, nil
	}
	authConfig := authn.AuthConfig{
		Username:      cfg.Username,
		Password:      cfg.Password,
		Auth:          cfg.Auth,
		IdentityToken: cfg.IdentityToken,
		RegistryToken: cfg.RegistryToken,
	}
	return authn.FromConfig(authConfig), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package wasm
import (
"time"
extensions "istio.io/api/extensions/v1alpha1"
"istio.io/istio/pkg/util/sets"
)
const (
	// DefaultPurgeInterval is how often the cache sweeps for expired modules.
	DefaultPurgeInterval = 1 * time.Hour
	// DefaultModuleExpiry is how long an unused module stays cached.
	DefaultModuleExpiry = 24 * time.Hour
	// DefaultHTTPRequestTimeout bounds a single module download request.
	DefaultHTTPRequestTimeout = 15 * time.Second
	// DefaultHTTPRequestMaxRetries bounds download attempts per fetch.
	DefaultHTTPRequestMaxRetries = 5
)
// Options contains configurations to create a Cache instance.
type Options struct {
	// PurgeInterval is how often expired modules are swept from the cache.
	PurgeInterval time.Duration
	// ModuleExpiry is how long an unused module stays cached.
	ModuleExpiry time.Duration
	// InsecureRegistries lists registries reachable without TLS verification.
	InsecureRegistries sets.String
	// HTTPRequestTimeout bounds a single module download request.
	HTTPRequestTimeout time.Duration
	// HTTPRequestMaxRetries bounds download attempts per fetch.
	HTTPRequestMaxRetries int
}
// defaultOptions returns an Options populated with the package defaults and
// an empty insecure-registry set.
func defaultOptions() Options {
	var opts Options
	opts.PurgeInterval = DefaultPurgeInterval
	opts.ModuleExpiry = DefaultModuleExpiry
	opts.InsecureRegistries = sets.New[string]()
	opts.HTTPRequestTimeout = DefaultHTTPRequestTimeout
	opts.HTTPRequestMaxRetries = DefaultHTTPRequestMaxRetries
	return opts
}
// GetOptions is a struct for providing options to Get method of Cache.
type GetOptions struct {
	// Checksum is the expected sha256 of the module ("" disables checking).
	Checksum string
	// ResourceName and ResourceVersion identify the ECDS resource requesting the module.
	ResourceName    string
	ResourceVersion string
	// RequestTimeout bounds the download of this module.
	RequestTimeout time.Duration
	// PullSecret is the docker config JSON used for OCI registry auth.
	PullSecret []byte
	// PullPolicy controls when a cached module is re-pulled.
	PullPolicy extensions.PullPolicy
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhooks
import (
"istio.io/istio/pkg/monitoring"
)
var (
	// webhookConfigNameTag holds the target webhook config name for the context.
	webhookConfigNameTag = monitoring.CreateLabel("name")
	// reasonTag holds the error reason for the context.
	reasonTag = monitoring.CreateLabel("reason")
)
var (
	// metricWebhookPatchAttempts counts every attempt to patch a webhook config.
	metricWebhookPatchAttempts = monitoring.NewSum(
		"webhook_patch_attempts_total",
		"Webhook patching attempts",
	)
	// metricWebhookPatchRetries counts attempts that had to be retried.
	metricWebhookPatchRetries = monitoring.NewSum(
		"webhook_patch_retries_total",
		"Webhook patching retries",
	)
	// metricWebhookPatchFailures counts terminal patch failures, labeled by reason.
	metricWebhookPatchFailures = monitoring.NewSum(
		"webhook_patch_failures_total",
		"Webhook patching total failures",
	)
)
const (
	// webhook patching failure reasons, used as the "reason" metric label
	reasonWrongRevision         = "wrong_revision"
	reasonLoadCABundleFailure   = "load_ca_bundle_failure"
	reasonWebhookConfigNotFound = "webhook_config_not_found"
	reasonWebhookEntryNotFound  = "webhook_entry_not_found"
	reasonWebhookUpdateFailure  = "webhook_update_failure"
)
// reportWebhookPatchAttempts counts one patch attempt for the named webhook config.
func reportWebhookPatchAttempts(webhookConfigName string) {
	m := metricWebhookPatchAttempts.With(webhookConfigNameTag.Value(webhookConfigName))
	m.Increment()
}
// reportWebhookPatchRetry counts one patch retry for the named webhook config.
func reportWebhookPatchRetry(webhookConfigName string) {
	m := metricWebhookPatchRetries.With(webhookConfigNameTag.Value(webhookConfigName))
	m.Increment()
}
// reportWebhookPatchFailure counts one terminal patch failure for the named
// webhook config, labeled with the failure reason.
func reportWebhookPatchFailure(webhookConfigName string, reason string) {
	m := metricWebhookPatchFailures.
		With(webhookConfigNameTag.Value(webhookConfigName)).
		With(reasonTag.Value(reason))
	m.Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"istio.io/istio/pilot/pkg/keycertbundle"
)
// ConfigError pairs an error with a short machine-readable reason string
// used for metrics reporting.
type ConfigError struct {
	// err is the underlying error.
	err error
	// reason is the short categorization of the failure.
	reason string
}

// Error returns the message of the wrapped error.
func (e ConfigError) Error() string {
	return e.err.Error()
}

// Reason returns the short reason tag associated with the error.
func (e ConfigError) Reason() string {
	return e.reason
}
// LoadCABundle reads the CA bundle from the watcher and validates that it is
// a parseable PEM-encoded x509 certificate before returning it. Validation
// failures are wrapped in a *ConfigError for metrics reporting.
func LoadCABundle(caBundleWatcher *keycertbundle.Watcher) ([]byte, error) {
	bundle := caBundleWatcher.GetCABundle()
	err := VerifyCABundle(bundle)
	if err != nil {
		return nil, &ConfigError{err, "could not verify caBundle"}
	}
	return bundle, nil
}
// VerifyCABundle checks that caBundle starts with a PEM "CERTIFICATE" block
// holding a valid x509 certificate. Only the first PEM block is inspected.
func VerifyCABundle(caBundle []byte) error {
	block, _ := pem.Decode(caBundle)
	if block == nil {
		return errors.New("could not decode pem")
	}
	if block.Type != "CERTIFICATE" {
		return fmt.Errorf("cert contains wrong pem type: %q", block.Type)
	}
	_, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return fmt.Errorf("cert contains invalid x509 certificate: %v", err)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package controller implements a k8s controller for managing the lifecycle of a validating webhook.
package controller
import (
"bytes"
"context"
"errors"
"fmt"
"math"
"strings"
"time"
"github.com/hashicorp/go-multierror"
kubeApiAdmission "k8s.io/api/admissionregistration/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"istio.io/api/label"
networking "istio.io/api/networking/v1alpha3"
"istio.io/client-go/pkg/apis/networking/v1alpha3"
"istio.io/istio/pilot/pkg/keycertbundle"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/ptr"
"istio.io/istio/pkg/webhooks/util"
)
// scope is the logging scope for the validation webhook controller.
var scope = log.RegisterScope("validationController", "validation webhook controller")
// Options configures the validating webhook controller.
type Options struct {
	// Istio system namespace where istiod resides.
	WatchedNamespace string
	// File path to the x509 certificate bundle used by the webhook server
	// and patched into the webhook config.
	CABundleWatcher *keycertbundle.Watcher
	// Revision for control plane performing patching on the validating webhook.
	Revision string
	// Name of the service running the webhook server.
	ServiceName string
}
// Validate checks the user-exposed options, accumulating every problem into a
// single combined error; nil means the options are valid.
func (o Options) Validate() error {
	var errs *multierror.Error
	record := func(err error) {
		errs = multierror.Append(errs, err)
	}
	if o.WatchedNamespace == "" || !labels.IsDNS1123Label(o.WatchedNamespace) {
		record(fmt.Errorf("invalid namespace: %q", o.WatchedNamespace))
	}
	if o.ServiceName == "" || !labels.IsDNS1123Label(o.ServiceName) {
		record(fmt.Errorf("invalid service name: %q", o.ServiceName))
	}
	if o.CABundleWatcher == nil {
		record(errors.New("CA bundle watcher not specified"))
	}
	return errs.ErrorOrNil()
}
// String produces a string field version of the arguments for debugging.
func (o Options) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "WatchedNamespace: %v\n", o.WatchedNamespace)
	fmt.Fprintf(&b, "Revision: %v\n", o.Revision)
	fmt.Fprintf(&b, "ServiceName: %v\n", o.ServiceName)
	return b.String()
}
// Controller patches the CA bundle into ValidatingWebhookConfigurations for
// its revision and manages the fail-open/fail-close transition.
type Controller struct {
	o      Options
	client kube.Client
	// queue drives Reconcile with rate-limited retries.
	queue controllers.Queue
	// dryRunOfInvalidConfigRejected records whether the webhook endpoint has
	// demonstrably rejected an invalid config (precondition for fail-close).
	dryRunOfInvalidConfigRejected bool
	// webhooks is an informer filtered to this revision's webhook configs.
	webhooks kclient.Client[*kubeApiAdmission.ValidatingWebhookConfiguration]
}
// NewValidatingWebhookController creates a new Controller patching webhook
// configs for the given revision, served by the "istiod" service in ns.
func NewValidatingWebhookController(client kube.Client,
	revision, ns string, caBundleWatcher *keycertbundle.Watcher,
) *Controller {
	opts := Options{
		WatchedNamespace: ns,
		CABundleWatcher:  caBundleWatcher,
		Revision:         revision,
		ServiceName:      "istiod",
	}
	return newController(opts, client)
}
// newController wires up the controller: a rate-limited work queue driving
// Reconcile, and an informer filtered to ValidatingWebhookConfigurations
// labeled with this controller's revision.
func newController(o Options, client kube.Client) *Controller {
	c := &Controller{
		o:      o,
		client: client,
	}
	c.queue = controllers.NewQueue("validation",
		controllers.WithReconciler(c.Reconcile),
		// Webhook patching has to be retried forever. But the retries would be rate limited.
		controllers.WithMaxAttempts(math.MaxInt),
		// Retry with backoff. Failures could be from conflicts of other instances (quick retry helps), or
		// longer lasting concerns which will eventually be retried on 1min interval.
		// Unlike the mutating webhook controller, we do not use NewItemFastSlowRateLimiter. This is because
		// the validation controller waits for its own service to be ready, so typically this takes a few seconds
		// before we are ready; using FastSlow means we tend to always take the Slow time (1min).
		controllers.WithRateLimiter(workqueue.NewItemExponentialFailureRateLimiter(100*time.Millisecond, 1*time.Minute)))
	c.webhooks = kclient.NewFiltered[*kubeApiAdmission.ValidatingWebhookConfiguration](client, kclient.Filter{
		LabelSelector: fmt.Sprintf("%s=%s", label.IoIstioRev.Name, o.Revision),
	})
	// Enqueue a reconcile for every webhook config add/update/delete.
	c.webhooks.AddEventHandler(controllers.ObjectHandler(c.queue.AddObject))
	return c
}
// Reconcile patches the CA bundle (and fail-close readiness) into the named
// ValidatingWebhookConfiguration. A missing webhook is skipped rather than
// retried; a not-yet-ready webhook returns an error so the queue retries.
func (c *Controller) Reconcile(key types.NamespacedName) error {
	name := key.Name
	whc := c.webhooks.Get(name, "")
	scope := scope.WithLabels("webhook", name)
	// Stop early if webhook is not present, rather than attempting (and failing) to reconcile permanently
	// If the webhook is later added a new reconciliation request will trigger it to update
	if whc == nil {
		scope.Infof("Skip patching webhook, not found")
		return nil
	}
	scope.Debugf("Reconcile(enter)")
	defer func() { scope.Debugf("Reconcile(exit)") }()
	caBundle, err := util.LoadCABundle(c.o.CABundleWatcher)
	if err != nil {
		scope.Errorf("Failed to load CA bundle: %v", err)
		// Robustness fix: use errors.As instead of the unchecked type
		// assertion err.(*util.ConfigError), which would panic if LoadCABundle
		// ever returned a different error type.
		reason := "unknown"
		var cfgErr *util.ConfigError
		if errors.As(err, &cfgErr) {
			reason = cfgErr.Reason()
		}
		reportValidationConfigLoadError(reason)
		// no point in retrying unless cert file changes.
		return nil
	}
	ready := c.readyForFailClose()
	if err := c.updateValidatingWebhookConfiguration(whc, caBundle, ready); err != nil {
		return fmt.Errorf("fail to update webhook: %v", err)
	}
	if !ready {
		return fmt.Errorf("webhook is not ready, retry")
	}
	return nil
}
// Run blocks processing the work queue until stop is closed, after waiting
// for the webhook informer cache to sync and starting the CA bundle watcher.
func (c *Controller) Run(stop <-chan struct{}) {
	kube.WaitForCacheSync("validation", stop, c.webhooks.HasSynced)
	go c.startCaBundleWatcher(stop)
	c.queue.Run(stop)
}
// startCaBundleWatcher listens for updates to the CA bundle and re-enqueues
// every known webhook so the new bundle gets patched in. Returns when stop
// is closed. No-op if no CA bundle watcher was configured.
// NOTE(review): original comment asks whether this should also cover mutating
// webhooks — unresolved question, left as-is.
func (c *Controller) startCaBundleWatcher(stop <-chan struct{}) {
	if c.o.CABundleWatcher == nil {
		return
	}
	id, watchCh := c.o.CABundleWatcher.AddWatcher()
	defer c.o.CABundleWatcher.RemoveWatcher(id)
	for {
		select {
		case <-watchCh:
			// Bundle changed: resync every webhook config.
			c.syncAll()
		case <-stop:
			return
		}
	}
}
// readyForFailClose reports whether validation can be switched to fail-closed:
// the endpoint must have demonstrably rejected an invalid config at least
// once. The first successful check is latched and triggers a full resync.
func (c *Controller) readyForFailClose() bool {
	if c.dryRunOfInvalidConfigRejected {
		return true
	}
	rejected, reason := c.isDryRunOfInvalidConfigRejected()
	if !rejected {
		scope.Infof("Not ready to switch validation to fail-closed: %v", reason)
		return false
	}
	scope.Info("Endpoint successfully rejected invalid config. Switching to fail-close.")
	c.dryRunOfInvalidConfigRejected = true
	// Sync all webhooks; this ensures if we have multiple webhooks all of them are updated
	c.syncAll()
	return true
}
const (
	// Fragments matched against API-server error text to classify the dry-run result.
	deniedRequestMessageFragment     = `denied the request`
	missingResourceMessageFragment   = `the server could not find the requested resource`
	unsupportedDryRunMessageFragment = `does not support dry run`
)
// Confirm invalid configuration is successfully rejected before switching to FAIL-CLOSE.
// A deliberately invalid Gateway is dry-run created (or updated, if it already
// exists) and the API-server error text is classified: a webhook denial means
// validation works; a missing CRD or a webhook without dry-run support is
// treated as "ready" because retrying cannot help.
func (c *Controller) isDryRunOfInvalidConfigRejected() (rejected bool, reason string) {
	invalidGateway := &v1alpha3.Gateway{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "invalid-gateway",
			Namespace: c.o.WatchedNamespace,
			// Must ensure that this is the revision validating the known-bad config
			Labels: map[string]string{
				label.IoIstioRev.Name: c.o.Revision,
			},
			Annotations: map[string]string{
				// Add always-reject annotation. For now, we are invalid for two reasons: missing `spec.servers`, and this
				// annotation. In the future, the CRD will reject a missing `spec.servers` before we hit the webhook, so we will
				// only have that annotation. For backwards compatibility, we keep both methods for some time.
				constants.AlwaysReject: "true",
			},
		},
		Spec: networking.Gateway{},
	}
	createOptions := metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}}
	istioClient := c.client.Istio().NetworkingV1alpha3()
	_, err := istioClient.Gateways(c.o.WatchedNamespace).Create(context.TODO(), invalidGateway, createOptions)
	// If the dummy already exists, probe via a dry-run update instead.
	if kerrors.IsAlreadyExists(err) {
		updateOptions := metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}
		_, err = istioClient.Gateways(c.o.WatchedNamespace).Update(context.TODO(), invalidGateway, updateOptions)
	}
	if err == nil {
		return false, "dummy invalid config not rejected"
	}
	// We expect to get deniedRequestMessageFragment (the config was rejected, as expected)
	if strings.Contains(err.Error(), deniedRequestMessageFragment) {
		return true, ""
	}
	// If the CRD does not exist, we will get this error. This is to handle when Pilot is run
	// without CRDs - in this case, this check will not be possible.
	if strings.Contains(err.Error(), missingResourceMessageFragment) {
		scope.Warnf("Missing Gateway CRD, cannot perform validation check. Assuming validation is ready")
		return true, ""
	}
	// If some validating webhooks does not support dryRun(sideEffects=Unknown or Some), we will get this error.
	// We should assume validation is ready because there is no point in retrying this request.
	if strings.Contains(err.Error(), unsupportedDryRunMessageFragment) {
		scope.Warnf("One of the validating webhooks does not support DryRun, cannot perform validation check. Assuming validation is ready. Details: %v", err)
		return true, ""
	}
	return false, fmt.Sprintf("dummy invalid rejected for the wrong reason: %v", err)
}
// updateValidatingWebhookConfiguration reconciles the given webhook configuration:
// each webhook entry gets the desired caBundle, and, once `ready` (invalid config
// is provably rejected), the failure policy is set to Fail (fail-closed).
// It is a no-op when nothing would change.
func (c *Controller) updateValidatingWebhookConfiguration(current *kubeApiAdmission.ValidatingWebhookConfiguration,
	caBundle []byte, ready bool,
) error {
	// First decide whether a write is needed at all, to avoid no-op API updates.
	dirty := false
	for i := range current.Webhooks {
		caNeed := !bytes.Equal(current.Webhooks[i].ClientConfig.CABundle, caBundle)
		failureNeed := ready && (current.Webhooks[i].FailurePolicy != nil && *current.Webhooks[i].FailurePolicy != kubeApiAdmission.Fail)
		if caNeed || failureNeed {
			dirty = true
			break
		}
	}
	scope := scope.WithLabels(
		"name", current.Name,
		"fail closed", ready,
		"resource version", current.ResourceVersion,
	)
	if !dirty {
		scope.Infof("up-to-date, no change required")
		return nil
	}
	// Mutate a deep copy; `current` comes from the informer cache and must not
	// be modified in place.
	updated := current.DeepCopy()
	for i := range updated.Webhooks {
		updated.Webhooks[i].ClientConfig.CABundle = caBundle
		if ready {
			updated.Webhooks[i].FailurePolicy = ptr.Of(kubeApiAdmission.Fail)
		}
	}
	latest, err := c.webhooks.Update(updated)
	if err != nil {
		// Fix: message previously read "failed to updated".
		scope.Errorf("failed to update: %v", err)
		reportValidationConfigUpdateError(kerrors.ReasonForError(err))
		return err
	}
	scope.WithLabels("resource version", latest.ResourceVersion).Infof("successfully updated")
	reportValidationConfigUpdate()
	return nil
}
// syncAll re-enqueues every ValidatingWebhookConfiguration currently in the
// informer cache, forcing each one to be reconciled.
func (c *Controller) syncAll() {
	for _, whc := range c.webhooks.List("", klabels.Everything()) {
		c.queue.AddObject(whc)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pkg/monitoring"
)
var (
	// reasonLabel describes reason
	reasonLabel = monitoring.CreateLabel("reason")

	// metricWebhookConfigurationUpdateError counts failed webhook configuration
	// updates, partitioned by the reason label.
	metricWebhookConfigurationUpdateError = monitoring.NewSum(
		"galley_validation_config_update_error",
		"k8s webhook configuration update error",
	)
	// metricWebhookConfigurationUpdates counts successful webhook configuration updates.
	metricWebhookConfigurationUpdates = monitoring.NewSum(
		"galley_validation_config_updates",
		"k8s webhook configuration updates")
	// metricWebhookConfigurationLoadError counts webhook configuration (re)load
	// failures, partitioned by the reason label.
	metricWebhookConfigurationLoadError = monitoring.NewSum(
		"galley_validation_config_load_error",
		"k8s webhook configuration (re)load error",
	)
)
// reportValidationConfigUpdateError increments the update-error counter with
// the Kubernetes status reason as the label value.
func reportValidationConfigUpdateError(reason metav1.StatusReason) {
	metricWebhookConfigurationUpdateError.With(reasonLabel.Value(string(reason))).Increment()
}
// reportValidationConfigLoadError increments the config (re)load error counter
// with the given reason as the label value.
func reportValidationConfigLoadError(reason string) {
	metricWebhookConfigurationLoadError.With(reasonLabel.Value(reason)).Increment()
}
// reportValidationConfigUpdate increments the successful-update counter.
func reportValidationConfigUpdate() {
	metricWebhookConfigurationUpdates.Increment()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"strconv"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/monitoring"
)
// Metric label names used by the validation server metrics below.
const (
	group       = "group"
	version     = "version"
	resourceTag = "resource"
	reason      = "reason"
	status      = "status"
)

var (
	// GroupTag holds the resource group for the context.
	GroupTag = monitoring.CreateLabel(group)
	// VersionTag holds the resource version for the context.
	VersionTag = monitoring.CreateLabel(version)
	// ResourceTag holds the resource name for the context.
	ResourceTag = monitoring.CreateLabel(resourceTag)
	// ReasonTag holds the error reason for the context.
	ReasonTag = monitoring.CreateLabel(reason)
	// StatusTag holds the error code for the context.
	StatusTag = monitoring.CreateLabel(status)
)

var (
	// metricValidationPassed counts admission requests that passed validation,
	// labeled by the resource's group/version/resource.
	metricValidationPassed = monitoring.NewSum(
		"galley_validation_passed",
		"Resource is valid",
	)
	// metricValidationFailed counts admission requests that failed validation,
	// labeled by group/version/resource and failure reason.
	metricValidationFailed = monitoring.NewSum(
		"galley_validation_failed",
		"Resource validation failed",
	)
	// metricValidationHTTPError counts HTTP serving errors, labeled by status code.
	metricValidationHTTPError = monitoring.NewSum(
		"galley_validation_http_error",
		"Resource validation http serve errors",
	)
)
// reportValidationFailed increments the validation-failure counter for the
// request's group/version/resource with the given reason. Dry-run requests
// are not counted.
func reportValidationFailed(request *kube.AdmissionRequest, reason string, dryRun bool) {
	if dryRun {
		return
	}
	res := request.Resource
	failed := metricValidationFailed.
		With(GroupTag.Value(res.Group)).
		With(VersionTag.Value(res.Version)).
		With(ResourceTag.Value(res.Resource)).
		With(ReasonTag.Value(reason))
	failed.Increment()
}
// reportValidationPass increments the validation-pass counter for the
// request's group/version/resource.
func reportValidationPass(request *kube.AdmissionRequest) {
	res := request.Resource
	passed := metricValidationPassed.
		With(GroupTag.Value(res.Group)).
		With(VersionTag.Value(res.Version)).
		With(ResourceTag.Value(res.Resource))
	passed.Increment()
}
// reportValidationHTTPError increments the HTTP-error counter, labeled with
// the numeric HTTP status code.
func reportValidationHTTPError(status int) {
	metricValidationHTTPError.
		With(StatusTag.Value(strconv.Itoa(status))).
		Increment()
}
// Values for the "reason" label on metricValidationFailed.
const (
	reasonUnsupportedOperation = "unsupported_operation"
	reasonYamlDecodeError      = "yaml_decode_error"
	reasonUnknownType          = "unknown_type"
	reasonCRDConversionError   = "crd_conversion_error"
	reasonInvalidConfig        = "invalid_resource"
)
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"net/http"
multierror "github.com/hashicorp/go-multierror"
admissionv1 "k8s.io/api/admission/v1"
kubeApiAdmissionv1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pkg/config/schema/collection"
"istio.io/istio/pkg/config/schema/resource"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/log"
)
// scope is the logging scope for the validation webhook server.
var scope = log.RegisterScope("validationServer", "validation webhook server")

var (
	// runtimeScheme/codecs/deserializer decode incoming AdmissionReview payloads;
	// the admission types are registered on the scheme in init below.
	runtimeScheme = runtime.NewScheme()
	codecs        = serializer.NewCodecFactory(runtimeScheme)
	deserializer  = codecs.UniversalDeserializer()

	// Expect AdmissionRequest to only include these top-level field names
	validFields = map[string]bool{
		"apiVersion": true,
		"kind":       true,
		"metadata":   true,
		"spec":       true,
		"status":     true,
	}
)
// init registers the v1 and v1beta1 admission types with the local scheme so
// AdmissionReview payloads of either version can be deserialized.
func init() {
	// Errors deliberately ignored — presumably registration of these generated
	// types cannot fail here; NOTE(review): confirm.
	_ = admissionv1.AddToScheme(runtimeScheme)
	_ = kubeApiAdmissionv1beta1.AddToScheme(runtimeScheme)
}
// Options contains the configuration for the Istio Pilot validation
// admission controller.
type Options struct {
	// Schemas provides a description of all configuration resources.
	Schemas collection.Schemas

	// DomainSuffix is the DNS domain suffix for Pilot CRD resources,
	// e.g. cluster.local.
	DomainSuffix string

	// Port where the webhook is served. the number should be greater than 1024 for non-root
	// user, because non-root user cannot bind port number less than 1024
	// Mainly used for testing. Webhook server is started by Istiod.
	Port uint

	// Use an existing mux instead of creating our own.
	// Must be non-nil: New returns an error when Mux is unset.
	Mux *http.ServeMux
}
// String produces a stringified version of the arguments for debugging.
func (o Options) String() string {
	// Single formatted string; identical output to the previous buffer-based form.
	return fmt.Sprintf("DomainSuffix: %s\nPort: %d\n", o.DomainSuffix, o.Port)
}
// DefaultArgs allocates an Options struct initialized with Webhook's default configuration.
// Only Port is defaulted; Schemas, DomainSuffix, and Mux must be set by the caller.
func DefaultArgs() Options {
	return Options{
		Port: 9443,
	}
}
// Webhook implements the validating admission webhook for validating Istio configuration.
type Webhook struct {
	// pilot
	// schemas describes all known configuration resources (used for lookup and validation).
	schemas collection.Schemas
	// domainSuffix is passed through to CRD conversion.
	domainSuffix string
}
// New creates a new instance of the admission webhook server.
func New(o Options) (*Webhook, error) {
	if o.Mux == nil {
		scope.Error("mux not set correctly")
		return nil, errors.New("expected mux to be passed, but was not passed")
	}
	wh := &Webhook{
		schemas:      o.Schemas,
		domainSuffix: o.DomainSuffix,
	}
	// Register the handler under both path forms.
	for _, path := range []string{"/validate", "/validate/"} {
		o.Mux.HandleFunc(path, wh.serveValidate)
	}
	return wh, nil
}
// toAdmissionResponse wraps an error in an AdmissionResponse whose status
// message is the error text; Allowed is left at its zero value (false).
func toAdmissionResponse(err error) *kube.AdmissionResponse {
	return &kube.AdmissionResponse{Result: &metav1.Status{Message: err.Error()}}
}

// admitFunc validates a single admission request and produces its response.
type admitFunc func(*kube.AdmissionRequest) *kube.AdmissionResponse
// serve is the shared HTTP plumbing for admission endpoints: it reads and
// sanity-checks the request, decodes the AdmissionReview (v1 or v1beta1 via
// the adapter), delegates to admit, and writes the JSON-encoded response.
func serve(w http.ResponseWriter, r *http.Request, admit admitFunc) {
	var body []byte
	if r.Body != nil {
		if data, err := kube.HTTPConfigReader(r); err == nil {
			body = data
		} else {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}
	if len(body) == 0 {
		reportValidationHTTPError(http.StatusBadRequest)
		http.Error(w, "no body found", http.StatusBadRequest)
		return
	}

	// verify the content type is accurate
	contentType := r.Header.Get("Content-Type")
	if contentType != "application/json" {
		reportValidationHTTPError(http.StatusUnsupportedMediaType)
		http.Error(w, "invalid Content-Type, want `application/json`", http.StatusUnsupportedMediaType)
		return
	}

	var reviewResponse *kube.AdmissionResponse
	var obj runtime.Object
	var ar *kube.AdmissionReview
	if out, _, err := deserializer.Decode(body, nil, obj); err != nil {
		reviewResponse = toAdmissionResponse(fmt.Errorf("could not decode body: %v", err))
	} else {
		ar, err = kube.AdmissionReviewKubeToAdapter(out)
		if err != nil {
			reviewResponse = toAdmissionResponse(fmt.Errorf("could not decode object: %v", err))
		} else {
			reviewResponse = admit(ar.Request)
		}
	}

	// Echo the request's TypeMeta/apiVersion and UID back so the API server can
	// correlate this response with its request.
	response := kube.AdmissionReview{}
	response.Response = reviewResponse
	var responseKube runtime.Object
	var apiVersion string
	if ar != nil {
		apiVersion = ar.APIVersion
		response.TypeMeta = ar.TypeMeta
		if response.Response != nil {
			if ar.Request != nil {
				response.Response.UID = ar.Request.UID
			}
		}
	}
	responseKube = kube.AdmissionReviewAdapterToKube(&response, apiVersion)
	resp, err := json.Marshal(responseKube)
	if err != nil {
		reportValidationHTTPError(http.StatusInternalServerError)
		// Fix: message previously read "could encode response".
		http.Error(w, fmt.Sprintf("could not encode response: %v", err), http.StatusInternalServerError)
		return
	}
	if _, err := w.Write(resp); err != nil {
		reportValidationHTTPError(http.StatusInternalServerError)
		// Fix: message previously read "could write response".
		http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
	}
}
// serveValidate is the HTTP handler for /validate; it runs wh.validate through
// the shared serve plumbing.
func (wh *Webhook) serveValidate(w http.ResponseWriter, r *http.Request) {
	serve(w, r, wh.validate)
}
// validate is the admitFunc for Istio configuration: it decodes the admitted
// object, resolves its schema, converts it, runs semantic validation, and
// rejects unknown top-level fields. Each failure path increments the failure
// metric (skipped for dry runs) and returns a disallowing response.
func (wh *Webhook) validate(request *kube.AdmissionRequest) *kube.AdmissionResponse {
	isDryRun := request.DryRun != nil && *request.DryRun
	// addDryRunMessageIfNeeded marks log messages from dry-run requests so they
	// are distinguishable from real rejections.
	addDryRunMessageIfNeeded := func(errStr string) error {
		err := fmt.Errorf("%s", errStr)
		if isDryRun {
			err = fmt.Errorf("%s (dry run)", err)
		}
		return err
	}
	switch request.Operation {
	case kube.Create, kube.Update:
	default:
		// Other operations (e.g. delete) are not validated; allow them through.
		scope.Warnf("Unsupported webhook operation %v", addDryRunMessageIfNeeded(request.Operation))
		reportValidationFailed(request, reasonUnsupportedOperation, isDryRun)
		return &kube.AdmissionResponse{Allowed: true}
	}

	var obj crd.IstioKind
	if err := json.Unmarshal(request.Object.Raw, &obj); err != nil {
		scope.Infof("cannot decode configuration: %v", addDryRunMessageIfNeeded(err.Error()))
		reportValidationFailed(request, reasonYamlDecodeError, isDryRun)
		return toAdmissionResponse(fmt.Errorf("cannot decode configuration: %v", err))
	}

	gvk := obj.GroupVersionKind()
	// Look up the schema for this GVK (aliases included).
	s, exists := wh.schemas.FindByGroupVersionAliasesKind(resource.FromKubernetesGVK(&gvk))
	if !exists {
		scope.Infof("unrecognized type %v", addDryRunMessageIfNeeded(obj.GroupVersionKind().String()))
		reportValidationFailed(request, reasonUnknownType, isDryRun)
		return toAdmissionResponse(fmt.Errorf("unrecognized type %v", obj.GroupVersionKind()))
	}

	out, err := crd.ConvertObject(s, &obj, wh.domainSuffix)
	if err != nil {
		scope.Infof("error decoding configuration: %v", addDryRunMessageIfNeeded(err.Error()))
		reportValidationFailed(request, reasonCRDConversionError, isDryRun)
		return toAdmissionResponse(fmt.Errorf("error decoding configuration: %v", err))
	}

	// Semantic validation; warnings (non-fatal) are returned to the client below.
	warnings, err := s.ValidateConfig(*out)
	if err != nil {
		scope.Infof("configuration is invalid: %v", addDryRunMessageIfNeeded(err.Error()))
		reportValidationFailed(request, reasonInvalidConfig, isDryRun)
		return toAdmissionResponse(fmt.Errorf("configuration is invalid: %v", err))
	}

	// Reject unexpected top-level fields in the raw object.
	if reason, err := checkFields(request.Object.Raw, request.Kind.Kind, request.Namespace, obj.Name); err != nil {
		reportValidationFailed(request, reason, isDryRun)
		return toAdmissionResponse(err)
	}

	reportValidationPass(request)
	return &kube.AdmissionResponse{Allowed: true, Warnings: toKubeWarnings(warnings)}
}
// toKubeWarnings flattens a validation warning into the string slice form the
// admission response expects; a multierror contributes one entry per error.
func toKubeWarnings(warn validation.Warning) []string {
	if warn == nil {
		return nil
	}
	if me, ok := warn.(*multierror.Error); ok {
		out := make([]string, 0, len(me.Errors))
		for _, e := range me.Errors {
			out = append(out, e.Error())
		}
		return out
	}
	return []string{warn.Error()}
}
// checkFields verifies that the raw object contains only the expected
// top-level fields (see validFields). It returns the metric reason and an
// error on failure, or ("", nil) when the object is clean.
func checkFields(raw []byte, kind string, namespace string, name string) (string, error) {
	fields := map[string]json.RawMessage{}
	if err := json.Unmarshal(raw, &fields); err != nil {
		scope.Infof("cannot decode configuration fields: %v", err)
		return reasonYamlDecodeError, fmt.Errorf("cannot decode configuration fields: %v", err)
	}
	for field := range fields {
		if validFields[field] {
			continue
		}
		scope.Infof("unknown field %q on %s resource %s/%s",
			field, kind, namespace, name)
		return reasonInvalidConfig, fmt.Errorf("unknown field %q on %s resource %s/%s",
			field, kind, namespace, name)
	}
	return "", nil
}
// validatePort checks that the network port is in range
func validatePort(port int) error {
	// Reject anything outside [1, 65535].
	if port < 1 || port > 65535 {
		return fmt.Errorf("port number %d must be in the range 1..65535", port)
	}
	return nil
}
// Validate tests if the Options has valid params.
// Currently only the port is checked; nil is returned when everything is valid.
func (o Options) Validate() error {
	var errs *multierror.Error
	if err := validatePort(int(o.Port)); err != nil {
		errs = multierror.Append(errs, err)
	}
	// ErrorOrNil collapses an empty multierror to a plain nil error.
	return errs.ErrorOrNil()
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package webhooks
import (
"bytes"
"errors"
"math"
"strings"
"time"
v1 "k8s.io/api/admissionregistration/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
klabels "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/util/workqueue"
"istio.io/api/label"
"istio.io/istio/pilot/pkg/keycertbundle"
kubelib "istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/controllers"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/webhooks/util"
)
// Sentinel errors returned by patchMutatingWebhookConfig; webhookPatchTask
// treats these as permanent conditions and does not retry them.
var (
	errWrongRevision     = errors.New("webhook does not belong to target revision")
	errNotFound          = errors.New("webhook not found")
	errNoWebhookWithName = errors.New("webhook configuration did not contain webhook with target name")
)
// WebhookCertPatcher listens for webhooks on specified revision and patches their CA bundles
type WebhookCertPatcher struct {
	// revision to patch webhooks for
	revision string
	// webhookName is the name suffix used to select which webhook entries inside
	// a configuration get the CA bundle patched.
	webhookName string
	// queue drives (rate-limited, endlessly retried) patch attempts.
	queue controllers.Queue

	// CABundleWatcher provides the x509 certificate bundle used by the webhook
	// server and patched into the webhook config.
	CABundleWatcher *keycertbundle.Watcher

	// webhooks is the informer-backed client for MutatingWebhookConfigurations.
	webhooks kclient.Client[*v1.MutatingWebhookConfiguration]
}
// NewWebhookCertPatcher creates a WebhookCertPatcher
func NewWebhookCertPatcher(
	client kubelib.Client,
	revision, webhookName string, caBundleWatcher *keycertbundle.Watcher,
) (*WebhookCertPatcher, error) {
	patcher := &WebhookCertPatcher{
		revision:        revision,
		webhookName:     webhookName,
		CABundleWatcher: caBundleWatcher,
	}
	// Wire up the retry queue and the webhook informer; every add/update/delete
	// of a MutatingWebhookConfiguration is enqueued for reconciliation.
	patcher.queue = newWebhookPatcherQueue(patcher.webhookPatchTask)
	patcher.webhooks = kclient.New[*v1.MutatingWebhookConfiguration](client)
	patcher.webhooks.AddEventHandler(controllers.ObjectHandler(patcher.queue.AddObject))
	return patcher, nil
}
// newWebhookPatcherQueue builds the rate-limited retry queue used by the cert patcher.
func newWebhookPatcherQueue(reconciler controllers.ReconcilerFn) controllers.Queue {
	return controllers.NewQueue("webhook patcher",
		controllers.WithReconciler(reconciler),
		// Try first few(5) retries quickly so that we can detect true conflicts by multiple Istiod instances fast.
		// If there is a conflict beyond this, it means Istiods are seeing different ca certs and are in inconsistent
		// state for longer duration. Slowdown the retries, so that we do not overload kube api server and etcd.
		controllers.WithRateLimiter(workqueue.NewItemFastSlowRateLimiter(100*time.Millisecond, 1*time.Minute, 5)),
		// Webhook patching has to be retried forever. But the retries would be rate limited.
		controllers.WithMaxAttempts(math.MaxInt))
}
// Run runs the WebhookCertPatcher
func (w *WebhookCertPatcher) Run(stopChan <-chan struct{}) {
	// CA bundle rotation handling runs in the background for the patcher's lifetime.
	go w.startCaBundleWatcher(stopChan)
	w.webhooks.Start(stopChan)
	// Block until the webhook informer has synced before processing the queue.
	kubelib.WaitForCacheSync("webhook patcher", stopChan, w.webhooks.HasSynced)
	w.queue.Run(stopChan)
}
// HasSynced reports whether the patcher's work queue has processed its initial state.
func (w *WebhookCertPatcher) HasSynced() bool {
	return w.queue.HasSynced()
}
// webhookPatchTask takes the result of patchMutatingWebhookConfig and modifies the result for use in task queue
func (w *WebhookCertPatcher) webhookPatchTask(o types.NamespacedName) error {
	err := w.patchMutatingWebhookConfig(o.Name)
	switch {
	case err == nil:
		return nil
	case kerrors.IsNotFound(err),
		errors.Is(err, errWrongRevision),
		errors.Is(err, errNoWebhookWithName),
		errors.Is(err, errNotFound):
		// Permanent conditions: we should no longer be patching this webhook,
		// so swallow the error instead of retrying.
		return nil
	default:
		log.Errorf("patching webhook %s failed: %v", o.Name, err)
		reportWebhookPatchRetry(o.Name)
		return err
	}
}
// patchMutatingWebhookConfig takes a webhookConfigName and patches the CA
// bundle for that webhook configuration. It returns one of the sentinel errors
// (errNotFound, errWrongRevision, errNoWebhookWithName) for permanent
// conditions, or the underlying error for retryable failures.
func (w *WebhookCertPatcher) patchMutatingWebhookConfig(webhookConfigName string) error {
	config := w.webhooks.Get(webhookConfigName, "")
	if config == nil {
		reportWebhookPatchFailure(webhookConfigName, reasonWebhookConfigNotFound)
		return errNotFound
	}
	// prevents a race condition between multiple istiods when the revision is changed or modified
	v, ok := config.Labels[label.IoIstioRev.Name]
	if !ok {
		log.Debugf("webhook config %q does not have revision label. It is not a Istio webhook. Skipping patching", webhookConfigName)
		return nil
	}
	if v != w.revision {
		reportWebhookPatchFailure(webhookConfigName, reasonWrongRevision)
		return errWrongRevision
	}
	caCertPem, err := util.LoadCABundle(w.CABundleWatcher)
	if err != nil {
		log.Errorf("Failed to load CA bundle: %v", err)
		reportWebhookPatchFailure(webhookConfigName, reasonLoadCABundleFailure)
		return err
	}
	// Patch every webhook entry whose name matches; `updated` tracks whether any
	// bundle actually changed so we can skip no-op API updates.
	// NOTE(review): this mutates the object returned by the informer-backed Get
	// in place — confirm whether a DeepCopy is required here.
	found := false
	updated := false
	for i, wh := range config.Webhooks {
		if strings.HasSuffix(wh.Name, w.webhookName) {
			if !bytes.Equal(caCertPem, config.Webhooks[i].ClientConfig.CABundle) {
				updated = true
			}
			config.Webhooks[i].ClientConfig.CABundle = caCertPem
			found = true
		}
	}
	if !found {
		reportWebhookPatchFailure(webhookConfigName, reasonWebhookEntryNotFound)
		return errNoWebhookWithName
	}
	if updated {
		reportWebhookPatchAttempts(w.webhookName)
		// BUG FIX: previously `_, err := w.webhooks.Update(config)` declared a new
		// err that shadowed the outer (nil) one, so the trailing `return err`
		// always returned nil and failed updates were never retried by the queue.
		if _, err := w.webhooks.Update(config); err != nil {
			reportWebhookPatchFailure(webhookConfigName, reasonWebhookUpdateFailure)
			return err
		}
	}
	return nil
}
// startCaBundleWatcher listens for updates to the CA bundle and patches the webhooks.
func (w *WebhookCertPatcher) startCaBundleWatcher(stop <-chan struct{}) {
	id, watchCh := w.CABundleWatcher.AddWatcher()
	defer w.CABundleWatcher.RemoveWatcher(id)

	for {
		select {
		case <-watchCh:
			// CA bundle changed: re-enqueue every MutatingWebhookConfiguration so
			// each one gets the new bundle patched in.
			for _, whc := range w.webhooks.List("", klabels.Everything()) {
				log.Debugf("updating caBundle for webhook %q", whc.Name)
				w.queue.AddObject(whc)
			}
		case <-stop:
			return
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc (unknown)
// source: workloadapi/security/authorization.proto
package security
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
emptypb "google.golang.org/protobuf/types/known/emptypb"
reflect "reflect"
sync "sync"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Scope determines which workloads an authorization policy applies to.
// (protoc-generated code — do not hand-edit.)
type Scope int32

const (
	// ALL means that the authorization policy will be applied to all workloads
	// in the mesh (any namespace).
	Scope_GLOBAL Scope = 0
	// NAMESPACE means that the policy will only be applied to workloads in a
	// specific namespace.
	Scope_NAMESPACE Scope = 1
	// WORKLOAD_SELECTOR means that the policy will only be applied to specific
	// workloads that were selected by their labels.
	Scope_WORKLOAD_SELECTOR Scope = 2
)

// Enum value maps for Scope.
var (
	Scope_name = map[int32]string{
		0: "GLOBAL",
		1: "NAMESPACE",
		2: "WORKLOAD_SELECTOR",
	}
	Scope_value = map[string]int32{
		"GLOBAL":            0,
		"NAMESPACE":         1,
		"WORKLOAD_SELECTOR": 2,
	}
)

func (x Scope) Enum() *Scope {
	p := new(Scope)
	*p = x
	return p
}

func (x Scope) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (Scope) Descriptor() protoreflect.EnumDescriptor {
	return file_workloadapi_security_authorization_proto_enumTypes[0].Descriptor()
}

func (Scope) Type() protoreflect.EnumType {
	return &file_workloadapi_security_authorization_proto_enumTypes[0]
}

func (x Scope) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use Scope.Descriptor instead.
func (Scope) EnumDescriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{0}
}
// Action is the effect of a matched authorization policy (allow or deny).
// (protoc-generated code — do not hand-edit.)
type Action int32

const (
	// Allow the request if it matches with the rules.
	Action_ALLOW Action = 0
	// Deny the request if it matches with the rules.
	Action_DENY Action = 1
)

// Enum value maps for Action.
var (
	Action_name = map[int32]string{
		0: "ALLOW",
		1: "DENY",
	}
	Action_value = map[string]int32{
		"ALLOW": 0,
		"DENY":  1,
	}
)

func (x Action) Enum() *Action {
	p := new(Action)
	*p = x
	return p
}

func (x Action) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (Action) Descriptor() protoreflect.EnumDescriptor {
	return file_workloadapi_security_authorization_proto_enumTypes[1].Descriptor()
}

func (Action) Type() protoreflect.EnumType {
	return &file_workloadapi_security_authorization_proto_enumTypes[1]
}

func (x Action) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use Action.Descriptor instead.
func (Action) EnumDescriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{1}
}
// Authorization is the generated message for a single RBAC policy.
// (protoc-generated code — do not hand-edit.)
type Authorization struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Name      string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// Determine the scope of this RBAC policy.
	// If set to NAMESPACE, the 'namespace' field value will be used.
	Scope Scope `protobuf:"varint,3,opt,name=scope,proto3,enum=istio.security.Scope" json:"scope,omitempty"`
	// The action to take if the request is matched with the rules.
	// Default is ALLOW if not specified.
	Action Action `protobuf:"varint,4,opt,name=action,proto3,enum=istio.security.Action" json:"action,omitempty"`
	// Set of RBAC policy groups each containing its rules.
	// If at least one of the groups is matched the policy action will
	// take place.
	// Groups are OR-ed.
	Groups []*Group `protobuf:"bytes,5,rep,name=groups,proto3" json:"groups,omitempty"`
}

func (x *Authorization) Reset() {
	*x = Authorization{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Authorization) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Authorization) ProtoMessage() {}

func (x *Authorization) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Authorization.ProtoReflect.Descriptor instead.
func (*Authorization) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{0}
}

func (x *Authorization) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Authorization) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}

func (x *Authorization) GetScope() Scope {
	if x != nil {
		return x.Scope
	}
	return Scope_GLOBAL
}

func (x *Authorization) GetAction() Action {
	if x != nil {
		return x.Action
	}
	return Action_ALLOW
}

func (x *Authorization) GetGroups() []*Group {
	if x != nil {
		return x.Groups
	}
	return nil
}
// Group is the generated message for one OR-ed group of rules.
// (protoc-generated code — do not hand-edit.)
type Group struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Rules are OR-ed (e.g. ANY rule can match)
	// This is a generic form of the authz policy's to, from and when
	Rules []*Rules `protobuf:"bytes,1,rep,name=rules,proto3" json:"rules,omitempty"`
}

func (x *Group) Reset() {
	*x = Group{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Group) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Group) ProtoMessage() {}

func (x *Group) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Group.ProtoReflect.Descriptor instead.
func (*Group) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{1}
}

func (x *Group) GetRules() []*Rules {
	if x != nil {
		return x.Rules
	}
	return nil
}
// Rules is the generated message for a set of AND-ed match conditions.
// (protoc-generated code — do not hand-edit.)
type Rules struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Conditions within a rule are AND-ed (e.g. ALL conditions must be true)
	Matches []*Match `protobuf:"bytes,2,rep,name=matches,proto3" json:"matches,omitempty"`
}

func (x *Rules) Reset() {
	*x = Rules{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *Rules) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Rules) ProtoMessage() {}

func (x *Rules) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Rules.ProtoReflect.Descriptor instead.
func (*Rules) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{2}
}

func (x *Rules) GetMatches() []*Match {
	if x != nil {
		return x.Matches
	}
	return nil
}
type Match struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Namespaces []*StringMatch `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"`
NotNamespaces []*StringMatch `protobuf:"bytes,2,rep,name=not_namespaces,json=notNamespaces,proto3" json:"not_namespaces,omitempty"`
Principals []*StringMatch `protobuf:"bytes,3,rep,name=principals,proto3" json:"principals,omitempty"`
NotPrincipals []*StringMatch `protobuf:"bytes,4,rep,name=not_principals,json=notPrincipals,proto3" json:"not_principals,omitempty"`
SourceIps []*Address `protobuf:"bytes,5,rep,name=source_ips,json=sourceIps,proto3" json:"source_ips,omitempty"`
NotSourceIps []*Address `protobuf:"bytes,6,rep,name=not_source_ips,json=notSourceIps,proto3" json:"not_source_ips,omitempty"`
DestinationIps []*Address `protobuf:"bytes,7,rep,name=destination_ips,json=destinationIps,proto3" json:"destination_ips,omitempty"`
NotDestinationIps []*Address `protobuf:"bytes,8,rep,name=not_destination_ips,json=notDestinationIps,proto3" json:"not_destination_ips,omitempty"`
DestinationPorts []uint32 `protobuf:"varint,9,rep,packed,name=destination_ports,json=destinationPorts,proto3" json:"destination_ports,omitempty"`
NotDestinationPorts []uint32 `protobuf:"varint,10,rep,packed,name=not_destination_ports,json=notDestinationPorts,proto3" json:"not_destination_ports,omitempty"`
}
// Reset restores the message to its zero state, re-attaching the message
// info when the unsafe fast path is enabled.
func (x *Match) Reset() {
	*x = Match{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard proto text format.
func (x *Match) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Match as implementing the proto.Message interface.
func (*Match) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *Match) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Match.ProtoReflect.Descriptor instead.
func (*Match) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{3}
}

// The getters below are nil-safe: each returns the field value, or the
// zero value (nil slice) when the receiver is nil.
func (x *Match) GetNamespaces() []*StringMatch {
	if x != nil {
		return x.Namespaces
	}
	return nil
}
func (x *Match) GetNotNamespaces() []*StringMatch {
	if x != nil {
		return x.NotNamespaces
	}
	return nil
}
func (x *Match) GetPrincipals() []*StringMatch {
	if x != nil {
		return x.Principals
	}
	return nil
}
func (x *Match) GetNotPrincipals() []*StringMatch {
	if x != nil {
		return x.NotPrincipals
	}
	return nil
}
func (x *Match) GetSourceIps() []*Address {
	if x != nil {
		return x.SourceIps
	}
	return nil
}
func (x *Match) GetNotSourceIps() []*Address {
	if x != nil {
		return x.NotSourceIps
	}
	return nil
}
func (x *Match) GetDestinationIps() []*Address {
	if x != nil {
		return x.DestinationIps
	}
	return nil
}
func (x *Match) GetNotDestinationIps() []*Address {
	if x != nil {
		return x.NotDestinationIps
	}
	return nil
}
func (x *Match) GetDestinationPorts() []uint32 {
	if x != nil {
		return x.DestinationPorts
	}
	return nil
}
func (x *Match) GetNotDestinationPorts() []uint32 {
	if x != nil {
		return x.NotDestinationPorts
	}
	return nil
}
// Address is the generated message type for istio.security.Address.
// It carries raw address bytes plus a length; Length is presumably the
// CIDR prefix length — confirm against workloadapi/security/authorization.proto.
type Address struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
	Length  uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
}
// Reset restores the message to its zero state, re-attaching the message
// info when the unsafe fast path is enabled.
func (x *Address) Reset() {
	*x = Address{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[4]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard proto text format.
func (x *Address) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Address as implementing the proto.Message interface.
func (*Address) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *Address) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[4]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Address.ProtoReflect.Descriptor instead.
func (*Address) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{4}
}

// GetAddress returns the address bytes, or nil if the receiver is nil.
func (x *Address) GetAddress() []byte {
	if x != nil {
		return x.Address
	}
	return nil
}

// GetLength returns the length field, or 0 if the receiver is nil.
func (x *Address) GetLength() uint32 {
	if x != nil {
		return x.Length
	}
	return 0
}
// StringMatch is the generated message type for istio.security.StringMatch:
// a oneof over exact, prefix, suffix, or presence matching of a string.
type StringMatch struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Types that are assignable to MatchType:
	//
	//	*StringMatch_Exact
	//	*StringMatch_Prefix
	//	*StringMatch_Suffix
	//	*StringMatch_Presence
	MatchType isStringMatch_MatchType `protobuf_oneof:"match_type"`
}
// Reset restores the message to its zero state, re-attaching the message
// info when the unsafe fast path is enabled.
func (x *StringMatch) Reset() {
	*x = StringMatch{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_security_authorization_proto_msgTypes[5]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard proto text format.
func (x *StringMatch) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *StringMatch as implementing the proto.Message interface.
func (*StringMatch) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *StringMatch) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_security_authorization_proto_msgTypes[5]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StringMatch.ProtoReflect.Descriptor instead.
func (*StringMatch) Descriptor() ([]byte, []int) {
	return file_workloadapi_security_authorization_proto_rawDescGZIP(), []int{5}
}

// GetMatchType returns the populated oneof wrapper, or nil if none is set
// or the receiver is nil.
func (m *StringMatch) GetMatchType() isStringMatch_MatchType {
	if m != nil {
		return m.MatchType
	}
	return nil
}

// Each oneof getter below returns the field value only when that variant
// is the one currently set; otherwise the zero value.
func (x *StringMatch) GetExact() string {
	if x, ok := x.GetMatchType().(*StringMatch_Exact); ok {
		return x.Exact
	}
	return ""
}
func (x *StringMatch) GetPrefix() string {
	if x, ok := x.GetMatchType().(*StringMatch_Prefix); ok {
		return x.Prefix
	}
	return ""
}
func (x *StringMatch) GetSuffix() string {
	if x, ok := x.GetMatchType().(*StringMatch_Suffix); ok {
		return x.Suffix
	}
	return ""
}
func (x *StringMatch) GetPresence() *emptypb.Empty {
	if x, ok := x.GetMatchType().(*StringMatch_Presence); ok {
		return x.Presence
	}
	return nil
}
// isStringMatch_MatchType is the sealed interface implemented by every
// wrapper type of the StringMatch.match_type oneof.
type isStringMatch_MatchType interface {
	isStringMatch_MatchType()
}

// StringMatch_Exact wraps field 1 of the match_type oneof.
type StringMatch_Exact struct {
	// exact string match
	Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"`
}

// StringMatch_Prefix wraps field 2 of the match_type oneof.
type StringMatch_Prefix struct {
	// prefix-based match
	Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"`
}

// StringMatch_Suffix wraps field 3 of the match_type oneof.
type StringMatch_Suffix struct {
	// suffix-based match
	Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"`
}

// StringMatch_Presence wraps field 4 of the match_type oneof.
type StringMatch_Presence struct {
	Presence *emptypb.Empty `protobuf:"bytes,4,opt,name=presence,proto3,oneof"`
}

func (*StringMatch_Exact) isStringMatch_MatchType() {}

func (*StringMatch_Prefix) isStringMatch_MatchType() {}

func (*StringMatch_Suffix) isStringMatch_MatchType() {}

func (*StringMatch_Presence) isStringMatch_MatchType() {}
// File_workloadapi_security_authorization_proto is the compiled
// FileDescriptor for workloadapi/security/authorization.proto,
// populated by the init below.
var File_workloadapi_security_authorization_proto protoreflect.FileDescriptor

// file_workloadapi_security_authorization_proto_rawDesc is the wire-format
// FileDescriptorProto for authorization.proto. Do not edit: the byte
// content must match the generator output exactly.
var file_workloadapi_security_authorization_proto_rawDesc = []byte{
	0x0a, 0x28, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65,
	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x69, 0x73, 0x74, 0x69,
	0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74,
	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcd, 0x01, 0x0a, 0x0d, 0x41, 0x75, 0x74, 0x68,
	0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a,
	0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x73,
	0x63, 0x6f, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x69, 0x73, 0x74,
	0x69, 0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x63, 0x6f, 0x70,
	0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69,
	0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f,
	0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
	0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x06, 0x67, 0x72, 0x6f, 0x75,
	0x70, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f,
	0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52,
	0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x22, 0x34, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70,
	0x12, 0x2b, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
	0x15, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
	0x2e, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x22, 0x38, 0x0a,
	0x05, 0x52, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e,
	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07,
	0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, 0xec, 0x04, 0x0a, 0x05, 0x4d, 0x61, 0x74, 0x63,
	0x68, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18,
	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73, 0x65,
	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74,
	0x63, 0x68, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x42,
	0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
	0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73,
	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61,
	0x74, 0x63, 0x68, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
	0x65, 0x73, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73,
	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73,
	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61,
	0x74, 0x63, 0x68, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x73, 0x12,
	0x42, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c,
	0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e,
	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d,
	0x61, 0x74, 0x63, 0x68, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70,
	0x61, 0x6c, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70,
	0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e,
	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
	0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x6e,
	0x6f, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x06, 0x20,
	0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75,
	0x72, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
	0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x70, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x64, 0x65,
	0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x07, 0x20,
	0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75,
	0x72, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x64, 0x65,
	0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x70, 0x73, 0x12, 0x47, 0x0a, 0x13,
	0x6e, 0x6f, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
	0x69, 0x70, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x69, 0x73, 0x74, 0x69,
	0x6f, 0x2e, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65,
	0x73, 0x73, 0x52, 0x11, 0x6e, 0x6f, 0x74, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
	0x6f, 0x6e, 0x49, 0x70, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
	0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0d,
	0x52, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x72,
	0x74, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
	0x0d, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
	0x6e, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x3b, 0x0a, 0x07, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
	0x73, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x0c, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6c,
	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e,
	0x67, 0x74, 0x68, 0x22, 0x9d, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61,
	0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x18, 0x0a, 0x06, 0x70,
	0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x70,
	0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x18, 0x0a, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18,
	0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12,
	0x34, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x08, 0x70, 0x72, 0x65,
	0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x0c, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74,
	0x79, 0x70, 0x65, 0x2a, 0x39, 0x0a, 0x05, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
	0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x41, 0x4d, 0x45,
	0x53, 0x50, 0x41, 0x43, 0x45, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x57, 0x4f, 0x52, 0x4b, 0x4c,
	0x4f, 0x41, 0x44, 0x5f, 0x53, 0x45, 0x4c, 0x45, 0x43, 0x54, 0x4f, 0x52, 0x10, 0x02, 0x2a, 0x1d,
	0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f,
	0x57, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x45, 0x4e, 0x59, 0x10, 0x01, 0x42, 0x1a, 0x5a,
	0x18, 0x70, 0x6b, 0x67, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x61, 0x70, 0x69,
	0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
	0x33,
}
var (
	// rawDescOnce guards the one-time gzip compression of the raw descriptor.
	file_workloadapi_security_authorization_proto_rawDescOnce sync.Once
	// rawDescData starts as the raw bytes and is replaced by its gzipped
	// form on first access via rawDescGZIP.
	file_workloadapi_security_authorization_proto_rawDescData = file_workloadapi_security_authorization_proto_rawDesc
)

// file_workloadapi_security_authorization_proto_rawDescGZIP returns the
// gzip-compressed file descriptor, compressing it exactly once.
func file_workloadapi_security_authorization_proto_rawDescGZIP() []byte {
	file_workloadapi_security_authorization_proto_rawDescOnce.Do(func() {
		file_workloadapi_security_authorization_proto_rawDescData = protoimpl.X.CompressGZIP(file_workloadapi_security_authorization_proto_rawDescData)
	})
	return file_workloadapi_security_authorization_proto_rawDescData
}
// Runtime type tables for authorization.proto: 2 enums (Scope, Action)
// and 6 messages.
var file_workloadapi_security_authorization_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_workloadapi_security_authorization_proto_msgTypes = make([]protoimpl.MessageInfo, 6)

// goTypes maps descriptor indexes to the Go types generated for them.
var file_workloadapi_security_authorization_proto_goTypes = []interface{}{
	(Scope)(0),              // 0: istio.security.Scope
	(Action)(0),             // 1: istio.security.Action
	(*Authorization)(nil),   // 2: istio.security.Authorization
	(*Group)(nil),           // 3: istio.security.Group
	(*Rules)(nil),           // 4: istio.security.Rules
	(*Match)(nil),           // 5: istio.security.Match
	(*Address)(nil),         // 6: istio.security.Address
	(*StringMatch)(nil),     // 7: istio.security.StringMatch
	(*emptypb.Empty)(nil),   // 8: google.protobuf.Empty
}

// depIdxs records, for each typed field, the goTypes index of its type;
// the trailing entries delimit the method/extension sub-lists (all empty
// here except field type_names, [0:14]).
var file_workloadapi_security_authorization_proto_depIdxs = []int32{
	0,  // 0: istio.security.Authorization.scope:type_name -> istio.security.Scope
	1,  // 1: istio.security.Authorization.action:type_name -> istio.security.Action
	3,  // 2: istio.security.Authorization.groups:type_name -> istio.security.Group
	4,  // 3: istio.security.Group.rules:type_name -> istio.security.Rules
	5,  // 4: istio.security.Rules.matches:type_name -> istio.security.Match
	7,  // 5: istio.security.Match.namespaces:type_name -> istio.security.StringMatch
	7,  // 6: istio.security.Match.not_namespaces:type_name -> istio.security.StringMatch
	7,  // 7: istio.security.Match.principals:type_name -> istio.security.StringMatch
	7,  // 8: istio.security.Match.not_principals:type_name -> istio.security.StringMatch
	6,  // 9: istio.security.Match.source_ips:type_name -> istio.security.Address
	6,  // 10: istio.security.Match.not_source_ips:type_name -> istio.security.Address
	6,  // 11: istio.security.Match.destination_ips:type_name -> istio.security.Address
	6,  // 12: istio.security.Match.not_destination_ips:type_name -> istio.security.Address
	8,  // 13: istio.security.StringMatch.presence:type_name -> google.protobuf.Empty
	14, // [14:14] is the sub-list for method output_type
	14, // [14:14] is the sub-list for method input_type
	14, // [14:14] is the sub-list for extension type_name
	14, // [14:14] is the sub-list for extension extendee
	0,  // [0:14] is the sub-list for field type_name
}
func init() { file_workloadapi_security_authorization_proto_init() }

// file_workloadapi_security_authorization_proto_init wires the raw
// descriptor, Go types, dependency indexes, and oneof wrappers into the
// protobuf runtime. It is idempotent: a second call returns immediately.
func file_workloadapi_security_authorization_proto_init() {
	if File_workloadapi_security_authorization_proto != nil {
		return
	}
	// Without the unsafe fast path, the runtime needs exporters to reach
	// the unexported state fields of each message type.
	if !protoimpl.UnsafeEnabled {
		file_workloadapi_security_authorization_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Authorization); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_security_authorization_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Group); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_security_authorization_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Rules); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_security_authorization_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Match); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_security_authorization_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Address); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_security_authorization_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StringMatch); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Register the wrapper types of the StringMatch.match_type oneof.
	file_workloadapi_security_authorization_proto_msgTypes[5].OneofWrappers = []interface{}{
		(*StringMatch_Exact)(nil),
		(*StringMatch_Prefix)(nil),
		(*StringMatch_Suffix)(nil),
		(*StringMatch_Presence)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_workloadapi_security_authorization_proto_rawDesc,
			NumEnums:      2,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_workloadapi_security_authorization_proto_goTypes,
		DependencyIndexes: file_workloadapi_security_authorization_proto_depIdxs,
		EnumInfos:         file_workloadapi_security_authorization_proto_enumTypes,
		MessageInfos:      file_workloadapi_security_authorization_proto_msgTypes,
	}.Build()
	File_workloadapi_security_authorization_proto = out.File
	// Release the build-time tables; the runtime now owns the data.
	file_workloadapi_security_authorization_proto_rawDesc = nil
	file_workloadapi_security_authorization_proto_goTypes = nil
	file_workloadapi_security_authorization_proto_depIdxs = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.32.0
// protoc (unknown)
// source: workloadapi/workload.proto
package workloadapi
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards: fail the build if the generated code and the
// protoimpl runtime disagree on supported versions.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// WorkloadStatus is the generated enum for istio.workload.WorkloadStatus.
type WorkloadStatus int32

const (
	// Workload is healthy and ready to serve traffic.
	WorkloadStatus_HEALTHY WorkloadStatus = 0
	// Workload is unhealthy and NOT ready to serve traffic.
	WorkloadStatus_UNHEALTHY WorkloadStatus = 1
)

// Enum value maps for WorkloadStatus.
var (
	WorkloadStatus_name = map[int32]string{
		0: "HEALTHY",
		1: "UNHEALTHY",
	}
	WorkloadStatus_value = map[string]int32{
		"HEALTHY":   0,
		"UNHEALTHY": 1,
	}
)

// Enum returns a pointer to a copy of x, as required by proto2-style APIs.
func (x WorkloadStatus) Enum() *WorkloadStatus {
	p := new(WorkloadStatus)
	*p = x
	return p
}

// String returns the enum value's proto name.
func (x WorkloadStatus) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (WorkloadStatus) Descriptor() protoreflect.EnumDescriptor {
	return file_workloadapi_workload_proto_enumTypes[0].Descriptor()
}
func (WorkloadStatus) Type() protoreflect.EnumType {
	return &file_workloadapi_workload_proto_enumTypes[0]
}
func (x WorkloadStatus) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use WorkloadStatus.Descriptor instead.
func (WorkloadStatus) EnumDescriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{0}
}
// WorkloadType is the generated enum for istio.workload.WorkloadType,
// identifying the kind of controller/resource backing a workload.
type WorkloadType int32

const (
	WorkloadType_DEPLOYMENT WorkloadType = 0
	WorkloadType_CRONJOB    WorkloadType = 1
	WorkloadType_POD        WorkloadType = 2
	WorkloadType_JOB        WorkloadType = 3
)

// Enum value maps for WorkloadType.
var (
	WorkloadType_name = map[int32]string{
		0: "DEPLOYMENT",
		1: "CRONJOB",
		2: "POD",
		3: "JOB",
	}
	WorkloadType_value = map[string]int32{
		"DEPLOYMENT": 0,
		"CRONJOB":    1,
		"POD":        2,
		"JOB":        3,
	}
)

// Enum returns a pointer to a copy of x, as required by proto2-style APIs.
func (x WorkloadType) Enum() *WorkloadType {
	p := new(WorkloadType)
	*p = x
	return p
}

// String returns the enum value's proto name.
func (x WorkloadType) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (WorkloadType) Descriptor() protoreflect.EnumDescriptor {
	return file_workloadapi_workload_proto_enumTypes[1].Descriptor()
}
func (WorkloadType) Type() protoreflect.EnumType {
	return &file_workloadapi_workload_proto_enumTypes[1]
}
func (x WorkloadType) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use WorkloadType.Descriptor instead.
func (WorkloadType) EnumDescriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{1}
}
// TunnelProtocol indicates the tunneling protocol for requests.
type TunnelProtocol int32

const (
	// NONE means requests should be forwarded as-is, without tunneling.
	TunnelProtocol_NONE TunnelProtocol = 0
	// HBONE means requests should be tunneled over HTTP.
	// This does not dictate HTTP/1.1 vs HTTP/2; ALPN should be used for that purpose.
	TunnelProtocol_HBONE TunnelProtocol = 1 // Future options may include things like QUIC/HTTP3, etc.
)

// Enum value maps for TunnelProtocol.
var (
	TunnelProtocol_name = map[int32]string{
		0: "NONE",
		1: "HBONE",
	}
	TunnelProtocol_value = map[string]int32{
		"NONE":  0,
		"HBONE": 1,
	}
)

// Enum returns a pointer to a copy of x, as required by proto2-style APIs.
func (x TunnelProtocol) Enum() *TunnelProtocol {
	p := new(TunnelProtocol)
	*p = x
	return p
}

// String returns the enum value's proto name.
func (x TunnelProtocol) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
func (TunnelProtocol) Descriptor() protoreflect.EnumDescriptor {
	return file_workloadapi_workload_proto_enumTypes[2].Descriptor()
}
func (TunnelProtocol) Type() protoreflect.EnumType {
	return &file_workloadapi_workload_proto_enumTypes[2]
}
func (x TunnelProtocol) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use TunnelProtocol.Descriptor instead.
func (TunnelProtocol) EnumDescriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{2}
}
// Address represents a unique address.
//
// Address joins two sub-resources, Workload and Service, to support querying by IP address.
// Address is intended to be able to be looked up on-demand, allowing a client
// to answer a question like "what is this IP address", similar to a reverse DNS lookup.
//
// Each resource will have a mesh-wide unique opaque name, defined in the individual messages.
// In addition, to support lookup by IP address, they will have *alias* names for each IP the resource represents.
// There may be multiple aliases for the same resource (examples: service in multiple networks, or a dual-stack workload).
// Aliases are keyed by network/IP address. Example: "default/1.2.3.4".
//
// In some cases, we do not know the IP address of a Workload. For instance, we may simply know
// that there is a workload behind a gateway, and rely on the gateway to handle the rest.
// In this case, the key format will be "resource-uid". The resource can be a Pod, WorkloadEntry, etc.
// These resources cannot be looked up on-demand.
//
// In some cases, we do not know the IP address of a Service. These services cannot be used for matching
// outbound traffic, as we only have L4 attributes to route based on. However,
// they can be used for Gateways.
// In this case, the key format will be "network/hostname".
// These resources cannot be looked up on-demand.
type Address struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Types that are assignable to Type (exactly one of Workload or
	// Service is set):
	//
	//	*Address_Workload
	//	*Address_Service
	Type isAddress_Type `protobuf_oneof:"type"`
}
// Reset restores the message to its zero state, re-attaching the message
// info when the unsafe fast path is enabled.
func (x *Address) Reset() {
	*x = Address{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_workload_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard proto text format.
func (x *Address) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Address as implementing the proto.Message interface.
func (*Address) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *Address) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Address.ProtoReflect.Descriptor instead.
func (*Address) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{0}
}

// GetType returns the populated oneof wrapper, or nil if none is set or
// the receiver is nil.
func (m *Address) GetType() isAddress_Type {
	if m != nil {
		return m.Type
	}
	return nil
}

// GetWorkload returns the workload variant, or nil if another variant is set.
func (x *Address) GetWorkload() *Workload {
	if x, ok := x.GetType().(*Address_Workload); ok {
		return x.Workload
	}
	return nil
}

// GetService returns the service variant, or nil if another variant is set.
func (x *Address) GetService() *Service {
	if x, ok := x.GetType().(*Address_Service); ok {
		return x.Service
	}
	return nil
}
// isAddress_Type is the sealed interface implemented by every wrapper
// type of the Address.type oneof.
type isAddress_Type interface {
	isAddress_Type()
}

// Address_Workload wraps field 1 of the type oneof.
type Address_Workload struct {
	// Workload represents an individual workload.
	// This could be a single Pod, a VM instance, etc.
	Workload *Workload `protobuf:"bytes,1,opt,name=workload,proto3,oneof"`
}

// Address_Service wraps field 2 of the type oneof.
type Address_Service struct {
	// Service represents a service - a group of workloads that can be accessed together.
	Service *Service `protobuf:"bytes,2,opt,name=service,proto3,oneof"`
}

func (*Address_Workload) isAddress_Type() {}

func (*Address_Service) isAddress_Type() {}
// Service represents a service - a group of workloads that can be accessed together.
// The xds primary key is "namespace/hostname".
// Secondary (alias) keys are the unique `network/IP` pairs that the service can be reached at.
type Service struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// Name represents the name for the service.
	// For Kubernetes, this is the Service name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Namespace represents the namespace for the service.
	Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// Hostname represents the FQDN of the service.
	// For Kubernetes, this would be <name>.<namespace>.svc.<cluster domain>.
	// TODO: support this field
	Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"`
	// Address represents the addresses the service can be reached at.
	// There may be multiple addresses for a single service if it resides in multiple networks,
	// multiple clusters, and/or if it's dual stack (TODO: support dual stack).
	// For a headless kubernetes service, this list will be empty.
	Addresses []*NetworkAddress `protobuf:"bytes,4,rep,name=addresses,proto3" json:"addresses,omitempty"`
	// Ports for the service.
	// The target_port may be overridden on a per-workload basis.
	Ports []*Port `protobuf:"bytes,5,rep,name=ports,proto3" json:"ports,omitempty"`
	// Optional; if set, the SAN to verify for TLS connections.
	// Typically, this is not set and per-workload identity is used to verify
	// TODO: support this field
	SubjectAltNames []string `protobuf:"bytes,6,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"`
}
// Reset restores the message to its zero state, re-attaching the message
// info when the unsafe fast path is enabled.
func (x *Service) Reset() {
	*x = Service{}
	if protoimpl.UnsafeEnabled {
		mi := &file_workloadapi_workload_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard proto text format.
func (x *Service) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks *Service as implementing the proto.Message interface.
func (*Service) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message.
func (x *Service) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Service.ProtoReflect.Descriptor instead.
func (*Service) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{1}
}

// The getters below are nil-safe: each returns the field value, or the
// zero value when the receiver is nil.
func (x *Service) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}
func (x *Service) GetNamespace() string {
	if x != nil {
		return x.Namespace
	}
	return ""
}
func (x *Service) GetHostname() string {
	if x != nil {
		return x.Hostname
	}
	return ""
}
func (x *Service) GetAddresses() []*NetworkAddress {
	if x != nil {
		return x.Addresses
	}
	return nil
}
func (x *Service) GetPorts() []*Port {
	if x != nil {
		return x.Ports
	}
	return nil
}
func (x *Service) GetSubjectAltNames() []string {
	if x != nil {
		return x.SubjectAltNames
	}
	return nil
}
// Workload represents a workload - an endpoint (or collection behind a hostname).
// The xds primary key is "uid" as defined on the workload below.
// Secondary (alias) keys are the unique `network/IP` pairs that the workload can be reached at.
type Workload struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// UID represents a globally unique opaque identifier for this workload.
	// For k8s resources, it is recommended to use the more readable format:
	//
	// cluster/group/kind/namespace/name/section-name
	//
	// As an example, a ServiceEntry with two WorkloadEntries inlined could become
	// two Workloads with the following UIDs:
	// - cluster1/networking.istio.io/v1alpha3/ServiceEntry/default/external-svc/endpoint1
	// - cluster1/networking.istio.io/v1alpha3/ServiceEntry/default/external-svc/endpoint2
	//
	// For VMs and other workloads other formats are also supported; for example,
	// a single UID string: "0ae5c03d-5fb3-4eb9-9de8-2bd4b51606ba"
	Uid string `protobuf:"bytes,20,opt,name=uid,proto3" json:"uid,omitempty"`
	// Name represents the name for the workload.
	// For Kubernetes, this is the pod name.
	// This is just for debugging and may be elided as an optimization.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Namespace represents the namespace for the workload.
	// This is just for debugging and may be elided as an optimization.
	Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// Addresses represent the IPv4/IPv6 addresses for the workload.
	// These should be globally unique.
	// These should not have a port number.
	// Each workload must have at least either an address or hostname; not both.
	Addresses [][]byte `protobuf:"bytes,3,rep,name=addresses,proto3" json:"addresses,omitempty"`
	// The hostname for the workload to be resolved by the ztunnel.
	// DNS queries are sent on-demand by default.
	// If the resolved DNS query has several endpoints, the request will be forwarded
	// to the first response.
	//
	// At a minimum, each workload must have either an address or hostname. For example,
	// a workload that backs a Kubernetes service will typically have only endpoints. A
	// workload that backs a headless Kubernetes service, however, will have both
	// addresses as well as a hostname used for direct access to the headless endpoint.
	// TODO: support this field
	Hostname string `protobuf:"bytes,21,opt,name=hostname,proto3" json:"hostname,omitempty"`
	// Network represents the network this workload is on. This may be elided for the default network.
	// A (network,address) pair make up a unique key for a workload *at a point in time*.
	Network string `protobuf:"bytes,4,opt,name=network,proto3" json:"network,omitempty"`
	// Protocol that should be used to connect to this workload.
	TunnelProtocol TunnelProtocol `protobuf:"varint,5,opt,name=tunnel_protocol,json=tunnelProtocol,proto3,enum=istio.workload.TunnelProtocol" json:"tunnel_protocol,omitempty"`
	// The SPIFFE identity of the workload. The identity is joined to form spiffe://<trust_domain>/ns/<namespace>/sa/<service_account>.
	// TrustDomain of the workload. May be elided if this is the mesh wide default (typically cluster.local)
	TrustDomain string `protobuf:"bytes,6,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"`
	// ServiceAccount of the workload. May be elided if this is "default"
	ServiceAccount string `protobuf:"bytes,7,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// If present, the waypoint proxy for this workload.
	// All incoming requests must go through the waypoint.
	Waypoint *GatewayAddress `protobuf:"bytes,8,opt,name=waypoint,proto3" json:"waypoint,omitempty"`
	// If present, East West network gateway this workload can be reached through.
	// Requests from remote networks should traverse this gateway.
	NetworkGateway *GatewayAddress `protobuf:"bytes,19,opt,name=network_gateway,json=networkGateway,proto3" json:"network_gateway,omitempty"`
	// Name of the node the workload runs on
	Node string `protobuf:"bytes,9,opt,name=node,proto3" json:"node,omitempty"`
	// CanonicalName for the workload. Used for telemetry.
	CanonicalName string `protobuf:"bytes,10,opt,name=canonical_name,json=canonicalName,proto3" json:"canonical_name,omitempty"`
	// CanonicalRevision for the workload. Used for telemetry.
	CanonicalRevision string `protobuf:"bytes,11,opt,name=canonical_revision,json=canonicalRevision,proto3" json:"canonical_revision,omitempty"`
	// WorkloadType represents the type of the workload. Used for telemetry.
	WorkloadType WorkloadType `protobuf:"varint,12,opt,name=workload_type,json=workloadType,proto3,enum=istio.workload.WorkloadType" json:"workload_type,omitempty"`
	// WorkloadName represents the name for the workload (of type WorkloadType). Used for telemetry.
	WorkloadName string `protobuf:"bytes,13,opt,name=workload_name,json=workloadName,proto3" json:"workload_name,omitempty"`
	// If set, this indicates a workload expects to directly receive tunnel traffic.
	// In ztunnel, this means:
	// * Requests *from* this workload do not need to be tunneled if they already are tunneled by the tunnel_protocol.
	// * Requests *to* this workload, via the tunnel_protocol, do not need to be de-tunneled.
	NativeTunnel bool `protobuf:"varint,14,opt,name=native_tunnel,json=nativeTunnel,proto3" json:"native_tunnel,omitempty"`
	// The services for which this workload is an endpoint.
	// The key is the NamespacedHostname string of the format namespace/hostname.
	Services map[string]*PortList `protobuf:"bytes,22,rep,name=services,proto3" json:"services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// A list of authorization policies applicable to this workload.
	// NOTE: this *only* includes Selector based policies. Namespace and global policies
	// are returned out of band.
	// Authorization policies are only valid for workloads with `addresses` rather than `hostname`.
	AuthorizationPolicies []string `protobuf:"bytes,16,rep,name=authorization_policies,json=authorizationPolicies,proto3" json:"authorization_policies,omitempty"`
	// Health status of the workload (field 17).
	Status WorkloadStatus `protobuf:"varint,17,opt,name=status,proto3,enum=istio.workload.WorkloadStatus" json:"status,omitempty"`
	// The cluster ID that the workload instance belongs to
	ClusterId string `protobuf:"bytes,18,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// (pointer-based) runtime, re-attaches the cached message type info.
func (x *Workload) Reset() {
	*x = Workload{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[2]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *Workload) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *Workload as a protobuf message.
func (*Workload) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *Workload) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[2]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the Workload message within it.
//
// Deprecated: Use Workload.ProtoReflect.Descriptor instead.
func (*Workload) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{2}
}
// GetUid returns the uid field, or "" if the receiver is nil.
func (x *Workload) GetUid() string {
	if x == nil {
		return ""
	}
	return x.Uid
}
// GetName returns the name field, or "" if the receiver is nil.
func (x *Workload) GetName() string {
	if x == nil {
		return ""
	}
	return x.Name
}
// GetNamespace returns the namespace field, or "" if the receiver is nil.
func (x *Workload) GetNamespace() string {
	if x == nil {
		return ""
	}
	return x.Namespace
}
// GetAddresses returns the addresses field, or nil if the receiver is nil.
func (x *Workload) GetAddresses() [][]byte {
	if x == nil {
		return nil
	}
	return x.Addresses
}
// GetHostname returns the hostname field, or "" if the receiver is nil.
func (x *Workload) GetHostname() string {
	if x == nil {
		return ""
	}
	return x.Hostname
}
// GetNetwork returns the network field, or "" if the receiver is nil.
func (x *Workload) GetNetwork() string {
	if x == nil {
		return ""
	}
	return x.Network
}
// GetTunnelProtocol returns the tunnel_protocol field, or TunnelProtocol_NONE
// (the enum zero value) if the receiver is nil.
func (x *Workload) GetTunnelProtocol() TunnelProtocol {
	if x == nil {
		return TunnelProtocol_NONE
	}
	return x.TunnelProtocol
}
// GetTrustDomain returns the trust_domain field, or "" if the receiver is nil.
func (x *Workload) GetTrustDomain() string {
	if x == nil {
		return ""
	}
	return x.TrustDomain
}
// GetServiceAccount returns the service_account field, or "" if the receiver is nil.
func (x *Workload) GetServiceAccount() string {
	if x == nil {
		return ""
	}
	return x.ServiceAccount
}
// GetWaypoint returns the waypoint field, or nil if the receiver is nil.
func (x *Workload) GetWaypoint() *GatewayAddress {
	if x == nil {
		return nil
	}
	return x.Waypoint
}
// GetNetworkGateway returns the network_gateway field, or nil if the receiver is nil.
func (x *Workload) GetNetworkGateway() *GatewayAddress {
	if x == nil {
		return nil
	}
	return x.NetworkGateway
}
// GetNode returns the node field, or "" if the receiver is nil.
func (x *Workload) GetNode() string {
	if x == nil {
		return ""
	}
	return x.Node
}
// GetCanonicalName returns the canonical_name field, or "" if the receiver is nil.
func (x *Workload) GetCanonicalName() string {
	if x == nil {
		return ""
	}
	return x.CanonicalName
}
// GetCanonicalRevision returns the canonical_revision field, or "" if the receiver is nil.
func (x *Workload) GetCanonicalRevision() string {
	if x == nil {
		return ""
	}
	return x.CanonicalRevision
}
// GetWorkloadType returns the workload_type field, or WorkloadType_DEPLOYMENT
// (the enum zero value) if the receiver is nil.
func (x *Workload) GetWorkloadType() WorkloadType {
	if x == nil {
		return WorkloadType_DEPLOYMENT
	}
	return x.WorkloadType
}
// GetWorkloadName returns the workload_name field, or "" if the receiver is nil.
func (x *Workload) GetWorkloadName() string {
	if x == nil {
		return ""
	}
	return x.WorkloadName
}
// GetNativeTunnel returns the native_tunnel field, or false if the receiver is nil.
func (x *Workload) GetNativeTunnel() bool {
	if x == nil {
		return false
	}
	return x.NativeTunnel
}
// GetServices returns the services map, or nil if the receiver is nil.
func (x *Workload) GetServices() map[string]*PortList {
	if x == nil {
		return nil
	}
	return x.Services
}
// GetAuthorizationPolicies returns the authorization_policies field, or nil if the receiver is nil.
func (x *Workload) GetAuthorizationPolicies() []string {
	if x == nil {
		return nil
	}
	return x.AuthorizationPolicies
}
// GetStatus returns the status field, or WorkloadStatus_HEALTHY
// (the enum zero value) if the receiver is nil.
func (x *Workload) GetStatus() WorkloadStatus {
	if x == nil {
		return WorkloadStatus_HEALTHY
	}
	return x.Status
}
// GetClusterId returns the cluster_id field, or "" if the receiver is nil.
func (x *Workload) GetClusterId() string {
	if x == nil {
		return ""
	}
	return x.ClusterId
}
// PortList represents the ports for a service.
// NOTE(review): the original comment read "PorList"; the typo originates in
// workload.proto and should also be fixed there so regeneration keeps this fix.
type PortList struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Ports holds the service/target port pairs (field 1).
	Ports []*Port `protobuf:"bytes,1,rep,name=ports,proto3" json:"ports,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// runtime, re-attaches the cached message type info.
func (x *PortList) Reset() {
	*x = PortList{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[3]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *PortList) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *PortList as a protobuf message.
func (*PortList) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *PortList) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[3]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the PortList message within it.
//
// Deprecated: Use PortList.ProtoReflect.Descriptor instead.
func (*PortList) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{3}
}
// GetPorts returns the ports field, or nil if the receiver is nil.
func (x *PortList) GetPorts() []*Port {
	if x == nil {
		return nil
	}
	return x.Ports
}
// Port describes a single service port mapping from the externally visible
// service port to the backend target port.
type Port struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Port the service is reached at (frontend).
	ServicePort uint32 `protobuf:"varint,1,opt,name=service_port,json=servicePort,proto3" json:"service_port,omitempty"`
	// Port the service forwards to (backend).
	TargetPort uint32 `protobuf:"varint,2,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// runtime, re-attaches the cached message type info.
func (x *Port) Reset() {
	*x = Port{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[4]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *Port) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *Port as a protobuf message.
func (*Port) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *Port) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[4]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the Port message within it.
//
// Deprecated: Use Port.ProtoReflect.Descriptor instead.
func (*Port) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{4}
}
// GetServicePort returns the service_port field, or 0 if the receiver is nil.
func (x *Port) GetServicePort() uint32 {
	if x == nil {
		return 0
	}
	return x.ServicePort
}
// GetTargetPort returns the target_port field, or 0 if the receiver is nil.
func (x *Port) GetTargetPort() uint32 {
	if x == nil {
		return 0
	}
	return x.TargetPort
}
// GatewayAddress represents the address of a gateway.
type GatewayAddress struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// address can either be a hostname (ex: gateway.example.com) or an IP (ex: 1.2.3.4).
	//
	// Types that are assignable to Destination:
	//
	//	*GatewayAddress_Hostname
	//	*GatewayAddress_Address
	Destination isGatewayAddress_Destination `protobuf_oneof:"destination"`
	// port to reach the gateway at for mTLS HBONE connections
	HboneMtlsPort uint32 `protobuf:"varint,3,opt,name=hbone_mtls_port,json=hboneMtlsPort,proto3" json:"hbone_mtls_port,omitempty"`
	// port to reach the gateway at for single tls HBONE connections
	// used for sending unauthenticated traffic originating outside the mesh to a waypoint-enabled destination
	// A value of 0 = unset
	HboneSingleTlsPort uint32 `protobuf:"varint,4,opt,name=hbone_single_tls_port,json=hboneSingleTlsPort,proto3" json:"hbone_single_tls_port,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// runtime, re-attaches the cached message type info.
func (x *GatewayAddress) Reset() {
	*x = GatewayAddress{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[5]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *GatewayAddress) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *GatewayAddress as a protobuf message.
func (*GatewayAddress) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *GatewayAddress) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[5]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the GatewayAddress message within it.
//
// Deprecated: Use GatewayAddress.ProtoReflect.Descriptor instead.
func (*GatewayAddress) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{5}
}
// GetDestination returns the populated destination oneof wrapper, or nil if
// the receiver is nil or no variant is set.
func (m *GatewayAddress) GetDestination() isGatewayAddress_Destination {
	if m == nil {
		return nil
	}
	return m.Destination
}
// GetHostname returns the hostname oneof variant, or nil if the destination
// holds a different variant (or nothing).
func (x *GatewayAddress) GetHostname() *NamespacedHostname {
	if v, ok := x.GetDestination().(*GatewayAddress_Hostname); ok {
		return v.Hostname
	}
	return nil
}
// GetAddress returns the address oneof variant, or nil if the destination
// holds a different variant (or nothing).
func (x *GatewayAddress) GetAddress() *NetworkAddress {
	if v, ok := x.GetDestination().(*GatewayAddress_Address); ok {
		return v.Address
	}
	return nil
}
// GetHboneMtlsPort returns the hbone_mtls_port field, or 0 if the receiver is nil.
func (x *GatewayAddress) GetHboneMtlsPort() uint32 {
	if x == nil {
		return 0
	}
	return x.HboneMtlsPort
}
// GetHboneSingleTlsPort returns the hbone_single_tls_port field, or 0 if the
// receiver is nil (0 also means "unset" per the field's proto comment).
func (x *GatewayAddress) GetHboneSingleTlsPort() uint32 {
	if x == nil {
		return 0
	}
	return x.HboneSingleTlsPort
}
// isGatewayAddress_Destination is the marker interface implemented by the
// GatewayAddress destination oneof wrapper types.
type isGatewayAddress_Destination interface {
	isGatewayAddress_Destination()
}
// GatewayAddress_Hostname wraps the hostname variant of the destination oneof.
type GatewayAddress_Hostname struct {
	// TODO: add support for hostname lookup
	Hostname *NamespacedHostname `protobuf:"bytes,1,opt,name=hostname,proto3,oneof"`
}
// GatewayAddress_Address wraps the network-address variant of the destination oneof.
type GatewayAddress_Address struct {
	Address *NetworkAddress `protobuf:"bytes,2,opt,name=address,proto3,oneof"`
}
// isGatewayAddress_Destination tags GatewayAddress_Hostname as a destination oneof variant.
func (*GatewayAddress_Hostname) isGatewayAddress_Destination() {}
// isGatewayAddress_Destination tags GatewayAddress_Address as a destination oneof variant.
func (*GatewayAddress_Address) isGatewayAddress_Destination() {}
// NetworkAddress represents an address bound to a specific network.
type NetworkAddress struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Network represents the network this address is on.
	Network string `protobuf:"bytes,1,opt,name=network,proto3" json:"network,omitempty"`
	// Address presents the IP (v4 or v6).
	Address []byte `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// runtime, re-attaches the cached message type info.
func (x *NetworkAddress) Reset() {
	*x = NetworkAddress{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[6]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *NetworkAddress) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *NetworkAddress as a protobuf message.
func (*NetworkAddress) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *NetworkAddress) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[6]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the NetworkAddress message within it.
//
// Deprecated: Use NetworkAddress.ProtoReflect.Descriptor instead.
func (*NetworkAddress) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{6}
}
// GetNetwork returns the network field, or "" if the receiver is nil.
func (x *NetworkAddress) GetNetwork() string {
	if x == nil {
		return ""
	}
	return x.Network
}
// GetAddress returns the raw IP bytes, or nil if the receiver is nil.
func (x *NetworkAddress) GetAddress() []byte {
	if x == nil {
		return nil
	}
	return x.Address
}
// NamespacedHostname represents a service bound to a specific namespace.
type NamespacedHostname struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// The namespace the service is in.
	Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
	// hostname (ex: gateway.example.com)
	Hostname string `protobuf:"bytes,2,opt,name=hostname,proto3" json:"hostname,omitempty"`
}
// Reset restores the message to its zero state and, under the unsafe
// runtime, re-attaches the cached message type info.
func (x *NamespacedHostname) Reset() {
	*x = NamespacedHostname{}
	if !protoimpl.UnsafeEnabled {
		return
	}
	mi := &file_workloadapi_workload_proto_msgTypes[7]
	protoimpl.X.MessageStateOf(protoimpl.Pointer(x)).StoreMessageInfo(mi)
}
// String returns a human-readable rendering of the message via the protoimpl formatter.
func (x *NamespacedHostname) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *NamespacedHostname as a protobuf message.
func (*NamespacedHostname) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message, using the cached
// message state fast path when the unsafe runtime is enabled.
func (x *NamespacedHostname) ProtoReflect() protoreflect.Message {
	mi := &file_workloadapi_workload_proto_msgTypes[7]
	if !protoimpl.UnsafeEnabled || x == nil {
		return mi.MessageOf(x)
	}
	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
	if ms.LoadMessageInfo() == nil {
		ms.StoreMessageInfo(mi)
	}
	return ms
}
// Descriptor returns the gzipped FileDescriptorProto bytes and the index
// path of the NamespacedHostname message within it.
//
// Deprecated: Use NamespacedHostname.ProtoReflect.Descriptor instead.
func (*NamespacedHostname) Descriptor() ([]byte, []int) {
	return file_workloadapi_workload_proto_rawDescGZIP(), []int{7}
}
// GetNamespace returns the namespace field, or "" if the receiver is nil.
func (x *NamespacedHostname) GetNamespace() string {
	if x == nil {
		return ""
	}
	return x.Namespace
}
// GetHostname returns the hostname field, or "" if the receiver is nil.
func (x *NamespacedHostname) GetHostname() string {
	if x == nil {
		return ""
	}
	return x.Hostname
}
// File_workloadapi_workload_proto is the protoreflect descriptor for
// workloadapi/workload.proto; it is populated during package initialization
// (see file_workloadapi_workload_proto_init below).
var File_workloadapi_workload_proto protoreflect.FileDescriptor
var file_workloadapi_workload_proto_rawDesc = []byte{
0x0a, 0x1a, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x61, 0x70, 0x69, 0x2f, 0x77, 0x6f,
0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x69, 0x73,
0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x7e, 0x0a, 0x07,
0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x36, 0x0a, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c,
0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x69, 0x73, 0x74, 0x69,
0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c,
0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12,
0x33, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x17, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61,
0x64, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0xed, 0x01, 0x0a,
0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f,
0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f,
0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x73, 0x74, 0x69,
0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x4e, 0x65, 0x74, 0x77, 0x6f,
0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20,
0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b,
0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73,
0x12, 0x2a, 0x0a, 0x11, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74, 0x5f,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x75, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x22, 0xe2, 0x07, 0x0a,
0x08, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64,
0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1c, 0x0a,
0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
0x52, 0x09, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68,
0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68,
0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f,
0x72, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72,
0x6b, 0x12, 0x47, 0x0a, 0x0f, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x69, 0x73, 0x74,
0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x75, 0x6e, 0x6e,
0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x52, 0x0e, 0x74, 0x75, 0x6e, 0x6e,
0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72,
0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a,
0x0f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x08, 0x77, 0x61, 0x79, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f,
0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61,
0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x77, 0x61, 0x79, 0x70, 0x6f, 0x69,
0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x67, 0x61,
0x74, 0x65, 0x77, 0x61, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x73,
0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x47, 0x61, 0x74,
0x65, 0x77, 0x61, 0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x6e, 0x65, 0x74,
0x77, 0x6f, 0x72, 0x6b, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12,
0x25, 0x0a, 0x0e, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d,
0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63,
0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69,
0x63, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01,
0x28, 0x09, 0x52, 0x11, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x52, 0x65, 0x76,
0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61,
0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x69,
0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x57, 0x6f,
0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b,
0x6c, 0x6f, 0x61, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b,
0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a,
0x0d, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x0e,
0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x54, 0x75, 0x6e, 0x6e,
0x65, 0x6c, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x16,
0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72,
0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x53,
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x36, 0x0a,
0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e,
0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x57,
0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
0x5f, 0x69, 0x64, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74,
0x65, 0x72, 0x49, 0x64, 0x1a, 0x55, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77,
0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x50, 0x6f, 0x72, 0x74, 0x4c, 0x69, 0x73, 0x74,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x0f, 0x10,
0x10, 0x22, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x72, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a,
0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x69,
0x73, 0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x50, 0x6f,
0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x4a, 0x0a, 0x04, 0x50, 0x6f, 0x72,
0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x72,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x50, 0x6f, 0x72, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x70,
0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65,
0x74, 0x50, 0x6f, 0x72, 0x74, 0x22, 0xf8, 0x01, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x65, 0x77, 0x61,
0x79, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x40, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74,
0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x73, 0x74,
0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
0x73, 0x70, 0x61, 0x63, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x48, 0x00,
0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x61, 0x64,
0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x73,
0x74, 0x69, 0x6f, 0x2e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x4e, 0x65, 0x74,
0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x48, 0x00, 0x52, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x68, 0x62, 0x6f, 0x6e, 0x65, 0x5f,
0x6d, 0x74, 0x6c, 0x73, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52,
0x0d, 0x68, 0x62, 0x6f, 0x6e, 0x65, 0x4d, 0x74, 0x6c, 0x73, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x31,
0x0a, 0x15, 0x68, 0x62, 0x6f, 0x6e, 0x65, 0x5f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x74,
0x6c, 0x73, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x68,
0x62, 0x6f, 0x6e, 0x65, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x54, 0x6c, 0x73, 0x50, 0x6f, 0x72,
0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
0x22, 0x44, 0x0a, 0x0e, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x41, 0x64, 0x64, 0x72, 0x65,
0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x18, 0x0a, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x61,
0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x4e, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70,
0x61, 0x63, 0x65, 0x64, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09,
0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f,
0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f,
0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x2a, 0x2c, 0x0a, 0x0e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f,
0x61, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c,
0x54, 0x48, 0x59, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54,
0x48, 0x59, 0x10, 0x01, 0x2a, 0x3d, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64,
0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45,
0x4e, 0x54, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x4f, 0x4e, 0x4a, 0x4f, 0x42, 0x10,
0x01, 0x12, 0x07, 0x0a, 0x03, 0x50, 0x4f, 0x44, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x4a, 0x4f,
0x42, 0x10, 0x03, 0x2a, 0x25, 0x0a, 0x0e, 0x54, 0x75, 0x6e, 0x6e, 0x65, 0x6c, 0x50, 0x72, 0x6f,
0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12,
0x09, 0x0a, 0x05, 0x48, 0x42, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x42, 0x11, 0x5a, 0x0f, 0x70, 0x6b,
0x67, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
	// rawDescOnce guards the one-time gzip compression performed in rawDescGZIP.
	file_workloadapi_workload_proto_rawDescOnce sync.Once
	// rawDescData initially aliases the raw descriptor bytes and is replaced
	// in place by their gzip-compressed form on first use.
	file_workloadapi_workload_proto_rawDescData = file_workloadapi_workload_proto_rawDesc
)
// file_workloadapi_workload_proto_rawDescGZIP gzip-compresses the raw file
// descriptor exactly once (guarded by rawDescOnce) and returns the compressed
// bytes on every call.
func file_workloadapi_workload_proto_rawDescGZIP() []byte {
	file_workloadapi_workload_proto_rawDescOnce.Do(func() {
		file_workloadapi_workload_proto_rawDescData = protoimpl.X.CompressGZIP(file_workloadapi_workload_proto_rawDescData)
	})
	return file_workloadapi_workload_proto_rawDescData
}
// enumTypes holds runtime type information for the three enums in this file.
var file_workloadapi_workload_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
// msgTypes holds runtime type information for the nine messages (including map entries) in this file.
var file_workloadapi_workload_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
// goTypes maps the type indices used by depIdxs below to the Go types
// generated for this file (enums first, then messages).
var file_workloadapi_workload_proto_goTypes = []interface{}{
	(WorkloadStatus)(0),          // 0: istio.workload.WorkloadStatus
	(WorkloadType)(0),            // 1: istio.workload.WorkloadType
	(TunnelProtocol)(0),          // 2: istio.workload.TunnelProtocol
	(*Address)(nil),              // 3: istio.workload.Address
	(*Service)(nil),              // 4: istio.workload.Service
	(*Workload)(nil),             // 5: istio.workload.Workload
	(*PortList)(nil),             // 6: istio.workload.PortList
	(*Port)(nil),                 // 7: istio.workload.Port
	(*GatewayAddress)(nil),       // 8: istio.workload.GatewayAddress
	(*NetworkAddress)(nil),       // 9: istio.workload.NetworkAddress
	(*NamespacedHostname)(nil),   // 10: istio.workload.NamespacedHostname
	nil,                          // 11: istio.workload.Workload.ServicesEntry
}
// depIdxs records, for each typed field in the file, the index into goTypes
// of the field's type; the trailing entries delimit the method/extension
// sub-lists (all empty here except field type_names).
var file_workloadapi_workload_proto_depIdxs = []int32{
	5,  // 0: istio.workload.Address.workload:type_name -> istio.workload.Workload
	4,  // 1: istio.workload.Address.service:type_name -> istio.workload.Service
	9,  // 2: istio.workload.Service.addresses:type_name -> istio.workload.NetworkAddress
	7,  // 3: istio.workload.Service.ports:type_name -> istio.workload.Port
	2,  // 4: istio.workload.Workload.tunnel_protocol:type_name -> istio.workload.TunnelProtocol
	8,  // 5: istio.workload.Workload.waypoint:type_name -> istio.workload.GatewayAddress
	8,  // 6: istio.workload.Workload.network_gateway:type_name -> istio.workload.GatewayAddress
	1,  // 7: istio.workload.Workload.workload_type:type_name -> istio.workload.WorkloadType
	11, // 8: istio.workload.Workload.services:type_name -> istio.workload.Workload.ServicesEntry
	0,  // 9: istio.workload.Workload.status:type_name -> istio.workload.WorkloadStatus
	7,  // 10: istio.workload.PortList.ports:type_name -> istio.workload.Port
	10, // 11: istio.workload.GatewayAddress.hostname:type_name -> istio.workload.NamespacedHostname
	9,  // 12: istio.workload.GatewayAddress.address:type_name -> istio.workload.NetworkAddress
	6,  // 13: istio.workload.Workload.ServicesEntry.value:type_name -> istio.workload.PortList
	14, // [14:14] is the sub-list for method output_type
	14, // [14:14] is the sub-list for method input_type
	14, // [14:14] is the sub-list for extension type_name
	14, // [14:14] is the sub-list for extension extendee
	0,  // [0:14] is the sub-list for field type_name
}
// init forces descriptor initialization when the package is loaded.
func init() { file_workloadapi_workload_proto_init() }
// file_workloadapi_workload_proto_init builds the file descriptor exactly
// once: it registers state exporters (when the unsafe fast path is off),
// declares oneof wrapper types, then runs the protoimpl TypeBuilder and
// releases the raw descriptor/index tables for GC.
// Generated by protoc-gen-go; do not edit by hand.
func file_workloadapi_workload_proto_init() {
	// Already initialized (init is idempotent).
	if File_workloadapi_workload_proto != nil {
		return
	}
	// Without unsafe, protoimpl needs exporters to reach unexported message state.
	if !protoimpl.UnsafeEnabled {
		file_workloadapi_workload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Address); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Service); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Workload); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*PortList); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Port); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*GatewayAddress); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*NetworkAddress); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_workloadapi_workload_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*NamespacedHostname); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	// Wrapper types for the Address and GatewayAddress oneof fields.
	file_workloadapi_workload_proto_msgTypes[0].OneofWrappers = []interface{}{
		(*Address_Workload)(nil),
		(*Address_Service)(nil),
	}
	file_workloadapi_workload_proto_msgTypes[5].OneofWrappers = []interface{}{
		(*GatewayAddress_Hostname)(nil),
		(*GatewayAddress_Address)(nil),
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_workloadapi_workload_proto_rawDesc,
			NumEnums:      3,
			NumMessages:   9,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_workloadapi_workload_proto_goTypes,
		DependencyIndexes: file_workloadapi_workload_proto_depIdxs,
		EnumInfos:         file_workloadapi_workload_proto_enumTypes,
		MessageInfos:      file_workloadapi_workload_proto_msgTypes,
	}.Build()
	File_workloadapi_workload_proto = out.File
	// Free the construction-time tables; the built descriptor owns the data now.
	file_workloadapi_workload_proto_rawDesc = nil
	file_workloadapi_workload_proto_goTypes = nil
	file_workloadapi_workload_proto_depIdxs = nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chiron
import (
	"context"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"net"
	"os"
	"strconv"
	"time"

	cert "k8s.io/api/certificates/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	clientset "k8s.io/client-go/kubernetes"

	"istio.io/istio/pkg/log"
	"istio.io/istio/pkg/ptr"
	"istio.io/istio/security/pkg/pki/util"
)
const (
	// The size of a private key for a leaf certificate.
	keySize = 2048
)

// certWatchTimeout bounds how long readSignedCsr waits for the CSR to be signed.
var certWatchTimeout = 60 * time.Second
// GenKeyCertK8sCA generates a key pair and obtains a public certificate
// signed by the K8s CA. The options are meant to sign DNS certs.
// Flow: generate a CSR locally, then delegate the submit/approve/read
// cycle to SignCSRK8s.
// Returns (certChain, keyPEM, caCert, error).
func GenKeyCertK8sCA(client clientset.Interface, dnsName,
	caFilePath string, signerName string, approveCsr bool, requestedLifetime time.Duration,
) ([]byte, []byte, []byte, error) {
	// Build the CSR for the requested DNS name.
	csrPEM, keyPEM, err := util.GenCSR(util.CertOptions{
		Host:       dnsName,
		RSAKeySize: keySize,
		IsDualUse:  false,
		PKCS8Key:   false,
	})
	if err != nil {
		log.Errorf("CSR generation error (%v)", err)
		return nil, nil, nil, err
	}
	// Fall back to the legacy signer when the caller did not name one.
	if signerName == "" {
		signerName = "kubernetes.io/legacy-unknown"
	}
	usages := []cert.KeyUsage{
		cert.UsageDigitalSignature,
		cert.UsageKeyEncipherment,
		cert.UsageServerAuth,
	}
	certChain, caCert, err := SignCSRK8s(client, csrPEM, signerName, usages, dnsName, caFilePath, approveCsr, true, requestedLifetime)
	return certChain, keyPEM, caCert, err
}
// SignCSRK8s generates a certificate from a CSR using the K8s CA:
//  1. submit the CSR,
//  2. optionally approve it,
//  3. read back the signed certificate,
//  4. clean up the CSR object (deferred, best effort).
// Returns (certChain, caCert, error).
func SignCSRK8s(client clientset.Interface, csrData []byte, signerName string, usages []cert.KeyUsage,
	dnsName, caFilePath string, approveCsr, appendCaCert bool, requestedLifetime time.Duration,
) ([]byte, []byte, error) {
	// Step 1: submit the CSR to the API server.
	req, err := submitCSR(client, csrData, signerName, usages, requestedLifetime)
	if err != nil {
		return nil, nil, err
	}
	log.Debugf("CSR (%v) has been created", req.Name)
	// Ensure the CSR object is removed once we are done with it.
	defer func() {
		_ = cleanupCSR(client, req)
	}()
	// Step 2: approve the CSR when requested.
	if approveCsr {
		msg := fmt.Sprintf("CSR (%s) for the certificate (%s) is approved", req.Name, dnsName)
		if err := approveCSR(client, req, msg); err != nil {
			return nil, nil, fmt.Errorf("failed to approve CSR request: %v", err)
		}
		log.Debugf("CSR (%v) is approved", req.Name)
	}
	// Step 3: wait for and read the signed certificate.
	certChain, caCert, err := readSignedCertificate(client, req, certWatchTimeout, caFilePath, appendCaCert)
	if err != nil {
		return nil, nil, err
	}
	return certChain, caCert, err
}
// readCACert loads the PEM file at caCertPath and verifies it contains a
// parseable X.509 CERTIFICATE block before returning the raw bytes.
func readCACert(caCertPath string) ([]byte, error) {
	pemBytes, err := os.ReadFile(caCertPath)
	if err != nil {
		log.Errorf("failed to read CA cert, cert. path: %v, error: %v", caCertPath, err)
		return nil, fmt.Errorf("failed to read CA cert, cert. path: %v, error: %v", caCertPath, err)
	}
	block, _ := pem.Decode(pemBytes)
	switch {
	case block == nil:
		return nil, fmt.Errorf("could not decode pem")
	case block.Type != "CERTIFICATE":
		return nil, fmt.Errorf("ca certificate contains wrong type: %v", block.Type)
	}
	if _, err := x509.ParseCertificate(block.Bytes); err != nil {
		return nil, fmt.Errorf("ca certificate parsing returns an error: %v", err)
	}
	return pemBytes, nil
}
func isTCPReachable(host string, port int) bool {
addr := fmt.Sprintf("%s:%d", host, port)
conn, err := net.DialTimeout("tcp", addr, 1*time.Second)
if err != nil {
log.Debugf("DialTimeout() returns err: %v", err)
// No connection yet, so no need to conn.Close()
return false
}
err = conn.Close()
if err != nil {
log.Infof("tcp connection is not closed: %v", err)
}
return true
}
// submitCSR creates a CertificateSigningRequest on the API server and
// returns the server's view of the created object.
func submitCSR(
	client clientset.Interface,
	csrData []byte,
	signerName string,
	usages []cert.KeyUsage,
	requestedLifetime time.Duration,
) (*cert.CertificateSigningRequest, error) {
	log.Debugf("create CSR for signer %v", signerName)
	req := &cert.CertificateSigningRequest{
		// Username, UID, Groups will be injected by API server.
		TypeMeta:   metav1.TypeMeta{Kind: "CertificateSigningRequest"},
		ObjectMeta: metav1.ObjectMeta{GenerateName: "csr-workload-"},
		Spec: cert.CertificateSigningRequestSpec{
			Request:    csrData,
			Usages:     usages,
			SignerName: signerName,
		},
	}
	// Only request an explicit expiration when the caller asked for one.
	if requestedLifetime != 0 {
		req.Spec.ExpirationSeconds = ptr.Of(int32(requestedLifetime.Seconds()))
	}
	created, err := client.CertificatesV1().CertificateSigningRequests().Create(context.Background(), req, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to create CSR: %v", err)
	}
	return created, nil
}
// approveCSR appends an Approved condition to the CSR and pushes the
// approval to the API server.
func approveCSR(client clientset.Interface, csr *cert.CertificateSigningRequest, approvalMessage string) error {
	approved := cert.CertificateSigningRequestCondition{
		Type:    cert.CertificateApproved,
		Reason:  approvalMessage,
		Message: approvalMessage,
		Status:  corev1.ConditionTrue,
	}
	csr.Status.Conditions = append(csr.Status.Conditions, approved)
	if _, err := client.CertificatesV1().CertificateSigningRequests().UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}); err != nil {
		log.Errorf("failed to approve CSR (%v): %v", csr.Name, err)
		return err
	}
	return nil
}
// readSignedCertificate reads the signed certificate for a CSR.
// When appendCaCert is true and caCertPath is set, it also verifies the
// returned chain against the CA cert and appends that CA cert to the chain.
// Returns (certChain, caCert, error).
func readSignedCertificate(client clientset.Interface, csr *cert.CertificateSigningRequest,
	watchTimeout time.Duration, caCertPath string, appendCaCert bool,
) ([]byte, []byte, error) {
	// First try to read the signed CSR through a watching mechanism
	certPEM, err := readSignedCsr(client, csr.Name, watchTimeout)
	if err != nil {
		return nil, nil, err
	}
	if len(certPEM) == 0 {
		return nil, nil, fmt.Errorf("no certificate returned for the CSR: %q", csr.Name)
	}
	certsParsed, _, err := util.ParsePemEncodedCertificateChain(certPEM)
	if err != nil {
		return nil, nil, fmt.Errorf("decoding certificate failed")
	}
	// Guard the certsParsed[0] access below: never index an empty chain.
	if len(certsParsed) == 0 {
		return nil, nil, fmt.Errorf("no certificates parsed for the CSR: %q", csr.Name)
	}
	if !appendCaCert || caCertPath == "" {
		return certPEM, nil, nil
	}
	caCert, err := readCACert(caCertPath)
	if err != nil {
		return nil, nil, fmt.Errorf("error when retrieving CA cert: (%v)", err)
	}
	// Verify the certificate chain before returning the certificate.
	// (x509.NewCertPool never returns nil, so no nil check is needed.)
	roots := x509.NewCertPool()
	if ok := roots.AppendCertsFromPEM(caCert); !ok {
		return nil, nil, fmt.Errorf("failed to append CA certificate")
	}
	intermediates := x509.NewCertPool()
	if len(certsParsed) > 1 {
		for _, cert := range certsParsed[1:] {
			intermediates.AddCert(cert)
		}
	}
	_, err = certsParsed[0].Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
	})
	if err != nil {
		return nil, nil, fmt.Errorf("failed to verify the certificate chain: %v", err)
	}
	return append(certPEM, caCert...), caCert, nil
}
// readSignedCsr returns the signed certificate of the named CSR through a
// List+Watch. If the CSR is not signed before watchTimeout, an error is
// returned.
func readSignedCsr(client clientset.Interface, csr string, watchTimeout time.Duration) ([]byte, error) {
	selector := fields.OneTermEqualSelector("metadata.name", csr).String()
	// Setup a List+Watch, like informers do.
	// A simple Watch will fail if the cert is signed too quickly.
	l, _ := client.CertificatesV1().CertificateSigningRequests().List(context.Background(), metav1.ListOptions{
		FieldSelector: selector,
	})
	if l != nil && len(l.Items) > 0 {
		reqSigned := l.Items[0]
		if reqSigned.Status.Certificate != nil {
			return reqSigned.Status.Certificate, nil
		}
	}
	var rv string
	if l != nil {
		rv = l.ResourceVersion
	}
	watcher, err := client.CertificatesV1().CertificateSigningRequests().Watch(context.Background(), metav1.ListOptions{
		ResourceVersion: rv,
		FieldSelector:   selector,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to watch CSR %v", csr)
	}
	// Stop the watch when we return, releasing its connection and goroutine.
	defer watcher.Stop()
	// Set a timeout
	timer := time.After(watchTimeout)
	for {
		select {
		case r, ok := <-watcher.ResultChan():
			if !ok {
				// The watch channel closed (e.g. connection dropped). Without this
				// check, a closed channel would deliver zero-value events in a hot loop.
				return nil, fmt.Errorf("watch of CSR %v closed before the certificate was issued", csr)
			}
			// Error events carry a *metav1.Status; a bare type assertion would panic.
			reqSigned, ok := r.Object.(*cert.CertificateSigningRequest)
			if !ok {
				continue
			}
			if reqSigned.Status.Certificate != nil {
				return reqSigned.Status.Certificate, nil
			}
		case <-timer:
			return nil, fmt.Errorf("timeout when watching CSR %v", csr)
		}
	}
}
// cleanupCSR deletes the given CSR object from the API server,
// returning the deletion error (if any) after logging it.
func cleanupCSR(client clientset.Interface, csr *cert.CertificateSigningRequest) error {
	err := client.CertificatesV1().CertificateSigningRequests().Delete(context.TODO(), csr.Name, metav1.DeleteOptions{})
	if err == nil {
		log.Debugf("deleted CSR: %v", csr.Name)
		return nil
	}
	log.Errorf("failed to delete CSR (%v): %v", csr.Name, err)
	return err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package k8s
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/kube/kclient"
)
// InsertDataToConfigMap ensures the configmap identified by meta carries the
// given CA bundle, creating the configmap when it does not exist yet.
// client: the k8s client interface.
// meta: the metadata of configmap.
// caBundle: ca cert data bytes.
func InsertDataToConfigMap(client kclient.Client[*v1.ConfigMap], meta metav1.ObjectMeta, caBundle []byte) error {
	existing := client.Get(meta.Name, meta.Namespace)
	if existing != nil {
		// Already present: reconcile its data if needed.
		return updateDataInConfigMap(client, existing, caBundle)
	}
	// Not found: create a fresh ConfigMap carrying the CA bundle.
	cm := &v1.ConfigMap{
		ObjectMeta: meta,
		Data: map[string]string{
			constants.CACertNamespaceConfigMapDataName: string(caBundle),
		},
	}
	if _, err := client.Create(cm); err != nil {
		// Namespace may be deleted between now... and our previous check. Just skip this, we cannot create into deleted ns
		// And don't retry a create if the namespace is terminating
		if errors.IsAlreadyExists(err) || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
			return nil
		}
		return fmt.Errorf("error when creating configmap %v: %v", meta.Name, err)
	}
	return nil
}
// insertData merges the given key/value pairs into the configmap's data,
// reporting true when at least one key was added or changed.
func insertData(cm *v1.ConfigMap, data map[string]string) bool {
	// No existing data: adopt the incoming map wholesale.
	if cm.Data == nil {
		cm.Data = data
		return true
	}
	changed := false
	for key, want := range data {
		if cm.Data[key] != want {
			changed = true
		}
		cm.Data[key] = want
	}
	return changed
}
// updateDataInConfigMap ensures cm carries the given CA bundle, issuing an
// Update call only when the stored data actually differs.
func updateDataInConfigMap(c kclient.Client[*v1.ConfigMap], cm *v1.ConfigMap, caBundle []byte) error {
	if cm == nil {
		return fmt.Errorf("cannot update nil configmap")
	}
	desired := map[string]string{
		constants.CACertNamespaceConfigMapDataName: string(caBundle),
	}
	updated := cm.DeepCopy()
	if !insertData(updated, desired) {
		// Already up to date; skip the no-op API call.
		return nil
	}
	if _, err := c.Update(updated); err != nil {
		return fmt.Errorf("error when updating configmap %v: %v", cm.Name, err)
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"istio.io/istio/pkg/log"
)
// k8sControllerLog is the logging scope for this controller.
var k8sControllerLog = log.RegisterScope("secretcontroller", "Citadel kubernetes controller log")

// CaSecretController manages the self-signed signing CA secret.
type CaSecretController struct {
	// client is the CoreV1 API surface used to read/write the CA secret.
	client corev1.CoreV1Interface
}
// NewCaSecretController returns a pointer to a newly constructed SecretController instance.
func NewCaSecretController(core corev1.CoreV1Interface) *CaSecretController {
	return &CaSecretController{client: core}
}
// LoadCASecretWithRetry reads CA secret with retries until timeout.
// It polls every retryInterval; on success the secret is returned, on
// timeout the last (secret, error) pair is returned.
func (csc *CaSecretController) LoadCASecretWithRetry(secretName, namespace string,
	retryInterval, timeout time.Duration,
) (*v1.Secret, error) {
	start := time.Now()
	var caSecret *v1.Secret
	var scrtErr error
	for {
		caSecret, scrtErr = csc.client.Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
		if scrtErr == nil {
			return caSecret, nil
		}
		// Include the underlying error so operators can distinguish transient
		// API failures from RBAC or not-found problems.
		k8sControllerLog.Errorf("Failed on loading CA secret %s:%s: %v",
			namespace, secretName, scrtErr)
		if time.Since(start) > timeout {
			k8sControllerLog.Errorf("Timeout on loading CA secret %s:%s.",
				namespace, secretName)
			return caSecret, scrtErr
		}
		time.Sleep(retryInterval)
	}
}
// UpdateCASecretWithRetry updates CA secret with retries until timeout.
// It retries every retryInterval; on timeout the last update error is returned.
func (csc *CaSecretController) UpdateCASecretWithRetry(caSecret *v1.Secret,
	retryInterval, timeout time.Duration,
) error {
	start := time.Now()
	for {
		_, scrtErr := csc.client.Secrets(caSecret.Namespace).Update(context.TODO(), caSecret, metav1.UpdateOptions{})
		if scrtErr == nil {
			return nil
		}
		// Include the underlying error so operators can distinguish transient
		// API failures from conflicts or RBAC problems.
		k8sControllerLog.Errorf("Failed on updating CA secret %s:%s: %v",
			caSecret.Namespace, caSecret.Name, scrtErr)
		if time.Since(start) > timeout {
			k8sControllerLog.Errorf("Timeout on updating CA secret %s:%s.",
				caSecret.Namespace, caSecret.Name)
			return scrtErr
		}
		time.Sleep(retryInterval)
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tokenreview
import (
"context"
"fmt"
"strings"
k8sauth "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"istio.io/istio/pkg/security"
)
// nolint: lll
// Keys for the "extra" fields the K8s authenticator attaches to a token review result.
// From https://github.com/kubernetes/kubernetes/blob/4f2faa2f1ce8f49983173ef29214156afdf405f9/staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go#L41
const (
	// PodNameKey is the key used in a user's "extra" to specify the pod name of
	// the authenticating request.
	PodNameKey = "authentication.kubernetes.io/pod-name"
	// PodUIDKey is the key used in a user's "extra" to specify the pod UID of
	// the authenticating request.
	PodUIDKey = "authentication.kubernetes.io/pod-uid"
)
// ValidateK8sJwt validates a k8s JWT at API server.
// Return {<namespace>, <serviceaccountname>} in the targetToken when the validation passes.
// Otherwise, return the error.
// targetToken: the JWT of the K8s service account to be reviewed
// aud: list of audiences to check. If empty 1st party tokens will be checked.
func ValidateK8sJwt(kubeClient kubernetes.Interface, targetToken string, aud []string) (security.KubernetesInfo, error) {
	// A nil aud leaves Audiences at its zero value, matching the default check.
	review := &k8sauth.TokenReview{
		Spec: k8sauth.TokenReviewSpec{
			Token:     targetToken,
			Audiences: aud,
		},
	}
	result, err := kubeClient.AuthenticationV1().TokenReviews().Create(context.TODO(), review, metav1.CreateOptions{})
	if err != nil {
		return security.KubernetesInfo{}, err
	}
	return getTokenReviewResult(result)
}
// getTokenReviewResult extracts the pod identity from a completed token
// review. The review must be authenticated and belong to a service account;
// the username has the form "system:serviceaccount:{namespace}:{name}".
func getTokenReviewResult(tokenReview *k8sauth.TokenReview) (security.KubernetesInfo, error) {
	if e := tokenReview.Status.Error; e != "" {
		return security.KubernetesInfo{}, fmt.Errorf("the service account authentication returns an error: %v", e)
	}
	// An example SA token:
	// {"alg":"RS256","typ":"JWT"}
	// {"iss":"kubernetes/serviceaccount",
	// "kubernetes.io/serviceaccount/namespace":"default",
	// "kubernetes.io/serviceaccount/secret.name":"example-pod-sa-token-h4jqx",
	// "kubernetes.io/serviceaccount/service-account.name":"example-pod-sa",
	// "kubernetes.io/serviceaccount/service-account.uid":"ff578a9e-65d3-11e8-aad2-42010a8a001d",
	// "sub":"system:serviceaccount:default:example-pod-sa"
	// }
	// An example token review status
	// "status":{
	//    "authenticated":true,
	//    "user":{
	//      "username":"system:serviceaccount:default:example-pod-sa",
	//      "uid":"ff578a9e-65d3-11e8-aad2-42010a8a001d",
	//      "groups":["system:serviceaccounts","system:serviceaccounts:default","system:authenticated"]
	//     }
	// }
	if !tokenReview.Status.Authenticated {
		return security.KubernetesInfo{}, fmt.Errorf("the token is not authenticated")
	}
	isServiceAccount := false
	for _, g := range tokenReview.Status.User.Groups {
		if g == "system:serviceaccounts" {
			isServiceAccount = true
			break
		}
	}
	if !isServiceAccount {
		return security.KubernetesInfo{}, fmt.Errorf("the token is not a service account")
	}
	// "username" is in the form of system:serviceaccount:{namespace}:{service account name}",
	// e.g., "username":"system:serviceaccount:default:example-pod-sa"
	parts := strings.Split(tokenReview.Status.User.Username, ":")
	if len(parts) != 4 {
		return security.KubernetesInfo{}, fmt.Errorf("invalid username field in the token review result")
	}
	return security.KubernetesInfo{
		PodName:           extractExtra(tokenReview, PodNameKey),
		PodNamespace:      parts[2],
		PodUID:            extractExtra(tokenReview, PodUIDKey),
		PodServiceAccount: parts[3],
	}, nil
}
// extractExtra returns the first value stored under key s in the token
// review's user "extra" map, or "" when the key is absent or empty.
func extractExtra(review *k8sauth.TokenReview, s string) string {
	if vals := review.Status.User.Extra[s]; len(vals) > 0 {
		return vals[0]
	}
	return ""
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"context"
"crypto/elliptic"
"crypto/x509"
"encoding/pem"
"fmt"
"os"
"time"
v1 "k8s.io/api/core/v1"
apierror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"istio.io/istio/pkg/backoff"
"istio.io/istio/pkg/log"
"istio.io/istio/security/pkg/cmd"
caerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
certutil "istio.io/istio/security/pkg/util"
)
// File and secret names used by the Istio CA for persisting and loading key material.
const (
	// istioCASecretType is the Istio secret annotation type.
	istioCASecretType = "istio.io/ca-root"

	// CACertFile is the CA certificate chain file.
	CACertFile = "ca-cert.pem"
	// CAPrivateKeyFile is the private key file of CA.
	CAPrivateKeyFile = "ca-key.pem"
	// CASecret stores the key/cert of self-signed CA for persistency purpose.
	CASecret = "istio-ca-secret"
	// CertChainFile is the ID/name for the certificate chain file.
	CertChainFile = "cert-chain.pem"
	// PrivateKeyFile is the ID/name for the private key file.
	PrivateKeyFile = "key.pem"
	// RootCertFile is the ID/name for the CA root certificate file.
	RootCertFile = "root-cert.pem"
	// TLSSecretCACertFile is the CA certificate file name as it exists in tls type k8s secret.
	TLSSecretCACertFile = "tls.crt"
	// TLSSecretCAPrivateKeyFile is the CA certificate key file name as it exists in tls type k8s secret.
	TLSSecretCAPrivateKeyFile = "tls.key"
	// TLSSecretRootCertFile is the root cert file name as it exists in tls type k8s secret.
	TLSSecretRootCertFile = "ca.crt"
	// The standard key size to use when generating an RSA private key
	rsaKeySize = 2048
	// CACertsSecret stores the plugin CA certificates, in external istiod scenario, the secret can be in the config cluster.
	CACertsSecret = "cacerts"
	// IstioGenerated is the key indicating the secret is generated by Istio.
	IstioGenerated = "istio-generated"
)
// SigningCAFileBundle locations of the files used for the signing CA
type SigningCAFileBundle struct {
	// RootCertFile is the path of the root certificate.
	RootCertFile string
	// CertChainFiles are paths of intermediate certificate chain files, if any.
	CertChainFiles []string
	// SigningCertFile is the path of the signing (issuing) certificate.
	SigningCertFile string
	// SigningKeyFile is the path of the signing private key.
	SigningKeyFile string
}

// pkiCaLog is the logging scope for the CA.
var pkiCaLog = log.RegisterScope("pkica", "Citadel CA log")

// caTypes is the enum for the CA type.
type caTypes int

// CertOpts carries the per-request options for signing a certificate.
type CertOpts struct {
	// SubjectIDs are used for building the SAN extension for the certificate.
	SubjectIDs []string

	// TTL is the requested lifetime (Time to live) to be applied in the certificate.
	TTL time.Duration

	// ForCA indicates whether the signed certificate is for CA.
	// If true, the signed certificate is a CA certificate, otherwise, it is a workload certificate.
	ForCA bool

	// Cert Signer info
	CertSigner string
}
const (
	// selfSignedCA means the Istio CA uses a self signed certificate.
	selfSignedCA caTypes = iota
	// pluggedCertCA means the Istio CA uses an operator-specified key/cert.
	pluggedCertCA
)

// IstioCAOptions holds the configurations for creating an Istio CA.
type IstioCAOptions struct {
	// CAType selects between self-signed and plugged-cert CA behavior.
	CAType caTypes

	// DefaultCertTTL is the workload cert lifetime used when none is requested.
	DefaultCertTTL time.Duration
	// MaxCertTTL caps any requested certificate lifetime.
	MaxCertTTL time.Duration
	// CARSAKeySize is the RSA key size for CA-generated keys.
	CARSAKeySize int

	// KeyCertBundle holds the CA signing key/cert and trust roots.
	KeyCertBundle *util.KeyCertBundle

	// Config for creating self-signed root cert rotator.
	RotatorConfig *SelfSignedCARootCertRotatorConfig

	// OnRootCertUpdate is the cb which can only be called by self-signed root cert rotator
	OnRootCertUpdate func() error
}

// RootCertUpdateFunc is the callback signature invoked after a root cert rotation.
type RootCertUpdateFunc func() error
// NewSelfSignedIstioCAOptions returns a new IstioCAOptions instance using self-signed certificate.
//
// Bootstrap order (retried with exponential backoff until ctx is done):
//  1. Load the `istio-ca-secret` secret when it exists.
//  2. Otherwise, when useCacertsSecretName is set, try the `cacerts` secret.
//  3. If neither exists, generate a self-signed root cert/key and persist it
//     back to a new secret (named per step 2's outcome) so the CA survives restarts.
func NewSelfSignedIstioCAOptions(ctx context.Context,
	rootCertGracePeriodPercentile int, caCertTTL, rootCertCheckInverval, defaultCertTTL,
	maxCertTTL time.Duration, org string, useCacertsSecretName, dualUse bool, namespace string, client corev1.CoreV1Interface,
	rootCertFile string, enableJitter bool, caRSAKeySize int,
) (caOpts *IstioCAOptions, err error) {
	caOpts = &IstioCAOptions{
		CAType:         selfSignedCA,
		DefaultCertTTL: defaultCertTTL,
		MaxCertTTL:     maxCertTTL,
		RotatorConfig: &SelfSignedCARootCertRotatorConfig{
			CheckInterval:      rootCertCheckInverval,
			caCertTTL:          caCertTTL,
			retryInterval:      cmd.ReadSigningCertRetryInterval,
			retryMax:           cmd.ReadSigningCertRetryMax,
			certInspector:      certutil.NewCertUtil(rootCertGracePeriodPercentile),
			caStorageNamespace: namespace,
			dualUse:            dualUse,
			org:                org,
			rootCertFile:       rootCertFile,
			enableJitter:       enableJitter,
			client:             client,
		},
	}

	// Always use `istio-ca-secret` in priority; otherwise fall back to `cacerts`.
	var caCertName string
	b := backoff.NewExponentialBackOff(backoff.DefaultOption())
	err = b.RetryWithContext(ctx, func() error {
		caCertName = CASecret
		// 1. fetch `istio-ca-secret` in priority
		err := loadSelfSignedCaSecret(client, namespace, caCertName, rootCertFile, caOpts)
		if err == nil {
			return nil
		} else if apierror.IsNotFound(err) {
			// 2. if `istio-ca-secret` not exist and use cacerts enabled, fallback to fetch `cacerts`
			if useCacertsSecretName {
				caCertName = CACertsSecret
				err := loadSelfSignedCaSecret(client, namespace, caCertName, rootCertFile, caOpts)
				if err == nil {
					return nil
				} else if apierror.IsNotFound(err) { // if neither `istio-ca-secret` nor `cacerts` exists, we create a `cacerts`
					// continue to create `cacerts`
				} else {
					return err
				}
			}

			// 3. if use cacerts disabled, create `istio-ca-secret`, otherwise create `cacerts`.
			pkiCaLog.Infof("CASecret %s not found, will create one", caCertName)
			options := util.CertOptions{
				TTL:          caCertTTL,
				Org:          org,
				IsCA:         true,
				IsSelfSigned: true,
				RSAKeySize:   caRSAKeySize,
				IsDualUse:    dualUse,
			}
			pemCert, pemKey, ckErr := util.GenCertKeyFromOptions(options)
			if ckErr != nil {
				pkiCaLog.Warnf("unable to generate CA cert and key for self-signed CA (%v)", ckErr)
				return fmt.Errorf("unable to generate CA cert and key for self-signed CA (%v)", ckErr)
			}
			rootCerts, err := util.AppendRootCerts(pemCert, rootCertFile)
			if err != nil {
				pkiCaLog.Warnf("failed to append root certificates (%v)", err)
				return fmt.Errorf("failed to append root certificates (%v)", err)
			}
			if caOpts.KeyCertBundle, err = util.NewVerifiedKeyCertBundleFromPem(pemCert, pemKey, nil, rootCerts); err != nil {
				pkiCaLog.Warnf("failed to create CA KeyCertBundle (%v)", err)
				return fmt.Errorf("failed to create CA KeyCertBundle (%v)", err)
			}
			// Write the key/cert back to secret, so they will be persistent when CA restarts.
			secret := BuildSecret(caCertName, namespace, nil, nil, pemCert, pemCert, pemKey, istioCASecretType)
			_, err = client.Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
			if err != nil {
				pkiCaLog.Warnf("Failed to create secret %s (%v)", caCertName, err)
				return err
			}
			pkiCaLog.Infof("Using self-generated public key: %v", string(rootCerts))
			return nil
		}
		return err
	})
	pkiCaLog.Infof("Set secret name for self-signed CA cert rotator to %s", caCertName)
	caOpts.RotatorConfig.secretName = caCertName
	return caOpts, err
}
// loadSelfSignedCaSecret reads the named CA secret and, when found, fills
// caOpts.KeyCertBundle from its signing key/cert plus any extra root certs
// from rootCertFile. The error from the secret Get (including NotFound) is
// returned unchanged so callers can distinguish a missing secret.
func loadSelfSignedCaSecret(client corev1.CoreV1Interface, namespace string, caCertName string, rootCertFile string, caOpts *IstioCAOptions) error {
	caSecret, err := client.Secrets(namespace).Get(context.TODO(), caCertName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pkiCaLog.Infof("Load signing key and cert from existing secret %s/%s", caSecret.Namespace, caSecret.Name)
	rootCerts, err := util.AppendRootCerts(caSecret.Data[CACertFile], rootCertFile)
	if err != nil {
		return fmt.Errorf("failed to append root certificates (%v)", err)
	}
	if caOpts.KeyCertBundle, err = util.NewVerifiedKeyCertBundleFromPem(caSecret.Data[CACertFile],
		caSecret.Data[CAPrivateKeyFile], nil, rootCerts); err != nil {
		return fmt.Errorf("failed to create CA KeyCertBundle (%v)", err)
	}
	pkiCaLog.Infof("Using existing public key: %v", string(rootCerts))
	return nil
}
// NewSelfSignedDebugIstioCAOptions returns a new IstioCAOptions instance using self-signed certificate produced by in-memory CA,
// which runs without K8s, and no local ca key file presented.
func NewSelfSignedDebugIstioCAOptions(rootCertFile string, caCertTTL, defaultCertTTL, maxCertTTL time.Duration,
	org string, caRSAKeySize int,
) (caOpts *IstioCAOptions, err error) {
	// Generate the self-signed CA key/cert entirely in memory.
	pemCert, pemKey, ckErr := util.GenCertKeyFromOptions(util.CertOptions{
		TTL:          caCertTTL,
		Org:          org,
		IsCA:         true,
		IsSelfSigned: true,
		RSAKeySize:   caRSAKeySize,
		IsDualUse:    true, // hardcoded to true for K8S as well
	})
	if ckErr != nil {
		return nil, fmt.Errorf("unable to generate CA cert and key for self-signed CA (%v)", ckErr)
	}
	rootCerts, err := util.AppendRootCerts(pemCert, rootCertFile)
	if err != nil {
		return nil, fmt.Errorf("failed to append root certificates (%v)", err)
	}
	bundle, err := util.NewVerifiedKeyCertBundleFromPem(pemCert, pemKey, nil, rootCerts)
	if err != nil {
		return nil, fmt.Errorf("failed to create CA KeyCertBundle (%v)", err)
	}
	return &IstioCAOptions{
		CAType:         selfSignedCA,
		DefaultCertTTL: defaultCertTTL,
		MaxCertTTL:     maxCertTTL,
		CARSAKeySize:   caRSAKeySize,
		KeyCertBundle:  bundle,
	}, nil
}
// NewPluggedCertIstioCAOptions returns a new IstioCAOptions instance using given certificate.
// fileBundle points at the operator-supplied signing cert/key, optional
// intermediate chain files, and the root certificate.
func NewPluggedCertIstioCAOptions(fileBundle SigningCAFileBundle,
	defaultCertTTL, maxCertTTL time.Duration, caRSAKeySize int,
) (caOpts *IstioCAOptions, err error) {
	caOpts = &IstioCAOptions{
		CAType:         pluggedCertCA,
		DefaultCertTTL: defaultCertTTL,
		MaxCertTTL:     maxCertTTL,
		CARSAKeySize:   caRSAKeySize,
	}
	if caOpts.KeyCertBundle, err = util.NewVerifiedKeyCertBundleFromFile(
		fileBundle.SigningCertFile, fileBundle.SigningKeyFile, fileBundle.CertChainFiles, fileBundle.RootCertFile); err != nil {
		return nil, fmt.Errorf("failed to create CA KeyCertBundle (%v)", err)
	}

	// Validate that the passed in signing cert can be used as CA.
	// The check can't be done inside `KeyCertBundle`, since bundle could also be used to
	// validate workload certificates (i.e., where the leaf certificate is not a CA).
	b, err := os.ReadFile(fileBundle.SigningCertFile)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(b)
	if block == nil {
		return nil, fmt.Errorf("invalid PEM encoded certificate")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		// Include the parse error so a malformed cert is diagnosable from the message.
		return nil, fmt.Errorf("failed to parse X.509 certificate: %v", err)
	}
	if !cert.IsCA {
		return nil, fmt.Errorf("certificate is not authorized to sign other certificates")
	}
	return caOpts, nil
}
// BuildSecret returns a secret struct, contents of which are filled with parameters passed in.
// Adds the "istio-generated" key if the secret name is `cacerts`.
func BuildSecret(scrtName, namespace string, certChain, privateKey, rootCert, caCert, caPrivateKey []byte, secretType v1.SecretType) *v1.Secret {
	data := map[string][]byte{
		CertChainFile:    certChain,
		PrivateKeyFile:   privateKey,
		RootCertFile:     rootCert,
		CACertFile:       caCert,
		CAPrivateKeyFile: caPrivateKey,
	}
	// Mark the well-known `cacerts` secret so it can be recognized as generated by Istio.
	if scrtName == CACertsSecret {
		data[IstioGenerated] = []byte("")
	}
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      scrtName,
			Namespace: namespace,
		},
		Data: data,
		Type: secretType,
	}
}
// IstioCA generates keys and certificates for Istio identities.
type IstioCA struct {
	// defaultCertTTL is the workload cert TTL used when a CSR does not request one
	// (capped by the signing cert chain's remaining lifetime, see minTTL).
	defaultCertTTL time.Duration
	// maxCertTTL is the upper bound on the TTL that may be requested for a cert.
	maxCertTTL time.Duration
	// caRSAKeySize is the RSA key size used when (re)generating CA keys.
	caRSAKeySize int
	// keyCertBundle holds the CA signing cert/key plus cert chain and root certs.
	keyCertBundle *util.KeyCertBundle
	// rootCertRotator periodically rotates self-signed root cert for CA. It is nil
	// if CA is not self-signed CA.
	rootCertRotator *SelfSignedCARootCertRotator
}
// NewIstioCA returns a new IstioCA instance.
// Note: on minTTL failure the partially initialized CA is returned together with
// the error, preserving the historical contract for callers that inspect it.
func NewIstioCA(opts *IstioCAOptions) (*IstioCA, error) {
	ca := &IstioCA{
		maxCertTTL:    opts.MaxCertTTL,
		keyCertBundle: opts.KeyCertBundle,
		caRSAKeySize:  opts.CARSAKeySize,
	}
	// Only a self-signed CA rotates its own root cert, and only when a positive
	// check interval was configured.
	if opts.CAType == selfSignedCA && opts.RotatorConfig != nil && opts.RotatorConfig.CheckInterval > time.Duration(0) {
		ca.rootCertRotator = NewSelfSignedCARootCertRotator(opts.RotatorConfig, ca, opts.OnRootCertUpdate)
	}
	// if CA cert becomes invalid before workload cert it's going to cause workload cert to be invalid too,
	// however Citadel won't rotate if that happens, this function will prevent that using cert chain TTL as
	// the workload TTL
	defaultCertTTL, err := ca.minTTL(opts.DefaultCertTTL)
	if err != nil {
		return ca, fmt.Errorf("failed to get default cert TTL: %w", err)
	}
	ca.defaultCertTTL = defaultCertTTL
	return ca, nil
}
// Run starts the root cert rotator (when one is configured) in the background.
// It returns immediately; the rotator goroutine stops when stopChan is closed.
func (ca *IstioCA) Run(stopChan chan struct{}) {
	if ca.rootCertRotator == nil {
		return
	}
	// Start root cert rotator in a separate goroutine.
	go ca.rootCertRotator.Run(stopChan)
}
// Sign takes a PEM-encoded CSR and cert opts, and returns a signed certificate.
func (ca *IstioCA) Sign(csrPEM []byte, certOpts CertOpts) ([]byte, error) {
	// Lifetime checking is always enabled for certificates requested through Sign.
	const checkLifetime = true
	return ca.sign(csrPEM, certOpts.SubjectIDs, certOpts.TTL, checkLifetime, certOpts.ForCA)
}
// SignWithCertChain is similar to Sign but returns the leaf cert and the entire cert chain.
func (ca *IstioCA) SignWithCertChain(csrPEM []byte, certOpts CertOpts) ([]string, error) {
	signed, err := ca.signWithCertChain(csrPEM, certOpts.SubjectIDs, certOpts.TTL, true, certOpts.ForCA)
	if err != nil {
		return nil, err
	}
	// The leaf and chain are concatenated into a single PEM entry.
	return []string{string(signed)}, nil
}
// GetCAKeyCertBundle returns the KeyCertBundle for the CA.
// The bundle is shared, not copied: updates through it (e.g. by the root cert
// rotator's VerifyAndSetAll) are visible to all users of the CA.
func (ca *IstioCA) GetCAKeyCertBundle() *util.KeyCertBundle {
	return ca.keyCertBundle
}
// GenKeyCert generates a certificate signed by the CA,
// returns the certificate chain and the private key.
func (ca *IstioCA) GenKeyCert(hostnames []string, certTTL time.Duration, checkLifetime bool) ([]byte, []byte, error) {
	opts := util.CertOptions{RSAKeySize: rsaKeySize}
	// use the type of private key the CA uses to generate an intermediate CA of that type (e.g. CA cert using RSA will
	// cause intermediate CAs using RSA to be generated)
	_, signingKey, _, _ := ca.keyCertBundle.GetAll()
	if curve, err := util.GetEllipticCurve(signingKey); err == nil {
		opts.ECSigAlg = util.EcdsaSigAlg
		if curve == elliptic.P384() {
			opts.ECCCurve = util.P384Curve
		} else {
			opts.ECCCurve = util.P256Curve
		}
	}
	csrPEM, privPEM, err := util.GenCSR(opts)
	if err != nil {
		return nil, nil, err
	}
	certPEM, err := ca.signWithCertChain(csrPEM, hostnames, certTTL, checkLifetime, false)
	if err != nil {
		return nil, nil, err
	}
	return certPEM, privPEM, nil
}
// minTTL returns the smaller of defaultCertTTL and the remaining lifetime of the
// CA's cert chain, so issued workload certs never outlive the signing chain.
// Returns an error when the chain's expiry cannot be determined or has passed.
func (ca *IstioCA) minTTL(defaultCertTTL time.Duration) (time.Duration, error) {
	certChainPem := ca.keyCertBundle.GetCertChainPem()
	// No intermediate chain (self-signed root CA): use the default TTL as-is.
	if len(certChainPem) == 0 {
		return defaultCertTTL, nil
	}
	certChainExpiration, err := util.TimeBeforeCertExpires(certChainPem, time.Now())
	if err != nil {
		return 0, fmt.Errorf("failed to get cert chain TTL: %w", err)
	}
	if certChainExpiration <= 0 {
		return 0, fmt.Errorf("cert chain has expired")
	}
	// Compare Durations directly rather than via float Seconds(), which can lose
	// precision for large values.
	if defaultCertTTL > certChainExpiration {
		return certChainExpiration, nil
	}
	return defaultCertTTL, nil
}
// sign parses and validates a PEM-encoded CSR and issues a certificate for the
// given subjectIDs. When checkLifetime is true, a requested TTL above maxCertTTL
// is rejected; a non-positive requested TTL falls back to defaultCertTTL. forCA
// marks the issued certificate as a CA cert. On failure a typed caerror
// identifies the failing stage (CANotReady, CSRError, TTLError, CertGenError).
func (ca *IstioCA) sign(csrPEM []byte, subjectIDs []string, requestedLifetime time.Duration, checkLifetime, forCA bool) ([]byte, error) {
	signingCert, signingKey, _, _ := ca.keyCertBundle.GetAll()
	if signingCert == nil {
		return nil, caerror.NewError(caerror.CANotReady, fmt.Errorf("Istio CA is not ready")) // nolint
	}
	csr, err := util.ParsePemEncodedCSR(csrPEM)
	if err != nil {
		return nil, caerror.NewError(caerror.CSRError, err)
	}
	// Reject CSRs whose signature does not verify against their embedded public key.
	if err := csr.CheckSignature(); err != nil {
		return nil, caerror.NewError(caerror.CSRError, err)
	}
	lifetime := requestedLifetime
	// If the requested requestedLifetime is non-positive, apply the default TTL.
	if requestedLifetime.Seconds() <= 0 {
		lifetime = ca.defaultCertTTL
	}
	// If checkLifetime is set and the requested TTL is greater than maxCertTTL, return an error
	if checkLifetime && requestedLifetime.Seconds() > ca.maxCertTTL.Seconds() {
		return nil, caerror.NewError(caerror.TTLError, fmt.Errorf(
			"requested TTL %s is greater than the max allowed TTL %s", requestedLifetime, ca.maxCertTTL))
	}
	certBytes, err := util.GenCertFromCSR(csr, signingCert, csr.PublicKey, *signingKey, subjectIDs, lifetime, forCA)
	if err != nil {
		return nil, caerror.NewError(caerror.CertGenError, err)
	}
	// Wrap the raw DER certificate in a PEM block before returning it.
	block := &pem.Block{
		Type:  "CERTIFICATE",
		Bytes: certBytes,
	}
	cert := pem.EncodeToMemory(block)
	return cert, nil
}
// signWithCertChain signs the CSR and, when the CA has an intermediate cert
// chain, appends it to the leaf so callers receive one concatenated PEM payload.
func (ca *IstioCA) signWithCertChain(csrPEM []byte, subjectIDs []string, requestedLifetime time.Duration, lifetimeCheck,
	forCA bool,
) ([]byte, error) {
	leaf, err := ca.sign(csrPEM, subjectIDs, requestedLifetime, lifetimeCheck, forCA)
	if err != nil {
		return nil, err
	}
	if chain := ca.GetCAKeyCertBundle().GetCertChainPem(); len(chain) > 0 {
		leaf = append(leaf, chain...)
	}
	return leaf, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mock
import (
"istio.io/istio/security/pkg/pki/ca"
caerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
)
// FakeCA is a mock of CertificateAuthority.
type FakeCA struct {
	// SignedCert is returned by Sign/SignWithCertChain when SignErr is nil.
	SignedCert []byte
	// SignErr, when non-nil, is returned as the error from Sign/SignWithCertChain.
	SignErr *caerror.Error
	// KeyCertBundle is returned by GetCAKeyCertBundle when non-nil.
	KeyCertBundle *util.KeyCertBundle
	// ReceivedIDs records the subject IDs passed to the most recent Sign call.
	ReceivedIDs []string
}
// Sign returns the SignErr if SignErr is not nil, otherwise, it returns SignedCert.
func (f *FakeCA) Sign(csr []byte, certOpts ca.CertOpts) ([]byte, error) {
	// Record the requested identities so tests can assert on them.
	f.ReceivedIDs = certOpts.SubjectIDs
	if f.SignErr != nil {
		return nil, f.SignErr
	}
	return f.SignedCert, nil
}
// SignWithCertChain returns the SignErr if SignErr is not nil, otherwise, it returns SignedCert and the cert chain.
func (f *FakeCA) SignWithCertChain(csr []byte, certOpts ca.CertOpts) ([]string, error) {
	if f.SignErr != nil {
		return nil, f.SignErr
	}
	respCertChain := []string{string(f.SignedCert)}
	if f.KeyCertBundle != nil {
		respCertChain = append(respCertChain, string(f.KeyCertBundle.GetCertChainPem()))
	}
	// GetCAKeyCertBundle falls back to a fake bundle when KeyCertBundle is nil.
	_, _, _, rootCertBytes := f.GetCAKeyCertBundle().GetAll()
	if len(rootCertBytes) > 0 {
		respCertChain = append(respCertChain, string(rootCertBytes))
	}
	return respCertChain, nil
}
// GetCAKeyCertBundle returns KeyCertBundle if KeyCertBundle is not nil, otherwise, it returns an empty
// FakeKeyCertBundle.
func (f *FakeCA) GetCAKeyCertBundle() *util.KeyCertBundle {
	if f.KeyCertBundle != nil {
		return f.KeyCertBundle
	}
	// Placeholder bundle so callers never receive nil.
	return util.NewKeyCertBundleFromPem([]byte{}, []byte("foo"), []byte("fake"), []byte("fake"))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"bytes"
"fmt"
"math/rand"
"time"
v1 "k8s.io/api/core/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"istio.io/istio/pkg/log"
"istio.io/istio/security/pkg/k8s/controller"
"istio.io/istio/security/pkg/pki/util"
certutil "istio.io/istio/security/pkg/util"
)
// rootCertRotatorLog is the logging scope used by the self-signed root cert rotator.
var rootCertRotatorLog = log.RegisterScope("rootcertrotator", "Self-signed CA root cert rotator log")
// SelfSignedCARootCertRotatorConfig carries the settings that control how the
// self-signed root cert rotator checks for and performs root cert rotation.
type SelfSignedCARootCertRotatorConfig struct {
	// certInspector computes the remaining wait time before the root cert expires.
	certInspector certutil.CertUtil
	// caStorageNamespace is the namespace containing the CA secret.
	caStorageNamespace string
	// org is used as the organization for a newly rotated root cert.
	org string
	// rootCertFile, when set, names a file whose root certs are appended to the bundle.
	rootCertFile string
	// secretName is the name of the CA secret to load and update.
	secretName string
	// client is used to read/write CA secrets in the cluster.
	client corev1.CoreV1Interface
	// CheckInterval is how often the rotator wakes up to check for imminent expiry.
	CheckInterval time.Duration
	// caCertTTL is the TTL given to a newly generated root cert.
	caCertTTL time.Duration
	// retryInterval and retryMax bound the retries for secret load/update operations.
	retryInterval time.Duration
	retryMax time.Duration
	// dualUse sets the IsDualUse option on the rotated cert.
	dualUse bool
	// enableJitter adds a random startup delay so multiple Citadels don't rotate in lockstep.
	enableJitter bool
}
// SelfSignedCARootCertRotator automatically checks self-signed signing root
// certificate and rotates root certificate if it is going to expire.
type SelfSignedCARootCertRotator struct {
	// caSecretController loads and updates the CA secret with retries.
	caSecretController *controller.CaSecretController
	// config holds rotation intervals, secret location, and cert generation options.
	config *SelfSignedCARootCertRotatorConfig
	// backOffTime is the random startup delay applied when jitter is enabled.
	backOffTime time.Duration
	// ca is the CA whose key cert bundle is refreshed when the root rotates.
	ca *IstioCA
	// onRootCertUpdate, when non-nil, is invoked after the root cert changes.
	onRootCertUpdate func() error
}
// NewSelfSignedCARootCertRotator returns a new root cert rotator instance that
// rotates self-signed root cert periodically.
// nolint: gosec
// Not security sensitive code
func NewSelfSignedCARootCertRotator(config *SelfSignedCARootCertRotatorConfig,
	ca *IstioCA,
	onRootCertUpdate func() error,
) *SelfSignedCARootCertRotator {
	rotator := &SelfSignedCARootCertRotator{
		caSecretController: controller.NewCaSecretController(config.client),
		config:             config,
		ca:                 ca,
		onRootCertUpdate:   onRootCertUpdate,
	}
	if !config.enableJitter {
		rotator.backOffTime = time.Duration(0)
		return rotator
	}
	// Select a back off time in seconds, which is in the range of [0, rotator.config.CheckInterval).
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	jitter := time.Duration(rng.Int63n(int64(rotator.config.CheckInterval)))
	rotator.backOffTime = time.Duration(int(jitter.Seconds())) * time.Second
	rootCertRotatorLog.Infof("Set up back off time %s to start rotator.", rotator.backOffTime.String())
	return rotator
}
// Run refreshes root certs and updates config map accordingly.
// It blocks until stopCh is closed, so callers typically run it in a goroutine.
func (rotator *SelfSignedCARootCertRotator) Run(stopCh chan struct{}) {
	if rotator.config.enableJitter {
		rootCertRotatorLog.Infof("Jitter is enabled, wait %s before "+
			"starting root cert rotator.", rotator.backOffTime.String())
		select {
		case <-time.After(rotator.backOffTime):
			rootCertRotatorLog.Infof("Jitter complete, start rotator.")
		case <-stopCh:
			rootCertRotatorLog.Info("Received stop signal, so stop the root cert rotator.")
			return
		}
	}
	ticker := time.NewTicker(rotator.config.CheckInterval)
	// Stop the ticker on every exit path; the previous in-branch Stop guarded by a
	// pointless nil check only covered the channel-closed case.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rootCertRotatorLog.Info("Check and rotate root cert.")
			rotator.checkAndRotateRootCert()
		case _, ok := <-stopCh:
			// Only a closed channel stops the rotator; a sent value is ignored,
			// matching the established stop-by-close contract.
			if !ok {
				rootCertRotatorLog.Info("Received stop signal, so stop the root cert rotator.")
				return
			}
		}
	}
}
// checkAndRotateRootCert decides whether root cert should be refreshed, and rotates
// root cert for self-signed Citadel.
func (rotator *SelfSignedCARootCertRotator) checkAndRotateRootCert() {
	caSecret, scrtErr := rotator.caSecretController.LoadCASecretWithRetry(rotator.config.secretName,
		rotator.config.caStorageNamespace, rotator.config.retryInterval, rotator.config.retryMax)
	if scrtErr != nil {
		rootCertRotatorLog.Errorf("Fail to load CA secret %s:%s (error: %s), skip cert rotation job",
			rotator.config.caStorageNamespace, rotator.config.secretName, scrtErr.Error())
		return
	}
	rotator.checkAndRotateRootCertForSigningCertCitadel(caSecret)
}
// checkAndRotateRootCertForSigningCertCitadel checks root cert secret and rotates
// root cert if the current one is about to expire. The rotation uses existing
// root private key to generate a new root cert, and updates root cert secret.
// If the secret's cert is still valid but differs from the in-memory bundle
// (another Citadel rotated it), the bundle is reloaded instead of rotated.
func (rotator *SelfSignedCARootCertRotator) checkAndRotateRootCertForSigningCertCitadel(
	caSecret *v1.Secret,
) {
	if caSecret == nil {
		rootCertRotatorLog.Errorf("root cert secret %s is nil, skip cert rotation job",
			rotator.config.secretName)
		return
	}
	// Check root certificate expiration time in CA secret
	waitTime, err := rotator.config.certInspector.GetWaitTime(caSecret.Data[CACertFile], time.Now())
	if err == nil && waitTime > 0 {
		rootCertRotatorLog.Info("Root cert is not about to expire, skipping root cert rotation.")
		caCertInMem, _, _, _ := rotator.ca.GetCAKeyCertBundle().GetAllPem()
		// If CA certificate is different from the CA certificate in local key
		// cert bundle, it implies that other Citadels have updated istio-ca-secret or cacerts.
		// Reload root certificate into key cert bundle.
		if !bytes.Equal(caCertInMem, caSecret.Data[CACertFile]) {
			rootCertRotatorLog.Warnf("CA cert in KeyCertBundle does not match CA cert in "+
				"%s. Start to reload root cert into KeyCertBundle", rotator.config.secretName)
			rootCerts, err := util.AppendRootCerts(caSecret.Data[CACertFile], rotator.config.rootCertFile)
			if err != nil {
				rootCertRotatorLog.Errorf("failed to append root certificates from file: %s", err.Error())
				return
			}
			if err := rotator.ca.GetCAKeyCertBundle().VerifyAndSetAll(caSecret.Data[CACertFile],
				caSecret.Data[CAPrivateKeyFile], nil, rootCerts); err != nil {
				rootCertRotatorLog.Errorf("failed to reload root cert into KeyCertBundle (%v)", err)
			} else {
				rootCertRotatorLog.Info("Successfully reloaded root cert into KeyCertBundle.")
			}
			// Notify listeners even after a reload, so downstream consumers pick up the new root.
			if rotator.onRootCertUpdate != nil {
				_ = rotator.onRootCertUpdate()
			}
		}
		return
	}
	rootCertRotatorLog.Infof("Refresh root certificate, root cert is about to expire: %s", err.Error())
	// Carry over fields from the existing root cert so the rotated cert matches it
	// as closely as possible.
	oldCertOptions, err := util.GetCertOptionsFromExistingCert(caSecret.Data[CACertFile])
	if err != nil {
		rootCertRotatorLog.Warnf("Failed to generate cert options from existing root certificate (%v), "+
			"new root certificate may not match old root certificate", err)
	}
	options := util.CertOptions{
		TTL:           rotator.config.caCertTTL,
		SignerPrivPem: caSecret.Data[CAPrivateKeyFile],
		Org:           rotator.config.org,
		IsCA:          true,
		IsSelfSigned:  true,
		RSAKeySize:    rotator.ca.caRSAKeySize,
		IsDualUse:     rotator.config.dualUse,
	}
	// options should be consistent with the one used in NewSelfSignedIstioCAOptions().
	// This is to make sure when rotate the root cert, we don't make unnecessary changes
	// to the certificate or add extra fields to the certificate.
	options = util.MergeCertOptions(options, oldCertOptions)
	pemCert, pemKey, ckErr := util.GenRootCertFromExistingKey(options)
	if ckErr != nil {
		rootCertRotatorLog.Errorf("unable to generate CA cert and key for self-signed CA: %s", ckErr.Error())
		return
	}
	pemRootCerts, err := util.AppendRootCerts(pemCert, rotator.config.rootCertFile)
	if err != nil {
		rootCertRotatorLog.Errorf("failed to append root certificates: %s", err.Error())
		return
	}
	// Snapshot the current material so a failed roll-forward can be rolled back.
	oldCaCert := caSecret.Data[CACertFile]
	oldCaPrivateKey := caSecret.Data[CAPrivateKeyFile]
	oldRootCerts := rotator.ca.GetCAKeyCertBundle().GetRootCertPem()
	if rollback, err := rotator.updateRootCertificate(caSecret, true, pemCert, pemKey, pemRootCerts); err != nil {
		if !rollback {
			rootCertRotatorLog.Errorf("Failed to roll forward root certificate (error: %s). "+
				"Abort new root certificate", err.Error())
			return
		}
		// caSecret is out-of-date. Need to load the latest istio-ca-secret to roll back root certificate.
		_, err = rotator.updateRootCertificate(nil, false, oldCaCert, oldCaPrivateKey, oldRootCerts)
		if err != nil {
			rootCertRotatorLog.Errorf("Failed to roll backward root certificate (error: %s).", err.Error())
		}
		return
	}
	rootCertRotatorLog.Info("Root certificate rotation is completed successfully.")
}
// updateRootCertificate updates root certificate in istio-ca-secret, keycertbundle and configmap. It takes a scrt
// object, cert, and key, and a flag rollForward indicating whether this update is to roll forward root certificate or
// to roll backward.
// updateRootCertificate returns error when any step is failed, and a flag indicating whether a rollback is required.
// Only when rollForward is true and failure happens, the returned rollback flag is true.
func (rotator *SelfSignedCARootCertRotator) updateRootCertificate(caSecret *v1.Secret, rollForward bool, cert, key, rootCert []byte) (bool, error) {
	var err error
	// A nil caSecret means the caller's copy may be stale; fetch the latest one.
	if caSecret == nil {
		caSecret, err = rotator.caSecretController.LoadCASecretWithRetry(rotator.config.secretName,
			rotator.config.caStorageNamespace, rotator.config.retryInterval, rotator.config.retryMax)
		if err != nil {
			return false, fmt.Errorf("failed to load CA secret %s:%s (error: %s)", rotator.config.caStorageNamespace, rotator.config.secretName,
				err.Error())
		}
	}
	// Step 1: persist the new cert/key into the CA secret.
	caSecret.Data[CACertFile] = cert
	caSecret.Data[CAPrivateKeyFile] = key
	if err = rotator.caSecretController.UpdateCASecretWithRetry(caSecret, rotator.config.retryInterval, rotator.config.retryMax); err != nil {
		return false, fmt.Errorf("failed to update CA secret (error: %s)", err.Error())
	}
	rootCertRotatorLog.Infof("Root certificate is written into CA secret: %v", string(cert))
	// Step 2: load the new material into the in-memory key cert bundle. Failing
	// here after step 1 succeeded is what makes a rollback necessary.
	if err := rotator.ca.GetCAKeyCertBundle().VerifyAndSetAll(cert, key, nil, rootCert); err != nil {
		if rollForward {
			// Rolling forward root certificate fails at keycertbundle update, notify caller to rollback.
			return true, fmt.Errorf("failed to update CA KeyCertBundle (error: %s)", err.Error())
		}
		return false, fmt.Errorf("failed to update CA KeyCertBundle (error: %s)", err.Error())
	}
	rootCertRotatorLog.Infof("Root certificate is updated in CA KeyCertBundle: %v", string(cert))
	// Step 3: notify the registered callback about the root change (best effort).
	if rotator.onRootCertUpdate != nil {
		_ = rotator.onRootCertUpdate()
	}
	return false, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package error
import "google.golang.org/grpc/codes"
// ErrType is the type for CA errors.
type ErrType int

const (
	// CANotReady means the CA is not ready to sign CSRs.
	CANotReady ErrType = iota
	// CSRError means the CA cannot sign CSR due to CSR error.
	CSRError
	// TTLError means the required TTL is invalid.
	TTLError
	// CertGenError means an error happened during the certificate generation.
	CertGenError
	// CAIllegalConfig means the configuration/deployment parameters for CA are incorrect.
	CAIllegalConfig
	// CAInitFail means some other unexpected and fatal initialization failure.
	CAInitFail
)
// Error encapsulates the short and long errors.
type Error struct {
	// t classifies the error for metric labels and gRPC code mapping.
	t ErrType
	// err carries the detailed underlying error.
	err error
}

// Error returns the string error message.
func (e Error) Error() string {
	return e.err.Error()
}
// ErrorType returns a short string representing the error type.
// Previously CAIllegalConfig and CAInitFail were defined but unhandled here and
// surfaced as "UNKNOWN"; they now get dedicated labels. Unrecognized types still
// map to "UNKNOWN".
func (e Error) ErrorType() string {
	switch e.t {
	case CANotReady:
		return "CA_NOT_READY"
	case CSRError:
		return "CSR_ERROR"
	case TTLError:
		return "TTL_ERROR"
	case CertGenError:
		return "CERT_GEN_ERROR"
	case CAIllegalConfig:
		return "CA_ILLEGAL_CONFIG"
	case CAInitFail:
		return "CA_INIT_FAIL"
	}
	return "UNKNOWN"
}
// HTTPErrorCode returns an HTTP error code representing the error type.
func (e Error) HTTPErrorCode() codes.Code {
	switch e.t {
	case CSRError, TTLError:
		// Problems with the caller's request.
		return codes.InvalidArgument
	case CANotReady, CertGenError:
		// Server-side failures.
		return codes.Internal
	default:
		return codes.Internal
	}
}
// NewError creates a new Error instance.
func NewError(t ErrType, err error) *Error {
	e := Error{t: t, err: err}
	return &e
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ra
import (
"fmt"
"time"
clientset "k8s.io/client-go/kubernetes"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/slices"
raerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
caserver "istio.io/istio/security/pkg/server/ca"
)
// RegistrationAuthority : Registration Authority interface.
// An RA satisfies the CertificateAuthority server interface but delegates actual
// signing to an external CA, optionally consulting mesh config for root certs.
type RegistrationAuthority interface {
	caserver.CertificateAuthority
	// SetCACertificatesFromMeshConfig sets the CACertificates using the ones from mesh config
	SetCACertificatesFromMeshConfig([]*meshconfig.MeshConfig_CertificateData)
	// GetRootCertFromMeshConfig returns the root cert for the specific signer in mesh config
	GetRootCertFromMeshConfig(signerName string) ([]byte, error)
}
// CaExternalType : Type of External CA integration (see ExtCAK8s for the
// currently supported value).
type CaExternalType string
// IstioRAOptions : Configuration Options for the IstioRA
type IstioRAOptions struct {
	// ExternalCAType: Integration API type with external CA
	ExternalCAType CaExternalType
	// DefaultCertTTL: Default Certificate TTL
	DefaultCertTTL time.Duration
	// MaxCertTTL: Maximum Certificate TTL that can be requested
	MaxCertTTL time.Duration
	// CaCertFile : File containing PEM encoded CA root certificate of external CA
	CaCertFile string
	// CaSigner : To indicate custom CA Signer name when using external K8s CA
	CaSigner string
	// VerifyAppendCA : Whether to use caCertFile containing CA root cert to verify and append to signed cert-chain
	VerifyAppendCA bool
	// K8sClient : K8s API client
	K8sClient clientset.Interface
	// TrustDomain : trust domain for the mesh — presumably used when constructing identities; confirm against callers.
	TrustDomain string
	// CertSignerDomain : domain prefix combined with per-request cert signer names (see kubernetesSign).
	CertSignerDomain string
}
const (
	// ExtCAK8s : Integrate with external CA using k8s CSR API
	ExtCAK8s CaExternalType = "ISTIOD_RA_KUBERNETES_API"
	// DefaultExtCACertDir : Location of external CA certificate
	DefaultExtCACertDir string = "./etc/external-ca-cert"
)
// ValidateCSR : Validate all SAN extensions in csrPEM match authenticated identities
func ValidateCSR(csrPEM []byte, subjectIDs []string) bool {
	csr, err := util.ParsePemEncodedCSR(csrPEM)
	if err != nil {
		return false
	}
	if csr.CheckSignature() != nil {
		return false
	}
	csrIDs, err := util.ExtractIDs(csr.Extensions)
	if err != nil {
		return false
	}
	// Every identity in the CSR must be one of the authenticated identities.
	for _, id := range csrIDs {
		if !slices.Contains(subjectIDs, id) {
			return false
		}
	}
	return true
}
// NewIstioRA is a factory method that returns an RA that implements the RegistrationAuthority functionality.
// the caOptions defines the external provider
func NewIstioRA(opts *IstioRAOptions) (RegistrationAuthority, error) {
	if opts.ExternalCAType == ExtCAK8s {
		istioRA, err := NewKubernetesRA(opts)
		if err != nil {
			// Fixed grammar of the original message ("an K8s CA").
			return nil, fmt.Errorf("failed to create a K8s CA: %v", err)
		}
		// err is known to be nil here; return it explicitly as nil for clarity.
		return istioRA, nil
	}
	return nil, fmt.Errorf("invalid CA Name %s", opts.ExternalCAType)
}
// preSign : Validation checks to execute before signing certificates.
// Returns the effective lifetime to use (defaulting a non-positive request to
// DefaultCertTTL) or a typed raerror when validation fails.
func preSign(raOpts *IstioRAOptions, csrPEM []byte, subjectIDs []string, requestedLifetime time.Duration, forCA bool) (time.Duration, error) {
	// The RA never issues CA certificates.
	if forCA {
		return requestedLifetime, raerror.NewError(raerror.CSRError,
			fmt.Errorf("unable to generate CA certificates")) // fixed typo: "certifificates"
	}
	if !ValidateCSR(csrPEM, subjectIDs) {
		return requestedLifetime, raerror.NewError(raerror.CSRError, fmt.Errorf(
			"unable to validate SAN Identities in CSR"))
	}
	// If the requested requestedLifetime is non-positive, apply the default TTL.
	lifetime := requestedLifetime
	if requestedLifetime <= 0 {
		lifetime = raOpts.DefaultCertTTL
	}
	// If the requested TTL is greater than maxCertTTL, return an error.
	// Compare Durations directly instead of via float Seconds().
	if requestedLifetime > raOpts.MaxCertTTL {
		return lifetime, raerror.NewError(raerror.TTLError, fmt.Errorf(
			"requested TTL %s is greater than the max allowed TTL %s", requestedLifetime, raOpts.MaxCertTTL))
	}
	return lifetime, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ra
import (
"bytes"
"fmt"
"strings"
"sync"
"time"
cert "k8s.io/api/certificates/v1"
clientset "k8s.io/client-go/kubernetes"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pkg/log"
"istio.io/istio/security/pkg/k8s/chiron"
"istio.io/istio/security/pkg/pki/ca"
raerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
)
// KubernetesRA integrated with an external CA using Kubernetes CSR API
type KubernetesRA struct {
	// csrInterface submits CSR objects to the Kubernetes API.
	csrInterface clientset.Interface
	// keyCertBundle holds the root cert loaded from raOpts.CaCertFile.
	keyCertBundle *util.KeyCertBundle
	// raOpts carries TTL bounds, signer name and cert file locations.
	raOpts *IstioRAOptions
	// caCertificatesFromMeshConfig maps comma-joined signer lists to PEM root certs.
	caCertificatesFromMeshConfig map[string]string
	// certSignerDomain prefixes per-request cert signer names.
	certSignerDomain string
	// mutex protects the R/W to caCertificatesFromMeshConfig.
	mutex sync.RWMutex
}
// pkiRaLog is the logging scope for Istiod RA operations.
var pkiRaLog = log.RegisterScope("pkira", "Istiod RA log")
// NewKubernetesRA : Create a RA that interfaces with K8S CSR CA
func NewKubernetesRA(raOpts *IstioRAOptions) (*KubernetesRA, error) {
	keyCertBundle, err := util.NewKeyCertBundleWithRootCertFromFile(raOpts.CaCertFile)
	if err != nil {
		// Include the underlying error so a bad CaCertFile is diagnosable; the
		// original message discarded it entirely.
		return nil, raerror.NewError(raerror.CAInitFail, fmt.Errorf("error processing Certificate Bundle for Kubernetes RA: %v", err))
	}
	istioRA := &KubernetesRA{
		csrInterface:                 raOpts.K8sClient,
		raOpts:                       raOpts,
		keyCertBundle:                keyCertBundle,
		certSignerDomain:             raOpts.CertSignerDomain,
		caCertificatesFromMeshConfig: make(map[string]string),
	}
	return istioRA, nil
}
// kubernetesSign resolves the effective signer name, submits the CSR to the
// Kubernetes CSR API via chiron, and returns the signed cert chain.
func (r *KubernetesRA) kubernetesSign(csrPEM []byte, caCertFile string, certSigner string,
	requestedLifetime time.Duration,
) ([]byte, error) {
	certSignerDomain := r.certSignerDomain
	switch {
	case certSigner == "":
		// No per-request signer: fall back to the configured CA signer.
		certSigner = r.raOpts.CaSigner
	case certSignerDomain == "":
		return nil, raerror.NewError(raerror.CertGenError, fmt.Errorf("certSignerDomain is required for signer %s", certSigner))
	default:
		certSigner = certSignerDomain + "/" + certSigner
	}
	usages := []cert.KeyUsage{
		cert.UsageDigitalSignature,
		cert.UsageKeyEncipherment,
		cert.UsageServerAuth,
		cert.UsageClientAuth,
	}
	certChain, _, err := chiron.SignCSRK8s(r.csrInterface, csrPEM, certSigner, usages, "", caCertFile, true, false, requestedLifetime)
	if err != nil {
		return nil, raerror.NewError(raerror.CertGenError, err)
	}
	return certChain, nil
}
// Sign takes a PEM-encoded CSR and cert opts, and returns a certificate signed by k8s CA.
func (r *KubernetesRA) Sign(csrPEM []byte, certOpts ca.CertOpts) ([]byte, error) {
	if _, err := preSign(r.raOpts, csrPEM, certOpts.SubjectIDs, certOpts.TTL, certOpts.ForCA); err != nil {
		return nil, err
	}
	return r.kubernetesSign(csrPEM, r.raOpts.CaCertFile, certOpts.CertSigner, certOpts.TTL)
}
// SignWithCertChain is similar to Sign but returns the leaf cert and the entire cert chain.
// root cert comes from two sources, order matters:
// 1. Specified in mesh config
// 2. Extract from the cert-chain signed by the CSR signer.
// If no root cert can be found from either of the two sources, error returned.
// There are several possible situations:
// 1. root cert is specified in mesh config and is empty in signed cert chain, in this case
// we verify the signed cert chain against the root cert from mesh config and append the
// root cert into the cert chain.
// 2. root cert is specified in mesh config and also can be extracted in signed cert chain, in this
// case we verify the signed cert chain against the root cert from mesh config and append it
// into the cert chain if the two root certs are different. This is typical when
// the returned cert chain only contains the intermediate CA.
// 3. root cert is not specified in mesh config but can be extracted in signed cert chain, in this case
// we verify the signed cert chain against the root cert and return the cert chain directly.
func (r *KubernetesRA) SignWithCertChain(csrPEM []byte, certOpts ca.CertOpts) ([]string, error) {
	cert, err := r.Sign(csrPEM, certOpts)
	if err != nil {
		return nil, err
	}
	// Append the bundle's intermediate chain (if any) to the signed leaf.
	chainPem := r.GetCAKeyCertBundle().GetCertChainPem()
	if len(chainPem) > 0 {
		cert = append(cert, chainPem...)
	}
	respCertChain := []string{string(cert)}
	var possibleRootCert, rootCertFromMeshConfig, rootCertFromCertChain []byte
	certSigner := r.certSignerDomain + "/" + certOpts.CertSigner
	// Only resolve a root cert when the bundle does not already carry one.
	if len(r.GetCAKeyCertBundle().GetRootCertPem()) == 0 {
		rootCertFromCertChain, err = util.FindRootCertFromCertificateChainBytes(cert)
		if err != nil {
			pkiRaLog.Infof("failed to find root cert from signed cert-chain (%v)", err.Error())
		}
		rootCertFromMeshConfig, err = r.GetRootCertFromMeshConfig(certSigner)
		if err != nil {
			pkiRaLog.Infof("failed to find root cert from mesh config (%v)", err.Error())
		}
		// Mesh config takes precedence over a root extracted from the chain.
		if rootCertFromMeshConfig != nil {
			possibleRootCert = rootCertFromMeshConfig
		} else if rootCertFromCertChain != nil {
			possibleRootCert = rootCertFromCertChain
		}
		if possibleRootCert == nil {
			return nil, raerror.NewError(raerror.CSRError, fmt.Errorf("failed to find root cert from either signed cert-chain or mesh config"))
		}
		if verifyErr := util.VerifyCertificate(nil, cert, possibleRootCert, nil); verifyErr != nil {
			return nil, raerror.NewError(raerror.CSRError, fmt.Errorf("root cert from signed cert-chain is invalid (%v)", verifyErr))
		}
		// Append the root only when the chain does not already end with it.
		if !bytes.Equal(possibleRootCert, rootCertFromCertChain) {
			respCertChain = append(respCertChain, string(possibleRootCert))
		}
	}
	return respCertChain, nil
}
// GetCAKeyCertBundle returns the KeyCertBundle for the CA.
// The bundle carries the root cert loaded from CaCertFile at construction time.
func (r *KubernetesRA) GetCAKeyCertBundle() *util.KeyCertBundle {
	return r.keyCertBundle
}
// SetCACertificatesFromMeshConfig sets the CACertificates using the ones from
// mesh config. Entries are keyed by the comma-joined list of signer names;
// entries without signers or without PEM content are ignored.
func (r *KubernetesRA) SetCACertificatesFromMeshConfig(caCertificates []*meshconfig.MeshConfig_CertificateData) {
	r.mutex.Lock()
	// Defer the unlock so the mutex is released even if an entry panics mid-loop.
	defer r.mutex.Unlock()
	for _, pemCert := range caCertificates {
		// TODO: take care of spiffe bundle format as well
		cert := pemCert.GetPem()
		certSigners := pemCert.CertSigners
		if len(certSigners) == 0 {
			continue
		}
		certSigner := strings.Join(certSigners, ",")
		if cert != "" {
			r.caCertificatesFromMeshConfig[certSigner] = cert
		}
	}
}
// GetRootCertFromMeshConfig returns the root cert registered for signerName in
// mesh config, or an error when no matching signer entry exists.
func (r *KubernetesRA) GetRootCertFromMeshConfig(signerName string) ([]byte, error) {
	r.mutex.RLock()
	defer r.mutex.RUnlock()
	if len(r.caCertificatesFromMeshConfig) == 0 {
		return nil, fmt.Errorf("no caCertificates defined in mesh config")
	}
	// Keys are comma-joined signer lists; match signerName against each entry.
	for signers, caCertificate := range r.caCertificatesFromMeshConfig {
		for _, signer := range strings.Split(signers, ",") {
			if signer == signerName {
				return []byte(caCertificate), nil
			}
		}
	}
	return nil, fmt.Errorf("failed to find root cert for signer: %v in mesh config", signerName)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"reflect"
"strings"
)
// PEM block types accepted when parsing PEM-encoded private keys.
const (
	blockTypeECPrivateKey = "EC PRIVATE KEY" // SEC 1 EC private key
	blockTypeRSAPrivateKey = "RSA PRIVATE KEY" // PKCS#1 private key
	blockTypePKCS8PrivateKey = "PRIVATE KEY" // PKCS#8 plain private key
)
// ParsePemEncodedCertificate constructs a `x509.Certificate` object using the
// given a PEM-encoded certificate.
func ParsePemEncodedCertificate(certBytes []byte) (*x509.Certificate, error) {
cb, _ := pem.Decode(certBytes)
if cb == nil {
return nil, fmt.Errorf("invalid PEM encoded certificate")
}
cert, err := x509.ParseCertificate(cb.Bytes)
if err != nil {
return nil, fmt.Errorf("failed to parse X.509 certificate")
}
return cert, nil
}
// ParsePemEncodedCertificateChain constructs a slice of `x509.Certificate` and `rootCertBytes`
// objects using the given a PEM-encoded certificate chain.
func ParsePemEncodedCertificateChain(certBytes []byte) ([]*x509.Certificate, []byte, error) {
var (
certs []*x509.Certificate
cb *pem.Block
rootCertBytes []byte
)
for {
rootCertBytes = certBytes
cb, certBytes = pem.Decode(certBytes)
if cb == nil {
return nil, nil, fmt.Errorf("invalid PEM encoded certificate")
}
cert, err := x509.ParseCertificate(cb.Bytes)
if err != nil {
return nil, nil, fmt.Errorf("failed to parse X.509 certificate")
}
certs = append(certs, cert)
if len(certBytes) == 0 {
break
}
}
if len(certs) == 0 {
return nil, nil, fmt.Errorf("no PEM encoded X.509 certificates parsed")
}
return certs, rootCertBytes, nil
}
// ParsePemEncodedCSR constructs a `x509.CertificateRequest` object using the
// given PEM-encoded certificate signing request.
func ParsePemEncodedCSR(csrBytes []byte) (*x509.CertificateRequest, error) {
block, _ := pem.Decode(csrBytes)
if block == nil {
return nil, fmt.Errorf("certificate signing request is not properly encoded")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, fmt.Errorf("failed to parse X.509 certificate signing request")
}
return csr, nil
}
// ParsePemEncodedKey decodes a PEM-encoded private key — SEC 1 EC, PKCS#1 RSA,
// or PKCS#8 — and returns it as a crypto.PrivateKey.
func ParsePemEncodedKey(keyBytes []byte) (crypto.PrivateKey, error) {
	block, _ := pem.Decode(keyBytes)
	if block == nil {
		return nil, fmt.Errorf("invalid PEM-encoded key")
	}
	var (
		key any
		err error
	)
	// Dispatch on the PEM block type to the matching x509 parser.
	switch block.Type {
	case blockTypeECPrivateKey:
		if key, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
			return nil, fmt.Errorf("failed to parse the ECDSA private key: %v", err)
		}
	case blockTypeRSAPrivateKey:
		if key, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
			return nil, fmt.Errorf("failed to parse the RSA private key: %v", err)
		}
	case blockTypePKCS8PrivateKey:
		if key, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
			return nil, fmt.Errorf("failed to parse the PKCS8 private key: %v", err)
		}
	default:
		return nil, fmt.Errorf("unsupported PEM block type for a private key: %s", block.Type)
	}
	return key, nil
}
// GetRSAKeySize returns the size if it is RSA key, otherwise it returns an error.
func GetRSAKeySize(privKey crypto.PrivateKey) (int, error) {
if t := reflect.TypeOf(privKey); t != reflect.TypeOf(&rsa.PrivateKey{}) {
return 0, fmt.Errorf("key type is not RSA: %v", t)
}
pkey := privKey.(*rsa.PrivateKey)
return pkey.N.BitLen(), nil
}
// GetEllipticCurve returns the type of curve associated with the private key;
// if ECDSA is used, then only 384 and 256 (default) are returned; if non-ECDSA
// is used then an error is returned
func GetEllipticCurve(privKey *crypto.PrivateKey) (elliptic.Curve, error) {
switch key := (*privKey).(type) {
// this should agree with var SupportedECSignatureAlgorithms
case *ecdsa.PrivateKey:
if key.Curve == elliptic.P384() {
return key.Curve, nil
}
return elliptic.P256(), nil
default:
return nil, fmt.Errorf("private key is not ECDSA based")
}
}
// PemCertBytestoString takes a buffer of concatenated PEM certs and returns
// them as individual strings, in the original order, with a single leading
// and trailing newline removed from each.
func PemCertBytestoString(caCerts []byte) []string {
	result := []string{}
	remaining := caCerts
	for {
		block, rest := pem.Decode(remaining)
		if block == nil || len(block.Bytes) == 0 {
			break
		}
		if len(rest) == 0 {
			// Last block: everything left belongs to this cert.
			result = append(result, strings.TrimPrefix(strings.TrimSuffix(string(remaining), "\n"), "\n"))
			break
		}
		// Slice off exactly the bytes pem.Decode consumed for this block.
		consumed := string(remaining[0 : len(remaining)-len(rest)])
		result = append(result, strings.TrimPrefix(strings.TrimSuffix(consumed, "\n"), "\n"))
		remaining = rest
	}
	return result
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"strings"
)
// DualUseCommonName extracts a valid CommonName from a comma-delimited host string
// for dual-use certificates.
func DualUseCommonName(host string) (string, error) {
// cn uses one hostname, drop the rest
first := strings.SplitN(host, ",", 2)[0]
// cn max length is 64 (ub-common-name @ https://tools.ietf.org/html/rfc5280)
if l := len(first); l > 64 {
return "", fmt.Errorf("certificate CN upper bound exceeded (%v>64): %s", l, first)
}
return first, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provides utility methods to generate X.509 certificates with different
// options. This implementation is largely inspired from
// https://golang.org/src/crypto/tls/generate_cert.go.
package util
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math/big"
"os"
"strings"
"time"
"istio.io/istio/pkg/log"
)
// SupportedECSignatureAlgorithms are the types of EC Signature Algorithms
// to be used in key generation (e.g. ECDSA or ED25519).
type SupportedECSignatureAlgorithms string
// SupportedEllipticCurves are the types of curves
// to be used in key generation (e.g. P256, P384).
type SupportedEllipticCurves string
const (
	// EcdsaSigAlg is the only EC signature algorithm currently supported.
	EcdsaSigAlg SupportedECSignatureAlgorithms = "ECDSA"
	// P256Curve and P384Curve are the curves supported when using ECC.
	P256Curve SupportedEllipticCurves = "P256"
	P384Curve SupportedEllipticCurves = "P384"
)
// CertOptions contains options for generating a new certificate.
type CertOptions struct {
	// Comma-separated hostnames and IPs to generate a certificate for.
	// This can also be set to the identity running the workload,
	// like kubernetes service account.
	Host string
	// The NotBefore field of the issued certificate.
	NotBefore time.Time
	// TTL of the certificate. NotAfter - NotBefore.
	TTL time.Duration
	// Signer certificate.
	SignerCert *x509.Certificate
	// Signer private key.
	SignerPriv crypto.PrivateKey
	// Signer private key (PEM encoded).
	SignerPrivPem []byte
	// Organization for this certificate.
	Org string
	// The size of RSA private key to be generated.
	RSAKeySize int
	// Whether this certificate is used as signing cert for CA.
	IsCA bool
	// Whether this certificate is self-signed.
	IsSelfSigned bool
	// Whether this certificate is for a client.
	IsClient bool
	// Whether this certificate is for a server.
	IsServer bool
	// Whether this certificate is for dual-use clients (SAN+CN).
	IsDualUse bool
	// If true, the private key is encoded with PKCS#8.
	PKCS8Key bool
	// The type of Elliptical Signature algorithm to use
	// when generating private keys. Currently only ECDSA is supported.
	// If empty, RSA is used, otherwise ECC is used.
	ECSigAlg SupportedECSignatureAlgorithms
	// The elliptic curve to use when ECSigAlg selects ECC key generation.
	// Currently P256 (default) and P384 are supported; ignored for RSA.
	ECCCurve SupportedEllipticCurves
	// Subjective Alternative Name values.
	DNSNames string
}
// GenCertKeyFromOptions generates a X.509 certificate and a private key with
// the given options. The generated key pair is bound to the certificate; in
// the self-signed case the same key also signs it, otherwise the signer key
// from the options is used.
func GenCertKeyFromOptions(options CertOptions) (pemCert []byte, pemKey []byte, err error) {
	if options.ECSigAlg == "" {
		// RSA path: enforce the minimum key size before generating.
		if options.RSAKeySize < minimumRsaKeySize {
			return nil, nil, fmt.Errorf("requested key size does not meet the minimum required size of %d (requested: %d)", minimumRsaKeySize, options.RSAKeySize)
		}
		rsaPriv, rsaErr := rsa.GenerateKey(rand.Reader, options.RSAKeySize)
		if rsaErr != nil {
			return nil, nil, fmt.Errorf("cert generation fails at RSA key generation (%v)", rsaErr)
		}
		return genCert(options, rsaPriv, &rsaPriv.PublicKey)
	}
	// ECC path: only ECDSA is supported.
	if options.ECSigAlg != EcdsaSigAlg {
		return nil, nil, errors.New("cert generation fails due to unsupported EC signature algorithm")
	}
	curve := elliptic.P256()
	if options.ECCCurve == P384Curve {
		curve = elliptic.P384()
	}
	ecPriv, ecErr := ecdsa.GenerateKey(curve, rand.Reader)
	if ecErr != nil {
		return nil, nil, fmt.Errorf("cert generation fails at EC key generation (%v)", ecErr)
	}
	return genCert(options, ecPriv, &ecPriv.PublicKey)
}
// genCert builds the X.509 certificate described by options for the given
// key pair, signing it either with itself (self-signed) or with the signer
// cert/key supplied in options, and returns the PEM-encoded cert and key.
func genCert(options CertOptions, priv any, key any) ([]byte, []byte, error) {
	template, err := genCertTemplateFromOptions(options)
	if err != nil {
		return nil, nil, fmt.Errorf("cert generation fails at cert template creation (%v)", err)
	}
	// Default to self-signing; use the external signer when configured.
	signerCert := template
	signerKey := crypto.PrivateKey(priv)
	if !options.IsSelfSigned {
		signerCert = options.SignerCert
		signerKey = options.SignerPriv
	}
	certBytes, err := x509.CreateCertificate(rand.Reader, template, signerCert, key, signerKey)
	if err != nil {
		return nil, nil, fmt.Errorf("cert generation fails at X509 cert creation (%v)", err)
	}
	return encodePem(false, certBytes, priv, options.PKCS8Key)
}
func publicKey(priv any) any {
switch k := priv.(type) {
case *rsa.PrivateKey:
return &k.PublicKey
case *ecdsa.PrivateKey:
return &k.PublicKey
case ed25519.PrivateKey:
return k.Public().(ed25519.PublicKey)
default:
return nil
}
}
// GenRootCertFromExistingKey generates a X.509 certificate using existing
// CA private key. Only called by a self-signed Citadel.
// The PEM key in options.SignerPrivPem is parsed, used to self-sign the new
// root cert, and returned unchanged as pemKey.
func GenRootCertFromExistingKey(options CertOptions) (pemCert []byte, pemKey []byte, err error) {
	if !options.IsSelfSigned || len(options.SignerPrivPem) == 0 {
		return nil, nil, fmt.Errorf("skip cert " +
			"generation. Citadel is not in self-signed mode or CA private key is not " +
			"available")
	}
	template, err := genCertTemplateFromOptions(options)
	if err != nil {
		return nil, nil, fmt.Errorf("cert generation fails at cert template creation (%v)", err)
	}
	caPrivateKey, err := ParsePemEncodedKey(options.SignerPrivPem)
	if err != nil {
		// Fixed misspelled error message ("unrecogniazed").
		return nil, nil, fmt.Errorf("unrecognized CA "+
			"private key, skip root cert rotation: %s", err.Error())
	}
	// Self-signed: the template acts as both subject and issuer.
	certBytes, err := x509.CreateCertificate(rand.Reader, template, template, publicKey(caPrivateKey), caPrivateKey)
	if err != nil {
		return nil, nil, fmt.Errorf("cert generation fails at X509 cert creation (%v)", err)
	}
	pemCert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certBytes})
	return pemCert, options.SignerPrivPem, nil
}
// GetCertOptionsFromExistingCert parses cert and generates a CertOptions
// that contains information about the cert. This is the reverse operation of
// genCertTemplateFromOptions(), and only called by a self-signed Citadel.
// Currently only the first Organization is recovered.
func GetCertOptionsFromExistingCert(certBytes []byte) (opts CertOptions, err error) {
	cert, parseErr := ParsePemEncodedCertificate(certBytes)
	if parseErr != nil {
		return opts, parseErr
	}
	if orgs := cert.Subject.Organization; len(orgs) > 0 {
		opts.Org = orgs[0]
	}
	// TODO(JimmyCYJ): parse other fields from certificate, e.g. CommonName.
	return opts, nil
}
// MergeCertOptions merges deltaOpts into defaultOpts and returns the merged
// CertOptions. Only called by a self-signed Citadel.
// A non-empty Org in deltaOpts overrides the default.
func MergeCertOptions(defaultOpts, deltaOpts CertOptions) CertOptions {
	merged := defaultOpts
	if deltaOpts.Org != "" {
		merged.Org = deltaOpts.Org
	}
	// TODO(JimmyCYJ): merge other fields, e.g. Host, IsDualUse, etc.
	return merged
}
// GenCertFromCSR generates a X.509 certificate with the given CSR.
// subjectIDs become the certificate's SAN identities, ttl its validity
// window and isCA whether it may sign further certs. The returned cert is in
// DER form (from x509.CreateCertificate).
func GenCertFromCSR(csr *x509.CertificateRequest, signingCert *x509.Certificate, publicKey any,
	signingKey crypto.PrivateKey, subjectIDs []string, ttl time.Duration, isCA bool,
) (cert []byte, err error) {
	tmpl, err := genCertTemplateFromCSR(csr, subjectIDs, ttl, isCA)
	if err != nil {
		return nil, err
	}
	return x509.CreateCertificate(rand.Reader, tmpl, signingCert, publicKey, signingKey)
}
// LoadSignerCredsFromFiles loads the signer cert&key from the given files.
//
// signerCertFile: cert file name
// signerPrivFile: private key file name
func LoadSignerCredsFromFiles(signerCertFile string, signerPrivFile string) (*x509.Certificate, crypto.PrivateKey, error) {
	certPem, err := os.ReadFile(signerCertFile)
	if err != nil {
		return nil, nil, fmt.Errorf("certificate file reading failure (%v)", err)
	}
	keyPem, err := os.ReadFile(signerPrivFile)
	if err != nil {
		return nil, nil, fmt.Errorf("private key file reading failure (%v)", err)
	}
	cert, err := ParsePemEncodedCertificate(certPem)
	if err != nil {
		return nil, nil, fmt.Errorf("pem encoded cert parsing failure (%v)", err)
	}
	key, err := ParsePemEncodedKey(keyPem)
	if err != nil {
		return nil, nil, fmt.Errorf("pem encoded key parsing failure (%v)", err)
	}
	return cert, key, nil
}
// ClockSkewGracePeriod defines the period of time a certificate will be valid before its creation.
// This is meant to handle cases where we have clock skew between the CA and workloads.
// genCertTemplateFromCSR backdates NotBefore by this amount.
const ClockSkewGracePeriod = time.Minute * 2
// genCertTemplateFromCSR generates a certificate template with the given CSR.
// subjectIDs are joined into the SAN extension; NotBefore is backdated by
// ClockSkewGracePeriod from the current time.
func genCertTemplateFromCSR(csr *x509.CertificateRequest, subjectIDs []string, ttl time.Duration, isCA bool) (
	*x509.Certificate, error,
) {
	joinedIDs := strings.Join(subjectIDs, ",")
	// CA certs may sign other certs; leaf certs get signature + encipherment
	// plus (undifferentiated) server and client auth usages.
	var keyUsage x509.KeyUsage
	extKeyUsages := []x509.ExtKeyUsage{}
	if isCA {
		keyUsage = x509.KeyUsageCertSign
	} else {
		keyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
		extKeyUsages = append(extKeyUsages, x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth)
	}
	// Build cert extensions with the subjectIDs.
	san, err := BuildSubjectAltNameExtension(joinedIDs)
	if err != nil {
		return nil, err
	}
	// Dual use mode if the CSR carries a common name: derive the CN from the
	// joined subject IDs, logging and omitting it on failure.
	subject := pkix.Name{}
	if csr.Subject.CommonName != "" {
		if cn, cnErr := DualUseCommonName(joinedIDs); cnErr != nil {
			log.Errorf("dual-use failed for cert template - omitting CN (%v)", cnErr)
		} else {
			subject.CommonName = cn
		}
	}
	serialNum, err := genSerialNum()
	if err != nil {
		return nil, err
	}
	now := time.Now()
	// SignatureAlgorithm will use the default algorithm.
	// See https://golang.org/src/crypto/x509/x509.go?s=5131:5158#L1965 .
	return &x509.Certificate{
		SerialNumber:          serialNum,
		Subject:               subject,
		NotBefore:             now.Add(-ClockSkewGracePeriod),
		NotAfter:              now.Add(ttl),
		KeyUsage:              keyUsage,
		ExtKeyUsage:           extKeyUsages,
		IsCA:                  isCA,
		BasicConstraintsValid: true,
		ExtraExtensions:       []pkix.Extension{*san},
	}, nil
}
// genCertTemplateFromOptions generates a certificate template with the given options.
func genCertTemplateFromOptions(options CertOptions) (*x509.Certificate, error) {
	var keyUsage x509.KeyUsage
	if options.IsCA {
		// If the cert is a CA cert, the private key is allowed to sign other certificates.
		keyUsage = x509.KeyUsageCertSign
	} else {
		// Otherwise the private key is allowed for digital signature and key encipherment.
		keyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment
	}
	// Unlike genCertTemplateFromCSR, server/client auth usages are opt-in here.
	extKeyUsages := []x509.ExtKeyUsage{}
	if options.IsServer {
		extKeyUsages = append(extKeyUsages, x509.ExtKeyUsageServerAuth)
	}
	if options.IsClient {
		extKeyUsages = append(extKeyUsages, x509.ExtKeyUsageClientAuth)
	}
	// NotBefore defaults to "now" unless an explicit value was supplied.
	notBefore := time.Now()
	if !options.NotBefore.IsZero() {
		notBefore = options.NotBefore
	}
	serialNum, err := genSerialNum()
	if err != nil {
		return nil, err
	}
	subject := pkix.Name{
		Organization: []string{options.Org},
	}
	exts := []pkix.Extension{}
	if h := options.Host; len(h) > 0 {
		s, err := BuildSubjectAltNameExtension(h)
		if err != nil {
			return nil, err
		}
		if options.IsDualUse {
			cn, err := DualUseCommonName(h)
			if err != nil {
				// log and continue
				log.Errorf("dual-use failed for cert template - omitting CN (%v)", err)
			} else {
				subject.CommonName = cn
			}
		}
		exts = []pkix.Extension{*s}
	}
	// An empty DNSNames option yields [""] from Split; treat that as no SANs.
	dnsNames := strings.Split(options.DNSNames, ",")
	if len(dnsNames[0]) == 0 {
		dnsNames = nil
	}
	return &x509.Certificate{
		SerialNumber:          serialNum,
		Subject:               subject,
		NotBefore:             notBefore,
		NotAfter:              notBefore.Add(options.TTL),
		KeyUsage:              keyUsage,
		ExtKeyUsage:           extKeyUsages,
		IsCA:                  options.IsCA,
		BasicConstraintsValid: true,
		ExtraExtensions:       exts,
		DNSNames:              dnsNames,
	}, nil
}
// genSerialNum returns a cryptographically random certificate serial number
// in the range [0, 2^128).
func genSerialNum() (*big.Int, error) {
	limit := new(big.Int).Lsh(big.NewInt(1), 128)
	serial, err := rand.Int(rand.Reader, limit)
	if err != nil {
		return nil, fmt.Errorf("serial number generation failure (%v)", err)
	}
	return serial, nil
}
// encodePem PEM-encodes a DER certificate or CSR together with its private
// key. When pkcs8 is true the key is marshaled as PKCS#8; otherwise RSA keys
// are written as PKCS#1 and EC keys in SEC 1 form.
func encodePem(isCSR bool, csrOrCert []byte, priv any, pkcs8 bool) (
	csrOrCertPem []byte, privPem []byte, err error,
) {
	encodeMsg := "CERTIFICATE"
	if isCSR {
		encodeMsg = "CERTIFICATE REQUEST"
	}
	csrOrCertPem = pem.EncodeToMemory(&pem.Block{Type: encodeMsg, Bytes: csrOrCert})
	var encodedKey []byte
	if pkcs8 {
		if encodedKey, err = x509.MarshalPKCS8PrivateKey(priv); err != nil {
			return nil, nil, err
		}
		privPem = pem.EncodeToMemory(&pem.Block{Type: blockTypePKCS8PrivateKey, Bytes: encodedKey})
		return csrOrCertPem, privPem, nil
	}
	switch k := priv.(type) {
	case *rsa.PrivateKey:
		encodedKey = x509.MarshalPKCS1PrivateKey(k)
		privPem = pem.EncodeToMemory(&pem.Block{Type: blockTypeRSAPrivateKey, Bytes: encodedKey})
	case *ecdsa.PrivateKey:
		encodedKey, err = x509.MarshalECPrivateKey(k)
		if err != nil {
			return nil, nil, err
		}
		privPem = pem.EncodeToMemory(&pem.Block{Type: blockTypeECPrivateKey, Bytes: encodedKey})
	default:
		// Previously an unsupported key type silently produced a nil privPem;
		// fail loudly so callers cannot emit a cert without its key.
		return nil, nil, fmt.Errorf("unsupported private key type for PEM encoding: %T", priv)
	}
	return csrOrCertPem, privPem, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provides utility methods to generate X.509 certificates with different
// options. This implementation is largely inspired from
// https://golang.org/src/crypto/tls/generate_cert.go.
package util
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"errors"
"fmt"
"os"
"strings"
"istio.io/istio/pkg/log"
)
// minimumRsaKeySize is the minimum RSA key size to generate certificates
// to ensure proper security
// (2048-bit is the smallest size generally considered secure today).
const minimumRsaKeySize = 2048
// GenCSR generates a X.509 certificate sign request and private key with the given options.
// The freshly generated private key signs the CSR and is returned PEM-encoded
// alongside it.
func GenCSR(options CertOptions) ([]byte, []byte, error) {
	var priv any
	if options.ECSigAlg == "" {
		// RSA path: enforce the minimum key size before generating.
		if options.RSAKeySize < minimumRsaKeySize {
			return nil, nil, fmt.Errorf("requested key size does not meet the minimum required size of %d (requested: %d)", minimumRsaKeySize, options.RSAKeySize)
		}
		rsaKey, err := rsa.GenerateKey(rand.Reader, options.RSAKeySize)
		if err != nil {
			return nil, nil, fmt.Errorf("RSA key generation failed (%v)", err)
		}
		priv = rsaKey
	} else {
		// ECC path: only ECDSA is supported.
		if options.ECSigAlg != EcdsaSigAlg {
			return nil, nil, errors.New("csr cert generation fails due to unsupported EC signature algorithm")
		}
		curve := elliptic.P256()
		if options.ECCCurve == P384Curve {
			curve = elliptic.P384()
		}
		ecKey, err := ecdsa.GenerateKey(curve, rand.Reader)
		if err != nil {
			return nil, nil, fmt.Errorf("EC key generation failed (%v)", err)
		}
		priv = ecKey
	}
	template, err := GenCSRTemplate(options)
	if err != nil {
		return nil, nil, fmt.Errorf("CSR template creation failed (%v)", err)
	}
	csrBytes, err := x509.CreateCertificateRequest(rand.Reader, template, crypto.PrivateKey(priv))
	if err != nil {
		return nil, nil, fmt.Errorf("CSR creation failed (%v)", err)
	}
	return encodePem(true, csrBytes, priv, options.PKCS8Key)
}
// GenCSRTemplate generates a certificateRequest template with the given options.
// When a Host is set it is encoded as the SAN extension, and in dual-use mode
// additionally as the CommonName (omitted, with a log, if derivation fails).
func GenCSRTemplate(options CertOptions) (*x509.CertificateRequest, error) {
	template := &x509.CertificateRequest{
		Subject: pkix.Name{
			Organization: []string{options.Org},
		},
	}
	if options.Host == "" {
		return template, nil
	}
	san, err := BuildSubjectAltNameExtension(options.Host)
	if err != nil {
		return nil, err
	}
	if options.IsDualUse {
		cn, cnErr := DualUseCommonName(options.Host)
		if cnErr != nil {
			// log and continue
			log.Errorf("dual-use failed for CSR template - omitting CN (%v)", cnErr)
		} else {
			template.Subject.CommonName = cn
		}
	}
	template.ExtraExtensions = []pkix.Extension{*san}
	return template, nil
}
// AppendRootCerts appends root certificates in RootCertFile to the input certificate.
// A missing root cert file is tolerated (the input is returned unchanged);
// any other read error is reported.
func AppendRootCerts(pemCert []byte, rootCertFile string) ([]byte, error) {
	if len(rootCertFile) == 0 {
		return pemCert, nil
	}
	log.Debugf("append root certificates from %v", rootCertFile)
	rootBytes, err := os.ReadFile(rootCertFile)
	if err != nil && !os.IsNotExist(err) {
		return pemCert, fmt.Errorf("failed to read root certificates (%v)", err)
	}
	return AppendCertByte(pemCert, rootBytes), nil
}
// AppendCertByte appends an x.509 root cert (PEM bytes) to an existing
// certificate chain, ensuring exactly one newline separates them when the
// chain is non-empty. The inputs are not modified.
func AppendCertByte(pemCert []byte, rootCert []byte) []byte {
	if len(pemCert) == 0 {
		return append([]byte{}, rootCert...)
	}
	// Normalize the chain to end with a single newline before appending.
	combined := strings.TrimSuffix(string(pemCert), "\n") + "\n"
	return append([]byte(combined), rootCert...)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Provides utility methods to generate X.509 certificates with different
// options. This implementation is largely inspired from
// https://golang.org/src/crypto/tls/generate_cert.go.
package util
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"os"
"sync"
"time"
)
// KeyCertBundle stores the cert, private key, cert chain and root cert for an entity. It is thread safe.
// The cert and privKey should be a public/private key pair.
// The cert should be verifiable from the rootCert through the certChain.
// cert and privKey are pointers to the cert/key parsed from certBytes/privKeyBytes.
type KeyCertBundle struct {
	certBytes []byte // PEM bytes the cert was parsed from
	cert *x509.Certificate // parsed certBytes; nil when parsing failed
	privKeyBytes []byte // PEM bytes the private key was parsed from
	privKey *crypto.PrivateKey // parsed privKeyBytes; points to nil key when parsing failed
	certChainBytes []byte // PEM chain linking cert to the root
	rootCertBytes []byte // PEM root certificate(s)
	// mutex protects the R/W to all keys and certs.
	mutex sync.RWMutex
}
// NewKeyCertBundleFromPem returns a new KeyCertBundle, regardless of whether or not the key can be correctly parsed.
func NewKeyCertBundleFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) *KeyCertBundle {
	b := &KeyCertBundle{}
	b.setAllFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes)
	return b
}
// NewVerifiedKeyCertBundleFromPem returns a new KeyCertBundle, or error if the provided certs failed the
// verification.
func NewVerifiedKeyCertBundleFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) (
	*KeyCertBundle, error,
) {
	b := &KeyCertBundle{}
	if err := b.VerifyAndSetAll(certBytes, privKeyBytes, certChainBytes, rootCertBytes); err != nil {
		return nil, err
	}
	return b, nil
}
// NewVerifiedKeyCertBundleFromFile returns a new KeyCertBundle, or error if the provided certs failed the
// verification. The chain files, if any, are concatenated in order.
func NewVerifiedKeyCertBundleFromFile(certFile string, privKeyFile string, certChainFiles []string, rootCertFile string) (
	*KeyCertBundle, error,
) {
	certBytes, err := os.ReadFile(certFile)
	if err != nil {
		return nil, err
	}
	privKeyBytes, err := os.ReadFile(privKeyFile)
	if err != nil {
		return nil, err
	}
	// Ranging over an empty slice is a no-op, so no emptiness guard is needed.
	var certChainBytes []byte
	for _, f := range certChainFiles {
		part, readErr := os.ReadFile(f)
		if readErr != nil {
			return nil, readErr
		}
		certChainBytes = append(certChainBytes, part...)
	}
	rootCertBytes, err := os.ReadFile(rootCertFile)
	if err != nil {
		return nil, err
	}
	return NewVerifiedKeyCertBundleFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes)
}
// NewKeyCertBundleWithRootCertFromFile returns a new KeyCertBundle with the root cert without verification.
// An empty file name yields a bundle with an empty root cert.
func NewKeyCertBundleWithRootCertFromFile(rootCertFile string) (*KeyCertBundle, error) {
	rootCertBytes := []byte{}
	if rootCertFile != "" {
		var err error
		if rootCertBytes, err = os.ReadFile(rootCertFile); err != nil {
			return nil, err
		}
	}
	return &KeyCertBundle{
		certBytes:      []byte{},
		cert:           nil,
		privKeyBytes:   []byte{},
		privKey:        nil,
		certChainBytes: []byte{},
		rootCertBytes:  rootCertBytes,
	}, nil
}
// GetAllPem returns all key/cert PEMs in KeyCertBundle together. Getting all values together avoids inconsistency.
// Each returned slice is a copy, so callers may modify them freely.
func (b *KeyCertBundle) GetAllPem() (certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	return copyBytes(b.certBytes), copyBytes(b.privKeyBytes), copyBytes(b.certChainBytes), copyBytes(b.rootCertBytes)
}
// GetAll returns all key/cert in KeyCertBundle together. Getting all values together avoids inconsistency.
// NOTE: Callers should not modify the content of cert and privKey.
func (b *KeyCertBundle) GetAll() (cert *x509.Certificate, privKey *crypto.PrivateKey, certChainBytes,
	rootCertBytes []byte,
) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	// cert and privKey are shared pointers; the byte slices are copies.
	return b.cert, b.privKey, copyBytes(b.certChainBytes), copyBytes(b.rootCertBytes)
}
// GetCertChainPem returns a copy of the certificate chain PEM.
func (b *KeyCertBundle) GetCertChainPem() []byte {
	b.mutex.RLock()
	chain := copyBytes(b.certChainBytes)
	b.mutex.RUnlock()
	return chain
}
// GetRootCertPem returns a copy of the root certificate PEM.
func (b *KeyCertBundle) GetRootCertPem() []byte {
	b.mutex.RLock()
	root := copyBytes(b.rootCertBytes)
	b.mutex.RUnlock()
	return root
}
// VerifyAndSetAll verifies the key/certs, and sets all key/certs in KeyCertBundle together.
// Setting all values together avoids inconsistency.
// On verification failure the bundle is left untouched and the error returned.
func (b *KeyCertBundle) VerifyAndSetAll(certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) error {
	if err := Verify(certBytes, privKeyBytes, certChainBytes, rootCertBytes); err != nil {
		return err
	}
	b.setAllFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes)
	return nil
}
// setAllFromPem replaces every key/cert field from the given PEM bytes while
// holding the write lock. Setting all values together avoids inconsistency.
// Parse errors are deliberately ignored, leaving cert/privKey pointing at nil
// values, which matches NewKeyCertBundleFromPem's no-verification contract.
func (b *KeyCertBundle) setAllFromPem(certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) {
	b.mutex.Lock()
	// defer guarantees the lock is released even if parsing panics.
	defer b.mutex.Unlock()
	b.certBytes = copyBytes(certBytes)
	b.privKeyBytes = copyBytes(privKeyBytes)
	b.certChainBytes = copyBytes(certChainBytes)
	b.rootCertBytes = copyBytes(rootCertBytes)
	// cert and privKey are always reset to point to new addresses. This avoids modifying the pointed structs that
	// could be still used outside of the class.
	b.cert, _ = ParsePemEncodedCertificate(certBytes)
	privKey, _ := ParsePemEncodedKey(privKeyBytes)
	b.privKey = &privKey
}
// CertOptions returns the certificate config based on currently stored cert.
// NOTE(review): assumes b.cert is non-nil and its Issuer carries at least one
// Organization — callers must have loaded a valid cert first, or this panics;
// confirm with callers.
func (b *KeyCertBundle) CertOptions() (*CertOptions, error) {
	b.mutex.RLock()
	defer b.mutex.RUnlock()
	// The cert must carry exactly one SAN identity, which becomes Host.
	ids, err := ExtractIDs(b.cert.Extensions)
	if err != nil {
		return nil, fmt.Errorf("failed to extract id %v", err)
	}
	if len(ids) != 1 {
		return nil, fmt.Errorf("expect single id from the cert, found %v", ids)
	}
	opts := &CertOptions{
		Host: ids[0],
		Org: b.cert.Issuer.Organization[0],
		IsCA: b.cert.IsCA,
		TTL: b.cert.NotAfter.Sub(b.cert.NotBefore),
		// Dual-use iff the CN equals the single SAN identity.
		IsDualUse: ids[0] == b.cert.Subject.CommonName,
	}
	// Recover the key algorithm parameters from the stored private key.
	switch (*b.privKey).(type) {
	case *rsa.PrivateKey:
		size, err := GetRSAKeySize(*b.privKey)
		if err != nil {
			return nil, fmt.Errorf("failed to get RSA key size: %v", err)
		}
		opts.RSAKeySize = size
	case *ecdsa.PrivateKey:
		opts.ECSigAlg = EcdsaSigAlg
	default:
		return nil, errors.New("unknown private key type")
	}
	return opts, nil
}
// UpdateVerifiedKeyCertBundleFromFile verifies and updates KeyCertBundle with new certs
// read from the given files. The chain files, if any, are concatenated in order.
func (b *KeyCertBundle) UpdateVerifiedKeyCertBundleFromFile(certFile string, privKeyFile string, certChainFiles []string, rootCertFile string) error {
	certBytes, err := os.ReadFile(certFile)
	if err != nil {
		return err
	}
	privKeyBytes, err := os.ReadFile(privKeyFile)
	if err != nil {
		return err
	}
	// Renamed the loop variable: the original `var b []byte` shadowed the
	// receiver `b`. Ranging an empty slice is a no-op, so no guard is needed.
	certChainBytes := []byte{}
	for _, f := range certChainFiles {
		chainPart, readErr := os.ReadFile(f)
		if readErr != nil {
			return readErr
		}
		certChainBytes = append(certChainBytes, chainPart...)
	}
	rootCertBytes, err := os.ReadFile(rootCertFile)
	if err != nil {
		return err
	}
	return b.VerifyAndSetAll(certBytes, privKeyBytes, certChainBytes, rootCertBytes)
}
// ExtractRootCertExpiryTimestamp returns the unix timestamp (seconds) at which
// the root cert expires; an error is also returned when the cert is already
// expired or cannot be parsed.
func (b *KeyCertBundle) ExtractRootCertExpiryTimestamp() (float64, error) {
	return extractCertExpiryTimestamp("root cert", b.GetRootCertPem())
}
// ExtractCACertExpiryTimestamp returns the unix timestamp (seconds) at which
// the CA cert (first cert of the chain PEM) expires; an error is also
// returned when the cert is already expired or cannot be parsed.
func (b *KeyCertBundle) ExtractCACertExpiryTimestamp() (float64, error) {
	return extractCertExpiryTimestamp("CA cert", b.GetCertChainPem())
}
// TimeBeforeCertExpires returns the time duration before the cert gets expired.
// It returns an error if it failed to extract the cert expiration timestamp.
// The returned time duration could be a negative value indicating the cert has already been expired.
func TimeBeforeCertExpires(certBytes []byte, now time.Time) (time.Duration, error) {
	if len(certBytes) == 0 {
		return 0, fmt.Errorf("no certificate found")
	}
	expiry, err := extractCertExpiryTimestamp("cert", certBytes)
	if err != nil {
		return 0, fmt.Errorf("failed to extract cert expiration timestamp: %v", err)
	}
	// Remaining lifetime in (whole) seconds relative to `now`.
	secondsLeft := expiry - float64(now.Unix())
	return time.Duration(secondsLeft) * time.Second, nil
}
// Verify that the cert chain, root cert and key/cert match.
func Verify(certBytes, privKeyBytes, certChainBytes, rootCertBytes []byte) error {
	// Build the verification pools: roots from rootCertBytes, intermediates
	// from certChainBytes.
	roots := x509.NewCertPool()
	roots.AppendCertsFromPEM(rootCertBytes)
	intermediates := x509.NewCertPool()
	intermediates.AppendCertsFromPEM(certChainBytes)

	cert, err := ParsePemEncodedCertificate(certBytes)
	if err != nil {
		return fmt.Errorf("failed to parse cert PEM: %v", err)
	}
	// The cert must chain up to the provided roots.
	chains, err := cert.Verify(x509.VerifyOptions{
		Intermediates: intermediates,
		Roots:         roots,
	})
	if err != nil || len(chains) == 0 {
		return fmt.Errorf(
			"cannot verify the cert with the provided root chain and cert "+
				"pool with error: %v", err)
	}
	// The private key must be parseable...
	if _, err = ParsePemEncodedKey(privKeyBytes); err != nil {
		return fmt.Errorf("failed to parse private key PEM: %v", err)
	}
	// ...and must pair with the certificate.
	if _, err := tls.X509KeyPair(certBytes, privKeyBytes); err != nil {
		return fmt.Errorf("the cert does not match the key: %v", err)
	}
	return nil
}
// extractCertExpiryTimestamp parses certPem and returns its NotAfter instant
// as a unix timestamp (float64 seconds). If the certificate has already
// expired, the timestamp is still returned together with a descriptive error.
// certType is only used to label error messages.
func extractCertExpiryTimestamp(certType string, certPem []byte) (float64, error) {
	cert, err := ParsePemEncodedCertificate(certPem)
	if err != nil {
		return -1, fmt.Errorf("failed to parse the %s: %v", certType, err)
	}
	notAfter := cert.NotAfter
	ts := float64(notAfter.Unix())
	if time.Now().After(notAfter) {
		return ts, fmt.Errorf("expired %s found, x509.NotAfter %v, please transit your %s", certType, notAfter, certType)
	}
	return ts, nil
}
// copyBytes returns a fresh slice holding the same bytes as src, so the caller
// gets storage independent of the original backing array. A nil/empty src
// yields a non-nil empty slice.
func copyBytes(src []byte) []byte {
	out := make([]byte, len(src))
	copy(out, src)
	return out
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto/x509/pkix"
"encoding/asn1"
"fmt"
"net/netip"
"strings"
"istio.io/istio/pkg/spiffe"
)
// IdentityType represents type of an identity. This is used to properly encode
// an identity into a SAN extension: each type maps to a distinct ASN.1
// GeneralName tag via oidTagMap.
type IdentityType int

const (
	// TypeDNS represents a DNS name.
	TypeDNS IdentityType = iota
	// TypeIP represents an IP address.
	TypeIP
	// TypeURI represents a universal resource identifier.
	TypeURI
)
var (
	// Mapping from the type of an identity to the OID tag value for the X.509
	// SAN field (see https://tools.ietf.org/html/rfc5280#appendix-A.2)
	//
	// SubjectAltName ::= GeneralNames
	//
	// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
	//
	// GeneralName ::= CHOICE {
	//      dNSName                    [2] IA5String,
	//      uniformResourceIdentifier  [6] IA5String,
	//      iPAddress                  [7] OCTET STRING,
	// }
	oidTagMap = map[IdentityType]int{
		TypeDNS: 2,
		TypeURI: 6,
		TypeIP:  7,
	}

	// A reversed map that maps from an OID tag to the corresponding identity
	// type. Used when decoding a SAN extension back into Identity values.
	identityTypeMap = generateReversedMap(oidTagMap)

	// The OID for the SAN extension (See
	// http://www.alvestrand.no/objectid/2.5.29.17.html).
	oidSubjectAlternativeName = asn1.ObjectIdentifier{2, 5, 29, 17}
)
// Identity is an object holding both the encoded identifier bytes as well as
// the type of the identity.
type Identity struct {
	// Type selects the SAN GeneralName tag used when encoding (see oidTagMap).
	Type IdentityType
	// Value is the raw identifier: UTF-8 bytes for DNS/URI entries, or the
	// binary (4- or 16-byte) form for IP addresses.
	Value []byte
}
// BuildSubjectAltNameExtension builds the SAN extension for the certificate.
// hosts is a comma-separated list; each entry is classified as an IP address,
// a SPIFFE URI (spiffe.URIPrefix), or a DNS name, in that order.
func BuildSubjectAltNameExtension(hosts string) (*pkix.Extension, error) {
	var ids []Identity
	for _, host := range strings.Split(hosts, ",") {
		switch addr, _ := netip.ParseAddr(host); {
		case addr.IsValid():
			// Use the 4-byte representation of the IP address when possible.
			raw := addr.AsSlice()
			if addr.Is4In6() {
				v4 := addr.As4()
				raw = v4[:]
			}
			ids = append(ids, Identity{Type: TypeIP, Value: raw})
		case strings.HasPrefix(host, spiffe.URIPrefix):
			ids = append(ids, Identity{Type: TypeURI, Value: []byte(host)})
		default:
			ids = append(ids, Identity{Type: TypeDNS, Value: []byte(host)})
		}
	}
	san, err := BuildSANExtension(ids)
	if err != nil {
		return nil, fmt.Errorf("SAN extension building failure (%v)", err)
	}
	return san, nil
}
// BuildSANExtension builds a `pkix.Extension` of type "Subject
// Alternative Name" based on the given identities.
//
// Each identity is encoded as a context-specific ASN.1 RawValue whose tag is
// taken from oidTagMap; an identity with an unmapped type yields an error.
// (Fixes the misspelled parameter name `identites`.)
func BuildSANExtension(identities []Identity) (*pkix.Extension, error) {
	rawValues := []asn1.RawValue{}
	for _, id := range identities {
		tag, ok := oidTagMap[id.Type]
		if !ok {
			return nil, fmt.Errorf("unsupported identity type: %v", id.Type)
		}
		rawValues = append(rawValues, asn1.RawValue{
			Bytes: id.Value,
			Class: asn1.ClassContextSpecific,
			Tag:   tag,
		})
	}
	bs, err := asn1.Marshal(rawValues)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal the raw values for SAN field (err: %s)", err)
	}
	// SAN is Critical because the subject is empty. This is compliant with X.509 and SPIFFE standards.
	return &pkix.Extension{Id: oidSubjectAlternativeName, Critical: true, Value: bs}, nil
}
// ExtractIDsFromSAN takes a SAN extension and extracts the identities.
// The logic is mostly borrowed from
// https://github.com/golang/go/blob/master/src/crypto/x509/x509.go, with the
// addition of supporting extracting URIs.
func ExtractIDsFromSAN(sanExt *pkix.Extension) ([]Identity, error) {
	if !sanExt.Id.Equal(oidSubjectAlternativeName) {
		return nil, fmt.Errorf("the input is not a SAN extension")
	}
	// The extension value must decode to exactly one ASN.1 value with no
	// trailing bytes, and that value must be a universal SEQUENCE.
	var seq asn1.RawValue
	rest, err := asn1.Unmarshal(sanExt.Value, &seq)
	if err != nil {
		return nil, err
	}
	if len(rest) != 0 {
		return nil, fmt.Errorf("the SAN extension is incorrectly encoded")
	}
	if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal {
		return nil, fmt.Errorf("the SAN extension is incorrectly encoded")
	}
	// Walk the sequence; each element is one GeneralName whose context tag
	// selects the identity type via identityTypeMap.
	ids := []Identity{}
	remaining := seq.Bytes
	for len(remaining) > 0 {
		var rv asn1.RawValue
		remaining, err = asn1.Unmarshal(remaining, &rv)
		if err != nil {
			return nil, err
		}
		ids = append(ids, Identity{Type: identityTypeMap[rv.Tag], Value: rv.Bytes})
	}
	return ids, nil
}
// ExtractSANExtension extracts the "Subject Alternative Name" extension from
// the given PKIX extension set. It returns nil when no SAN extension exists.
func ExtractSANExtension(exts []pkix.Extension) *pkix.Extension {
	for _, ext := range exts {
		if !ext.Id.Equal(oidSubjectAlternativeName) {
			continue
		}
		// We don't need to examine other extensions anymore since a certificate
		// must not include more than one instance of a particular extension. See
		// https://tools.ietf.org/html/rfc5280#section-4.2.
		found := ext
		return &found
	}
	return nil
}
// ExtractIDs first finds the SAN extension from the given extension set, then
// extract identities from the SAN extension, returning them as plain strings.
func ExtractIDs(exts []pkix.Extension) ([]string, error) {
	sanExt := ExtractSANExtension(exts)
	if sanExt == nil {
		return nil, fmt.Errorf("the SAN extension does not exist")
	}
	typed, err := ExtractIDsFromSAN(sanExt)
	if err != nil {
		return nil, fmt.Errorf("failed to extract identities from SAN extension (error %v)", err)
	}
	// Flatten the typed identities to their string values.
	ids := make([]string, 0, len(typed))
	for _, id := range typed {
		ids = append(ids, string(id.Value))
	}
	return ids, nil
}
// generateReversedMap returns a map with the keys and values of m swapped.
// Generalized over any comparable key/value types; the existing call site
// (identityTypeMap = generateReversedMap(oidTagMap)) infers the original
// types unchanged. If multiple keys share a value, which key survives is
// unspecified (map iteration order is random).
func generateReversedMap[K comparable, V comparable](m map[K]V) map[V]K {
	reversed := make(map[V]K, len(m))
	for key, value := range m {
		reversed[value] = key
	}
	return reversed
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"crypto/ecdsa"
"crypto/rsa"
"crypto/x509"
"fmt"
"os"
"reflect"
"sort"
"strings"
"time"
)
// VerifyFields contains the certificate fields to verify in the test.
// In VerifyCertificate, zero-valued NotBefore, TTL and Org skip their
// respective checks; ExtKeyUsage, KeyUsage, IsCA and CommonName are always
// compared. Host drives DNS-name / SPIFFE-SAN verification.
type VerifyFields struct {
	NotBefore   time.Time
	TTL         time.Duration // NotAfter - NotBefore
	ExtKeyUsage []x509.ExtKeyUsage
	KeyUsage    x509.KeyUsage
	IsCA        bool
	Org         string
	CommonName  string
	Host        string
}
// VerifyCertificate verifies a given PEM encoded certificate by
// - building one or more chains from the certificate to a root certificate;
// - checking fields are set as expected.
//
// privPem may be nil to skip the key/cert match check; rootCertPem may be nil
// to verify against an empty root pool; expectedFields may be nil to skip all
// field checks. The certificate compared against expectedFields is the first
// one parsed from certChainPem.
func VerifyCertificate(privPem []byte, certChainPem []byte, rootCertPem []byte, expectedFields *VerifyFields) error {
	roots := x509.NewCertPool()
	if rootCertPem != nil {
		if ok := roots.AppendCertsFromPEM(rootCertPem); !ok {
			return fmt.Errorf("failed to parse root certificate")
		}
	}
	intermediates := x509.NewCertPool()
	if ok := intermediates.AppendCertsFromPEM(certChainPem); !ok {
		return fmt.Errorf("failed to parse certificate chain")
	}
	cert, err := ParsePemEncodedCertificate(certChainPem)
	if err != nil {
		return err
	}
	opts := x509.VerifyOptions{
		Intermediates: intermediates,
		Roots:         roots,
	}
	host := ""
	if expectedFields != nil {
		host = expectedFields.Host
		san := host
		// uri scheme is currently not supported in go VerifyOptions. We verify
		// this uri at the end as a special case.
		if strings.HasPrefix(host, "spiffe") {
			san = ""
		}
		opts.DNSName = san
	}
	opts.KeyUsages = append(opts.KeyUsages, x509.ExtKeyUsageAny)
	if _, err = cert.Verify(opts); err != nil {
		// Use a format verb rather than concatenating err.Error(): a
		// non-constant format string is flagged by `go vet` (printf check).
		return fmt.Errorf("failed to verify certificate: %v", err)
	}
	if privPem != nil {
		priv, err := ParsePemEncodedKey(privPem)
		if err != nil {
			return err
		}
		// The private key must pair with the cert's public key; compare the
		// embedded public keys for RSA and ECDSA.
		privRSAKey, privRSAOk := priv.(*rsa.PrivateKey)
		pubRSAKey, pubRSAOk := cert.PublicKey.(*rsa.PublicKey)
		privECKey, privECOk := priv.(*ecdsa.PrivateKey)
		pubECKey, pubECOk := cert.PublicKey.(*ecdsa.PublicKey)
		rsaMatch := privRSAOk && pubRSAOk
		ecMatch := privECOk && pubECOk
		if rsaMatch {
			if !reflect.DeepEqual(privRSAKey.PublicKey, *pubRSAKey) {
				return fmt.Errorf("the generated private RSA key and cert doesn't match")
			}
		} else if ecMatch {
			if !reflect.DeepEqual(privECKey.PublicKey, *pubECKey) {
				return fmt.Errorf("the generated private EC key and cert doesn't match")
			}
		} else {
			return fmt.Errorf("algorithms for private key and cert do not match")
		}
	}
	// SPIFFE hosts cannot be checked via opts.DNSName above; match them
	// manually against the certificate's SAN URIs.
	if strings.HasPrefix(host, "spiffe") {
		matchHost := false
		ids, err := ExtractIDs(cert.Extensions)
		if err != nil {
			return err
		}
		for _, id := range ids {
			if strings.HasSuffix(id, host) {
				matchHost = true
				break
			}
		}
		if !matchHost {
			return fmt.Errorf("the certificate doesn't have the expected SAN for: %s", host)
		}
	}
	if expectedFields != nil {
		// Zero-valued NotBefore/TTL/Org skip their checks; the rest are
		// always compared.
		if nb := expectedFields.NotBefore; !nb.IsZero() && !nb.Equal(cert.NotBefore) {
			return fmt.Errorf("unexpected value for 'NotBefore' field: want %v but got %v", nb, cert.NotBefore)
		}
		if ttl := expectedFields.TTL; ttl != 0 && ttl != (cert.NotAfter.Sub(cert.NotBefore)) {
			return fmt.Errorf("unexpected value for 'NotAfter' - 'NotBefore': want %v but got %v", ttl, cert.NotAfter.Sub(cert.NotBefore))
		}
		if eku := sortExtKeyUsage(expectedFields.ExtKeyUsage); !reflect.DeepEqual(eku, sortExtKeyUsage(cert.ExtKeyUsage)) {
			return fmt.Errorf("unexpected value for 'ExtKeyUsage' field: want %v but got %v", eku, cert.ExtKeyUsage)
		}
		if ku := expectedFields.KeyUsage; ku != cert.KeyUsage {
			return fmt.Errorf("unexpected value for 'KeyUsage' field: want %v but got %v", ku, cert.KeyUsage)
		}
		if isCA := expectedFields.IsCA; isCA != cert.IsCA {
			return fmt.Errorf("unexpected value for 'IsCA' field: want %t but got %t", isCA, cert.IsCA)
		}
		if org := expectedFields.Org; org != "" && !reflect.DeepEqual([]string{org}, cert.Issuer.Organization) {
			return fmt.Errorf("unexpected value for 'Organization' field: want %v but got %v",
				[]string{org}, cert.Issuer.Organization)
		}
		if cn := expectedFields.CommonName; cn != cert.Subject.CommonName {
			return fmt.Errorf("unexpected value for 'CommonName' field: want %v but got %v",
				cn, cert.Subject.CommonName)
		}
	}
	return nil
}
func sortExtKeyUsage(extKeyUsage []x509.ExtKeyUsage) []int {
data := make([]int, len(extKeyUsage))
for i := range extKeyUsage {
data[i] = int(extKeyUsage[i])
}
sort.Ints(data)
return data
}
// FindRootCertFromCertificateChainBytes find the root cert from cert chain.
// The last certificate of the parsed chain must be a CA, otherwise an error
// is returned. The second return value of ParsePemEncodedCertificateChain
// (presumably the root cert bytes) is what gets returned on success.
func FindRootCertFromCertificateChainBytes(certBytes []byte) ([]byte, error) {
	parsedChain, rootBytes, err := ParsePemEncodedCertificateChain(certBytes)
	if err != nil {
		return nil, fmt.Errorf("error parsing root certificate: %s", err.Error())
	}
	last := parsedChain[len(parsedChain)-1]
	if !last.IsCA {
		return nil, fmt.Errorf("found root cert is not a ca type cert: %v", last)
	}
	return rootBytes, nil
}
// IsCertExpired returns whether a cert expires.
// When the file cannot be read or parsed it returns true together with an
// error, so callers treat unreadable certs as expired.
// (The parameter was renamed from `filepath`, which shadowed the standard
// library package name, and the pre-declared err/byte-slice vars were folded
// into idiomatic short declarations.)
func IsCertExpired(certPath string) (bool, error) {
	certPEMBlock, err := os.ReadFile(certPath)
	if err != nil {
		return true, fmt.Errorf("failed to read the cert, error is %v", err)
	}
	x509Cert, err := ParsePemEncodedCertificate(certPEMBlock)
	if err != nil {
		return true, fmt.Errorf("failed to parse the cert, err is %v", err)
	}
	return x509Cert.NotAfter.Before(time.Now()), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authenticate
import (
"context"
"fmt"
"net/http"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"istio.io/istio/pkg/security"
"istio.io/istio/security/pkg/pki/util"
)
const (
	// ClientCertAuthenticatorType is the name reported by
	// ClientCertAuthenticator.AuthenticatorType().
	ClientCertAuthenticatorType = "ClientCertAuthenticator"
)
// ClientCertAuthenticator extracts identities from client certificate.
type ClientCertAuthenticator struct{}

// Compile-time check that ClientCertAuthenticator satisfies security.Authenticator.
var _ security.Authenticator = &ClientCertAuthenticator{}
// AuthenticatorType returns the type name of this authenticator
// (ClientCertAuthenticatorType).
func (cca *ClientCertAuthenticator) AuthenticatorType() string {
	return ClientCertAuthenticatorType
}
// Authenticate extracts identities from presented client certificates. This
// method assumes that certificate chain has been properly validated before
// this method is called. In other words, this method does not do certificate
// chain validation itself.
func (cca *ClientCertAuthenticator) Authenticate(authCtx security.AuthContext) (*security.Caller, error) {
	switch {
	case authCtx.GrpcContext != nil:
		return cca.authenticateGrpc(authCtx.GrpcContext)
	case authCtx.Request != nil:
		return cca.authenticateHTTP(authCtx.Request)
	default:
		// Neither transport is present; nothing to authenticate.
		return nil, nil
	}
}
// authenticateGrpc performs mTLS identity extraction for gRPC requests: the
// caller identities are taken from the leaf of the first verified client
// certificate chain.
func (cca *ClientCertAuthenticator) authenticateGrpc(ctx context.Context) (*security.Caller, error) {
	// Named `p` so the grpc `peer` package is not shadowed.
	p, ok := peer.FromContext(ctx)
	if !ok || p.AuthInfo == nil {
		return nil, fmt.Errorf("no client certificate is presented")
	}
	if authType := p.AuthInfo.AuthType(); authType != "tls" {
		return nil, fmt.Errorf("unsupported auth type: %q", authType)
	}
	// Checked assertion: a non-TLS credentials implementation that reports
	// AuthType "tls" would otherwise panic the server.
	tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo)
	if !ok {
		return nil, fmt.Errorf("unsupported auth info type: %T", p.AuthInfo)
	}
	chains := tlsInfo.State.VerifiedChains
	if len(chains) == 0 || len(chains[0]) == 0 {
		return nil, fmt.Errorf("no verified chain is found")
	}
	ids, err := util.ExtractIDs(chains[0][0].Extensions)
	if err != nil {
		return nil, err
	}
	return &security.Caller{
		AuthSource: security.AuthSourceClientCertificate,
		Identities: ids,
	}, nil
}
// authenticateHTTP performs mTLS authentication for http requests. Requires having the endpoints on a listener
// with proper TLS configuration.
func (cca *ClientCertAuthenticator) authenticateHTTP(req *http.Request) (*security.Caller, error) {
	if req.TLS == nil || req.TLS.VerifiedChains == nil {
		return nil, fmt.Errorf("no client certificate is presented")
	}
	verified := req.TLS.VerifiedChains
	if len(verified) == 0 || len(verified[0]) == 0 {
		return nil, fmt.Errorf("no verified chain is found")
	}
	// Identities come from the leaf certificate of the first verified chain.
	leaf := verified[0][0]
	ids, err := util.ExtractIDs(leaf.Extensions)
	if err != nil {
		return nil, err
	}
	caller := &security.Caller{
		AuthSource: security.AuthSourceClientCertificate,
		Identities: ids,
	}
	return caller, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubeauth
import (
"context"
"fmt"
"net/http"
"strings"
"google.golang.org/grpc/metadata"
"k8s.io/client-go/kubernetes"
"istio.io/istio/pkg/cluster"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/jwt"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
"istio.io/istio/security/pkg/k8s/tokenreview"
"istio.io/istio/security/pkg/util"
)
const (
	// KubeJWTAuthenticatorType is the name reported by
	// KubeJWTAuthenticator.AuthenticatorType().
	KubeJWTAuthenticatorType = "KubeJWTAuthenticator"
	// clusterIDMeta is the gRPC metadata key / HTTP header name carrying the
	// ID of the cluster the request originates from.
	clusterIDMeta = "clusterid"
)
// RemoteKubeClientGetter returns a kube client for the given remote cluster,
// or nil when the cluster is unknown.
type RemoteKubeClientGetter func(clusterID cluster.ID) kubernetes.Interface

// KubeJWTAuthenticator authenticates K8s JWTs.
type KubeJWTAuthenticator struct {
	// holder of a mesh configuration for dynamically updating trust domain
	meshHolder mesh.Holder
	// jwtPolicy is compared against jwt.PolicyFirstParty when deciding whether
	// to tolerate legacy first-party tokens (see authenticate).
	jwtPolicy string
	// Primary cluster kube client
	kubeClient kubernetes.Interface
	// Primary cluster ID
	clusterID cluster.ID
	// remote cluster kubeClient getter
	remoteKubeClientGetter RemoteKubeClientGetter
}

// Compile-time check that KubeJWTAuthenticator satisfies security.Authenticator.
var _ security.Authenticator = &KubeJWTAuthenticator{}
// NewKubeJWTAuthenticator creates a new kubeJWTAuthenticator.
func NewKubeJWTAuthenticator(meshHolder mesh.Holder, client kubernetes.Interface, clusterID cluster.ID,
	remoteKubeClientGetter RemoteKubeClientGetter, jwtPolicy string,
) *KubeJWTAuthenticator {
	authn := &KubeJWTAuthenticator{
		meshHolder:             meshHolder,
		jwtPolicy:              jwtPolicy,
		kubeClient:             client,
		clusterID:              clusterID,
		remoteKubeClientGetter: remoteKubeClientGetter,
	}
	return authn
}
// AuthenticatorType returns the type name of this authenticator
// (KubeJWTAuthenticatorType).
func (a *KubeJWTAuthenticator) AuthenticatorType() string {
	return KubeJWTAuthenticatorType
}
// isAllowedKubernetesAudience reports whether the audience refers to the
// in-cluster API server service ("kubernetes.default.svc"), ignoring an
// optional http:// or https:// scheme prefix.
func isAllowedKubernetesAudience(a string) bool {
	// We do not use url.Parse() as it *requires* the protocol.
	trimmed := strings.TrimPrefix(strings.TrimPrefix(a, "https://"), "http://")
	return strings.HasPrefix(trimmed, "kubernetes.default.svc")
}
// Authenticate authenticates the call using the K8s JWT from the context.
// The returned Caller.Identities is in SPIFFE format.
func (a *KubeJWTAuthenticator) Authenticate(authRequest security.AuthContext) (*security.Caller, error) {
	switch {
	case authRequest.GrpcContext != nil:
		return a.authenticateGrpc(authRequest.GrpcContext)
	case authRequest.Request != nil:
		return a.authenticateHTTP(authRequest.Request)
	default:
		// No transport carried a token; nothing to authenticate.
		return nil, nil
	}
}
// authenticateHTTP extracts the JWT and originating cluster ID from an HTTP
// request and delegates validation to authenticate.
func (a *KubeJWTAuthenticator) authenticateHTTP(req *http.Request) (*security.Caller, error) {
	token, err := security.ExtractRequestToken(req)
	if err != nil {
		return nil, fmt.Errorf("target JWT extraction error: %v", err)
	}
	return a.authenticate(token, cluster.ID(req.Header.Get(clusterIDMeta)))
}
// authenticateGrpc extracts the bearer JWT and originating cluster ID from the
// gRPC context and delegates validation to authenticate.
func (a *KubeJWTAuthenticator) authenticateGrpc(ctx context.Context) (*security.Caller, error) {
	token, err := security.ExtractBearerToken(ctx)
	if err != nil {
		return nil, fmt.Errorf("target JWT extraction error: %v", err)
	}
	return a.authenticate(token, extractClusterID(ctx))
}
// authenticate validates targetJWT through the Kubernetes TokenReview API of
// the cluster identified by clusterID, and on success returns a Caller whose
// identity is the SPIFFE URI of the token's service account.
func (a *KubeJWTAuthenticator) authenticate(targetJWT string, clusterID cluster.ID) (*security.Caller, error) {
	kubeClient := a.getKubeClient(clusterID)
	if kubeClient == nil {
		return nil, fmt.Errorf("could not get cluster %s's kube client", clusterID)
	}
	var aud []string
	// If the token has audience - we will validate it by setting it in the audiences field,
	// This happens regardless of Require3PToken setting.
	//
	// If 'Require3PToken' is set - we will also set the audiences field, forcing the check.
	// If Require3P is not set - and token does not have audience - we will
	// tolerate the unbound tokens.
	if !util.IsK8SUnbound(targetJWT) || security.Require3PToken.Get() {
		aud = security.TokenAudiences
		if tokenAud, _ := util.ExtractJwtAud(targetJWT); len(tokenAud) == 1 && isAllowedKubernetesAudience(tokenAud[0]) {
			if a.jwtPolicy == jwt.PolicyFirstParty && !security.Require3PToken.Get() {
				// For backwards compatibility, if first-party-jwt is used and they don't require 3p, allow it but warn
				// This is intended to support first-party-jwt on Kubernetes 1.21+, where BoundServiceAccountTokenVolume
				// became default and started setting an audience to one of defaultAllowedKubernetesAudiences.
				// Users should disable first-party-jwt, but we don't want to break them on upgrade
				log.Warnf("Insecure first-party-jwt option used to validate token; use third-party-jwt")
				aud = nil
			} else {
				// NOTE(review): this logs `aud` (our expected audiences), but the
				// message reads as if it reports the token's own audience
				// (`tokenAud`) — confirm which value was intended here.
				log.Warnf("Received token with aud %q, but expected 'kubernetes.default.svc'. BoundServiceAccountTokenVolume, "+
					"default in Kubernetes 1.21+, is not compatible with first-party-jwt", aud)
			}
		}
		// TODO: check the audience from token, no need to call
		// apiserver if audience is not matching. This may also
		// handle older apiservers that don't check audience.
	} else {
		// No audience will be passed to the check if the token
		// is unbound and the setting to require bound tokens is off
		aud = nil
	}
	id, err := tokenreview.ValidateK8sJwt(kubeClient, targetJWT, aud)
	if err != nil {
		return nil, fmt.Errorf("failed to validate the JWT from cluster %q: %v", clusterID, err)
	}
	// Both the service account and its namespace are required to build the
	// SPIFFE identity below.
	if id.PodServiceAccount == "" {
		return nil, fmt.Errorf("failed to parse the JWT; service account required")
	}
	if id.PodNamespace == "" {
		return nil, fmt.Errorf("failed to parse the JWT; namespace required")
	}
	return &security.Caller{
		AuthSource:     security.AuthSourceIDToken,
		Identities:     []string{spiffe.MustGenSpiffeURI(id.PodNamespace, id.PodServiceAccount)},
		KubernetesInfo: id,
	}, nil
}
// getKubeClient resolves the kube client for clusterID: the primary cluster's
// client for the local (or unspecified) cluster, otherwise a client from
// remoteKubeClientGetter. Returns nil when the cluster is unknown.
func (a *KubeJWTAuthenticator) getKubeClient(clusterID cluster.ID) kubernetes.Interface {
	// An empty cluster ID means the caller did not send one; assume the
	// local/primary cluster.
	if clusterID == "" || clusterID == a.clusterID {
		return a.kubeClient
	}
	// Otherwise try the configured remote-cluster getter.
	if getter := a.remoteKubeClientGetter; getter != nil {
		if c := getter(clusterID); c != nil {
			return c
		}
	}
	// we did not find the kube client for this cluster.
	// return nil so that logs will show that this cluster is not available in istiod
	return nil
}
// extractClusterID pulls the originating cluster ID out of the incoming gRPC
// metadata. It returns "" unless exactly one value is present under the
// clusterIDMeta key.
func extractClusterID(ctx context.Context) cluster.ID {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return ""
	}
	vals, found := md[clusterIDMeta]
	if !found || len(vals) != 1 {
		return ""
	}
	return cluster.ID(vals[0])
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authenticate
import (
"context"
"fmt"
"strings"
oidc "github.com/coreos/go-oidc/v3/oidc"
"istio.io/api/security/v1beta1"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
)
const (
	// IDTokenAuthenticatorType is the name reported by
	// JwtAuthenticator.AuthenticatorType().
	IDTokenAuthenticatorType = "IDTokenAuthenticator"
)

// JwtAuthenticator validates bearer ID tokens with an OIDC verifier and
// matches their audience against a configured list.
type JwtAuthenticator struct {
	// audiences are the accepted token audiences (from the JWTRule); a token
	// must carry at least one of them (see checkAudience).
	audiences []string
	// verifier validates token signature/issuer; client-ID checking is skipped.
	verifier *oidc.IDTokenVerifier
}

// Compile-time check that JwtAuthenticator satisfies security.Authenticator.
var _ security.Authenticator = &JwtAuthenticator{}
// NewJwtAuthenticator is used when running istiod outside of a cluster, to validate the tokens using OIDC
// K8S is created with --service-account-issuer, service-account-signing-key-file and service-account-api-audiences
// which enable OIDC.
func NewJwtAuthenticator(jwtRule *v1beta1.JWTRule) (*JwtAuthenticator, error) {
	issuer := jwtRule.GetIssuer()
	jwksURL := jwtRule.GetJwksUri()
	// The key of a JWT issuer may change, so the key may need to be updated.
	// Based on https://pkg.go.dev/github.com/coreos/go-oidc/v3/oidc#NewRemoteKeySet
	// the oidc library handles caching and cache invalidation. Thus, the verifier
	// is only created once in the constructor.
	var verifier *oidc.IDTokenVerifier
	if jwksURL == "" {
		// No explicit JWKS endpoint: discover it via the issuer's OIDC
		// discovery document. This performs an HTTP request and may fail.
		provider, err := oidc.NewProvider(context.Background(), issuer)
		if err != nil {
			return nil, fmt.Errorf("failed at creating an OIDC provider for %v: %v", issuer, err)
		}
		verifier = provider.Verifier(&oidc.Config{SkipClientIDCheck: true})
	} else {
		keySet := oidc.NewRemoteKeySet(context.Background(), jwksURL)
		verifier = oidc.NewVerifier(issuer, keySet, &oidc.Config{SkipClientIDCheck: true})
	}
	return &JwtAuthenticator{
		verifier:  verifier,
		audiences: jwtRule.Audiences,
	}, nil
}
// Authenticate - based on the old OIDC authenticator for mesh expansion.
func (j *JwtAuthenticator) Authenticate(authRequest security.AuthContext) (*security.Caller, error) {
	if gc := authRequest.GrpcContext; gc != nil {
		token, err := security.ExtractBearerToken(gc)
		if err != nil {
			return nil, fmt.Errorf("ID token extraction error: %v", err)
		}
		return j.authenticate(gc, token)
	}
	if req := authRequest.Request; req != nil {
		token, err := security.ExtractRequestToken(req)
		if err != nil {
			return nil, fmt.Errorf("target JWT extraction error: %v", err)
		}
		return j.authenticate(req.Context(), token)
	}
	// No transport carried a token; nothing to authenticate.
	return nil, nil
}
// authenticate verifies the bearer token with the configured OIDC verifier and
// maps the Kubernetes service-account subject
// ("system:serviceaccount:$namespace:$serviceaccount") to a SPIFFE identity.
func (j *JwtAuthenticator) authenticate(ctx context.Context, bearerToken string) (*security.Caller, error) {
	idToken, err := j.verifier.Verify(ctx, bearerToken)
	if err != nil {
		return nil, fmt.Errorf("failed to verify the JWT token (error %v)", err)
	}
	sa := JwtPayload{}
	// "aud" for trust domain, "sub" has "system:serviceaccount:$namespace:$serviceaccount".
	// in future trust domain may use another field as a standard is defined.
	if err := idToken.Claims(&sa); err != nil {
		return nil, fmt.Errorf("failed to extract claims from ID token: %v", err)
	}
	if !strings.HasPrefix(sa.Sub, "system:serviceaccount") {
		return nil, fmt.Errorf("invalid sub %v", sa.Sub)
	}
	// Guard against malformed subjects: the prefix check alone does not
	// guarantee namespace and service-account segments exist, and indexing
	// parts[2]/parts[3] without this check would panic the server.
	parts := strings.Split(sa.Sub, ":")
	if len(parts) < 4 {
		return nil, fmt.Errorf("invalid sub %v", sa.Sub)
	}
	ns := parts[2]
	ksa := parts[3]
	if !checkAudience(sa.Aud, j.audiences) {
		return nil, fmt.Errorf("invalid audiences %v", sa.Aud)
	}
	return &security.Caller{
		AuthSource: security.AuthSourceIDToken,
		Identities: []string{spiffe.MustGenSpiffeURI(ns, ksa)},
	}, nil
}
// checkAudience() returns true if the audiences to check are in
// the expected audiences. Otherwise, return false.
func checkAudience(audToCheck []string, audExpected []string) bool {
	// Build a set of the expected audiences, then test membership once per
	// candidate instead of the original nested-loop scan.
	expected := make(map[string]struct{}, len(audExpected))
	for _, e := range audExpected {
		expected[e] = struct{}{}
	}
	for _, a := range audToCheck {
		if _, ok := expected[a]; ok {
			return true
		}
	}
	return false
}
// JwtPayload holds the claims extracted from a verified ID token.
type JwtPayload struct {
	// Aud is the expected audience, defaults to istio-ca - but is based on istiod.yaml configuration.
	// If set to a different value - use the value defined by istiod.yaml. Env variable can
	// still override
	Aud []string `json:"aud"`

	// Exp is not currently used - we don't use the token for authn, just to determine k8s settings
	Exp int `json:"exp"`

	// Issuer - configured by K8S admin for projected tokens. Will be used to verify all tokens.
	Iss string `json:"iss"`

	// Sub is the subject, expected to be
	// "system:serviceaccount:$namespace:$serviceaccount" (see authenticate).
	Sub string `json:"sub"`
}
// AuthenticatorType returns the type name of this authenticator
// (IDTokenAuthenticatorType).
//
// Uses a pointer receiver for consistency with the type's other methods
// (Authenticate/authenticate); the constructor and the security.Authenticator
// assertion both use *JwtAuthenticator.
func (j *JwtAuthenticator) AuthenticatorType() string {
	return IDTokenAuthenticatorType
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package authenticate
import (
"fmt"
"net"
"net/netip"
"strings"
"github.com/alecholmes/xfccparser"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
)
const (
	// XfccAuthenticatorType is the name reported by
	// XfccAuthenticator.AuthenticatorType().
	XfccAuthenticatorType = "XfccAuthenticator"
)

// XfccAuthenticator extracts identities from Xfcc header.
type XfccAuthenticator struct{}

// Compile-time check that XfccAuthenticator satisfies security.Authenticator.
var _ security.Authenticator = &XfccAuthenticator{}
// AuthenticatorType returns the type name of this authenticator
// (XfccAuthenticatorType).
func (xff XfccAuthenticator) AuthenticatorType() string {
	return XfccAuthenticatorType
}
// Authenticate extracts identities from Xfcc Header.
func (xff XfccAuthenticator) Authenticate(ctx security.AuthContext) (*security.Caller, error) {
	remoteAddr := ctx.RemoteAddress()
	xfccHeader := ctx.Header(xfccparser.ForwardedClientCertHeader)
	// Both a peer address and an XFCC header must be present.
	if len(remoteAddr) == 0 || len(xfccHeader) == 0 {
		return nil, fmt.Errorf("caller from %s does not have Xfcc header", remoteAddr)
	}
	// Only callers inside the trusted network may assert identities through
	// the XFCC header, so check trust before parsing it.
	trusted := isTrustedAddress(remoteAddr, features.TrustedGatewayCIDR)
	if !trusted {
		return nil, fmt.Errorf("caller from %s is not in the trusted network. XfccAuthenticator can not be used", remoteAddr)
	}
	return buildSecurityCaller(xfccHeader[0])
}
// buildSecurityCaller parses an XFCC header and collects every identity it
// carries: the URI SAN, all DNS SANs, and the subject common name of each
// client certificate element.
func buildSecurityCaller(xfccHeader string) (*security.Caller, error) {
	clientCerts, err := xfccparser.ParseXFCCHeader(xfccHeader)
	if err != nil {
		// Pass the error through a format verb: fmt.Errorf on a non-constant
		// string is flagged by `go vet` and would misinterpret any '%' that
		// the parse error happens to contain.
		return nil, fmt.Errorf("error in parsing xfcc header: %v", err)
	}
	if len(clientCerts) == 0 {
		return nil, fmt.Errorf("xfcc header does not have at least one client certs")
	}
	ids := []string{}
	for _, cc := range clientCerts {
		ids = append(ids, cc.URI)
		ids = append(ids, cc.DNS...)
		if cc.Subject != nil {
			ids = append(ids, cc.Subject.CommonName)
		}
	}
	return &security.Caller{
		AuthSource: security.AuthSourceClientCertificate,
		Identities: ids,
	}, nil
}
// isTrustedAddress reports whether addr (a "host:port" peer address) falls in
// one of trustedCidrs, or is a loopback address.
func isTrustedAddress(addr string, trustedCidrs []string) bool {
	ip, _, err := net.SplitHostPort(addr)
	if err != nil {
		log.Warnf("peer address %s can not be split in to proper host and port", addr)
		return false
	}
	for _, cidr := range trustedCidrs {
		if isInRange(ip, cidr) {
			return true
		}
	}
	// Always trust local host addresses. Parse defensively: the original
	// netip.MustParseAddr would panic the server when the host part is not a
	// literal IP address.
	parsed, err := netip.ParseAddr(ip)
	if err != nil {
		log.Warnf("peer address %s is not a valid IP address", ip)
		return false
	}
	return parsed.IsLoopback()
}
// isInRange reports whether addr falls inside cidr. cidr must be in prefix
// ("network/len") form; anything else — including a bare IP — yields false,
// as does an unparseable addr.
func isInRange(addr, cidr string) bool {
	if !strings.Contains(cidr, "/") {
		return false
	}
	prefix, err := netip.ParsePrefix(cidr)
	if err != nil {
		return false
	}
	// Parse defensively: the original netip.MustParseAddr would panic when
	// the peer host is not a literal IP (e.g. a hostname); treat that as
	// "not in range" instead.
	ip, err := netip.ParseAddr(addr)
	if err != nil {
		return false
	}
	return prefix.Contains(ip)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"istio.io/istio/pkg/monitoring"
)
const (
	// errorlabel is the metric label key used to tag cert-sign error counters
	// with the error kind (see GetCertSignError).
	errorlabel = "error"
)
var (
errorTag = monitoring.CreateLabel(errorlabel)
csrCounts = monitoring.NewSum(
"citadel_server_csr_count",
"The number of CSRs received by Citadel server.",
)
authnErrorCounts = monitoring.NewSum(
"citadel_server_authentication_failure_count",
"The number of authentication failures.",
)
csrParsingErrorCounts = monitoring.NewSum(
"citadel_server_csr_parsing_err_count",
"The number of errors occurred when parsing the CSR.",
)
idExtractionErrorCounts = monitoring.NewSum(
"citadel_server_id_extraction_err_count",
"The number of errors occurred when extracting the ID from CSR.",
)
certSignErrorCounts = monitoring.NewSum(
"citadel_server_csr_sign_err_count",
"The number of errors occurred when signing the CSR.",
)
successCounts = monitoring.NewSum(
"citadel_server_success_cert_issuance_count",
"The number of certificates issuances that have succeeded.",
)
rootCertExpiryTimestamp = monitoring.NewGauge(
"citadel_server_root_cert_expiry_timestamp",
"The unix timestamp, in seconds, when Citadel root cert will expire. "+
"A negative time indicates the cert is expired.",
)
certChainExpiryTimestamp = monitoring.NewGauge(
"citadel_server_cert_chain_expiry_timestamp",
"The unix timestamp, in seconds, when Citadel cert chain will expire. "+
"A negative time indicates the cert is expired.",
)
)
// monitoringMetrics are counters for certificate signing related operations.
type monitoringMetrics struct {
	CSR               monitoring.Metric // CSRs received
	AuthnError        monitoring.Metric // authentication failures
	Success           monitoring.Metric // successful certificate issuances
	CSRError          monitoring.Metric // CSR parsing errors
	IDExtractionError monitoring.Metric // errors extracting an ID from the CSR
	certSignErrors    monitoring.Metric // signing errors, labeled by error type via errorTag
}
// newMonitoringMetrics wires the package-level Citadel counters into a
// monitoringMetrics value for one Server instance.
func newMonitoringMetrics() monitoringMetrics {
	var m monitoringMetrics
	m.CSR = csrCounts
	m.AuthnError = authnErrorCounts
	m.Success = successCounts
	m.CSRError = csrParsingErrorCounts
	m.IDExtractionError = idExtractionErrorCounts
	m.certSignErrors = certSignErrorCounts
	return m
}
// GetCertSignError returns the signing-error counter labeled with the given
// error type string (see errorTag / errorlabel).
func (m *monitoringMetrics) GetCertSignError(err string) monitoring.Metric {
	return m.certSignErrors.With(errorTag.Value(err))
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/kclient"
"istio.io/istio/pkg/security"
"istio.io/istio/pkg/spiffe"
)
// NodeAuthorizer is a component that implements a subset of Kubernetes Node Authorization
// (https://kubernetes.io/docs/reference/access-authn-authz/node/) for Istio CA. Specifically, it
// validates that a node proxy which requests certificates for workloads on its own node is requesting
// valid identities which run on that node (rather than arbitrary ones).
type NodeAuthorizer struct {
	// trustedNodeAccounts is the set of service accounts allowed to impersonate.
	trustedNodeAccounts map[types.NamespacedName]struct{}
	// pods is a filtered pod informer used to resolve callers to running pods.
	pods kclient.Client[*v1.Pod]
	// nodeIndex indexes pods by {service account, node} for O(1) authorization lookups.
	nodeIndex *kclient.Index[SaNode, *v1.Pod]
}
// NewNodeAuthorizer builds a NodeAuthorizer backed by a filtered pod watch and
// a {service account, node} index over those pods.
func NewNodeAuthorizer(client kube.Client, filter func(t any) bool, trustedNodeAccounts map[types.NamespacedName]struct{}) (*NodeAuthorizer, error) {
	pods := kclient.NewFiltered[*v1.Pod](client, kclient.Filter{
		ObjectFilter:    filter,
		ObjectTransform: kube.StripPodUnusedFields,
	})
	// Index each scheduled pod under its {service account, node} pair so later
	// authorization checks are a single lookup.
	index := kclient.CreateIndex[SaNode, *v1.Pod](pods, func(pod *v1.Pod) []SaNode {
		node, sa := pod.Spec.NodeName, pod.Spec.ServiceAccountName
		if node == "" || sa == "" {
			// Unscheduled pods, or pods without a service account, are not indexed.
			return nil
		}
		key := SaNode{
			ServiceAccount: types.NamespacedName{
				Namespace: pod.Namespace,
				Name:      sa,
			},
			Node: node,
		}
		return []SaNode{key}
	})
	na := &NodeAuthorizer{
		pods:                pods,
		nodeIndex:           index,
		trustedNodeAccounts: trustedNodeAccounts,
	}
	return na, nil
}
// authenticateImpersonation verifies that caller (a node proxy) may request a
// certificate on behalf of requestedIdentityString. It succeeds only when:
// the caller's service account is in trustedNodeAccounts, the requested
// identity is a parseable SPIFFE identity, the caller's pod exists with a
// matching UID and service account, and at least one pod with the requested
// identity runs on the caller's node. Returns a descriptive error otherwise.
func (na *NodeAuthorizer) authenticateImpersonation(caller security.KubernetesInfo, requestedIdentityString string) error {
	callerSa := types.NamespacedName{
		Namespace: caller.PodNamespace,
		Name:      caller.PodServiceAccount,
	}
	// First, make sure the caller is allowed to impersonate, in general
	if _, f := na.trustedNodeAccounts[callerSa]; !f {
		return fmt.Errorf("caller (%v) is not allowed to impersonate", caller)
	}
	// Next, make sure the identity they want to impersonate is valid, in general
	requestedIdentity, err := spiffe.ParseIdentity(requestedIdentityString)
	if err != nil {
		return fmt.Errorf("failed to validate impersonated identity %v", requestedIdentityString)
	}
	// Finally, we validate the requested identity is running on the same node the caller is on
	callerPod := na.pods.Get(caller.PodName, caller.PodNamespace)
	if callerPod == nil {
		return fmt.Errorf("pod %v/%v not found", caller.PodNamespace, caller.PodName)
	}
	// Make sure UID is still valid for our current state
	if callerPod.UID != types.UID(caller.PodUID) {
		// This would only happen if a pod is re-created with the same name, and the CSR client is not in sync on which is current;
		// this is fine and should be eventually consistent. Client is expected to retry in this case.
		return fmt.Errorf("pod found, but UID does not match: %v vs %v", callerPod.UID, caller.PodUID)
	}
	if callerPod.Spec.ServiceAccountName != caller.PodServiceAccount {
		// This should never happen, but just in case add an additional check
		return fmt.Errorf("pod found, but ServiceAccount does not match: %v vs %v", callerPod.Spec.ServiceAccountName, caller.PodServiceAccount)
	}
	// We want to find out if there is any pod running with the requested identity on the callers node.
	// The indexer (previously setup) creates a lookup table for a {Node, SA} pair, which we can lookup
	k := SaNode{
		ServiceAccount: types.NamespacedName{Name: requestedIdentity.ServiceAccount, Namespace: requestedIdentity.Namespace},
		Node:           callerPod.Spec.NodeName,
	}
	// TODO: this is currently single cluster; we will need to take the cluster of the proxy into account
	// to support multi-cluster properly.
	res := na.nodeIndex.Lookup(k)
	// We don't care what pods are part of the index, only that there is at least one. If there is one,
	// it is appropriate for the caller to request this identity.
	if len(res) == 0 {
		return fmt.Errorf("no instances of %q found on node %q", k.ServiceAccount, k.Node)
	}
	serverCaLog.Debugf("Node caller %v impersonated %v", caller, requestedIdentityString)
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"context"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/apimachinery/pkg/types"
pb "istio.io/api/security/v1alpha1"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/namespace"
"istio.io/istio/pkg/log"
"istio.io/istio/pkg/security"
"istio.io/istio/security/pkg/pki/ca"
caerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
)
// serverCaLog is the scoped logger for the Citadel (Istio CA) server.
var serverCaLog = log.RegisterScope("serverca", "Citadel server log")

// CertificateAuthority contains methods to be supported by a CA.
type CertificateAuthority interface {
	// Sign generates a certificate for a workload or CA, from the given CSR and cert opts.
	Sign(csrPEM []byte, opts ca.CertOpts) ([]byte, error)
	// SignWithCertChain is similar to Sign but returns the leaf cert and the entire cert chain.
	SignWithCertChain(csrPEM []byte, opts ca.CertOpts) ([]string, error)
	// GetCAKeyCertBundle returns the KeyCertBundle used by CA.
	GetCAKeyCertBundle() *util.KeyCertBundle
}
// Server implements IstioCAService and IstioCertificateService and provides the services on the
// specified port.
type Server struct {
	pb.UnimplementedIstioCertificateServiceServer
	monitoring     monitoringMetrics        // counters for CSR handling outcomes
	Authenticators []security.Authenticator // tried in order until one succeeds
	ca             CertificateAuthority     // signer for incoming CSRs
	serverCertTTL  time.Duration            // default TTL for issued certs
	// nodeAuthorizer gates identity impersonation by node proxies; nil when
	// trusted node accounts are not configured (impersonation then rejected).
	nodeAuthorizer *NodeAuthorizer
}
// SaNode is an index key pairing a service account with the node it runs on.
type SaNode struct {
	ServiceAccount types.NamespacedName
	Node           string
}
// String renders the key as "<node>/<namespace>/<name>".
func (s SaNode) String() string {
	account := s.ServiceAccount.String()
	return s.Node + "/" + account
}
// CreateCertificate handles an incoming certificate signing request (CSR). It does
// authentication and authorization. Upon validated, signs a certificate that:
// the SAN is the identity of the caller in authentication result.
// the subject public key is the public key in the CSR.
// the validity duration is the ValidityDuration in request, or default value if the given duration is invalid.
// it is signed by the CA signing key.
func (s *Server) CreateCertificate(ctx context.Context, request *pb.IstioCertificateRequest) (
	*pb.IstioCertificateResponse, error,
) {
	s.monitoring.CSR.Increment()
	caller, err := security.Authenticate(ctx, s.Authenticators)
	if caller == nil || err != nil {
		s.monitoring.AuthnError.Increment()
		return nil, status.Error(codes.Unauthenticated, "request authenticate failure")
	}
	serverCaLog := serverCaLog.WithLabels("client", security.GetConnectionAddress(ctx))
	// By default, we will use the callers identity for the certificate
	sans := caller.Identities
	crMetadata := request.Metadata.GetFields()
	impersonatedIdentity := crMetadata[security.ImpersonatedIdentity].GetStringValue()
	if impersonatedIdentity != "" {
		serverCaLog.Debugf("impersonated identity: %s", impersonatedIdentity)
		// If there is an impersonated identity, we will override to use that identity (only single value
		// supported), if the real caller is authorized.
		if s.nodeAuthorizer == nil {
			s.monitoring.AuthnError.Increment()
			// Return an opaque error (for security purposes) but log the full reason
			serverCaLog.Warnf("impersonation not allowed, as node authorizer is not configured")
			return nil, status.Error(codes.Unauthenticated, "request impersonation authentication failure")
		}
		if err := s.nodeAuthorizer.authenticateImpersonation(caller.KubernetesInfo, impersonatedIdentity); err != nil {
			s.monitoring.AuthnError.Increment()
			// Return an opaque error (for security purposes) but log the full reason
			serverCaLog.Warnf("impersonation failed: %v", err)
			return nil, status.Error(codes.Unauthenticated, "request impersonation authentication failure")
		}
		// Node is authorized to impersonate; overwrite the SAN to the impersonated identity.
		sans = []string{impersonatedIdentity}
	}
	serverCaLog.Debugf("generating a certificate, sans: %v, requested ttl: %s", sans, time.Duration(request.ValidityDuration*int64(time.Second)))
	certSigner := crMetadata[security.CertSigner].GetStringValue()
	_, _, certChainBytes, rootCertBytes := s.ca.GetCAKeyCertBundle().GetAll()
	certOpts := ca.CertOpts{
		SubjectIDs: sans,
		TTL:        time.Duration(request.ValidityDuration) * time.Second,
		ForCA:      false,
		CertSigner: certSigner,
	}
	var signErr error
	var cert []byte
	var respCertChain []string
	if certSigner == "" {
		cert, signErr = s.ca.Sign([]byte(request.Csr), certOpts)
	} else {
		serverCaLog.Debugf("signing CSR with cert chain")
		respCertChain, signErr = s.ca.SignWithCertChain([]byte(request.Csr), certOpts)
	}
	if signErr != nil {
		serverCaLog.Errorf("CSR signing error: %v", signErr.Error())
		// Sign/SignWithCertChain are expected to return *caerror.Error; guard
		// the assertion so an unexpected error type cannot panic the server.
		caErr, ok := signErr.(*caerror.Error)
		if !ok {
			s.monitoring.GetCertSignError("unknown").Increment()
			return nil, status.Errorf(codes.Internal, "CSR signing error (%v)", signErr)
		}
		s.monitoring.GetCertSignError(caErr.ErrorType()).Increment()
		return nil, status.Errorf(caErr.HTTPErrorCode(), "CSR signing error (%v)", caErr)
	}
	if certSigner == "" {
		respCertChain = []string{string(cert)}
		if len(certChainBytes) != 0 {
			respCertChain = append(respCertChain, string(certChainBytes))
			serverCaLog.Debugf("Append cert chain to response, %s", string(certChainBytes))
		}
	}
	if len(rootCertBytes) != 0 {
		respCertChain = append(respCertChain, string(rootCertBytes))
	}
	response := &pb.IstioCertificateResponse{
		CertChain: respCertChain,
	}
	s.monitoring.Success.Increment()
	// Log the SANs actually placed in the certificate; they differ from the
	// caller's own identities when impersonation was used.
	serverCaLog.Debugf("CSR successfully signed, sans %v.", sans)
	return response, nil
}
// recordCertsExpiry publishes the root cert and CA cert chain expiry
// timestamps to their gauges. On extraction failure the error is logged and
// the gauge is still recorded (with the zero value of the timestamp).
func recordCertsExpiry(keyCertBundle *util.KeyCertBundle) {
	rootCertExpiry, err := keyCertBundle.ExtractRootCertExpiryTimestamp()
	if err != nil {
		serverCaLog.Errorf("failed to extract root cert expiry timestamp (error %v)", err)
	}
	rootCertExpiryTimestamp.Record(rootCertExpiry)
	// The cert chain is optional; skip the chain gauge when it is absent.
	if len(keyCertBundle.GetCertChainPem()) == 0 {
		return
	}
	certChainExpiry, err := keyCertBundle.ExtractCACertExpiryTimestamp()
	if err != nil {
		serverCaLog.Errorf("failed to extract CA cert expiry timestamp (error %v)", err)
	}
	certChainExpiryTimestamp.Record(certChainExpiry)
}
// Register registers the Istio certificate service onto the given gRPC server.
func (s *Server) Register(grpcServer *grpc.Server) {
	pb.RegisterIstioCertificateServiceServer(grpcServer, s)
}
// New creates a new instance of `IstioCAServiceServer`. When trusted node
// accounts are configured and a kube client is available, the server is also
// equipped with a NodeAuthorizer for impersonated CSRs.
func New(
	ca CertificateAuthority,
	ttl time.Duration,
	authenticators []security.Authenticator,
	client kube.Client,
	filter namespace.DiscoveryFilter,
) (*Server, error) {
	// Seed the expiry gauges up front when a root cert is present.
	if bundle := ca.GetCAKeyCertBundle(); len(bundle.GetRootCertPem()) != 0 {
		recordCertsExpiry(bundle)
	}
	server := &Server{
		Authenticators: authenticators,
		serverCertTTL:  ttl,
		ca:             ca,
		monitoring:     newMonitoringMetrics(),
	}
	if client == nil || len(features.CATrustedNodeAccounts) == 0 {
		return server, nil
	}
	// TODO: do we need some way to delayed readiness until this is synced? Probably
	// Worst case is we deny some requests though which are retried
	na, err := NewNodeAuthorizer(client, filter, features.CATrustedNodeAccounts)
	if err != nil {
		return nil, err
	}
	server.nodeAuthorizer = na
	return server, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"context"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net"
"testing"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/status"
pb "istio.io/api/security/v1alpha1"
"istio.io/istio/pkg/security"
mockca "istio.io/istio/security/pkg/pki/ca/mock"
caerror "istio.io/istio/security/pkg/pki/error"
"istio.io/istio/security/pkg/pki/util"
"istio.io/istio/security/pkg/server/ca/authenticate"
)
// mockAuthenticator is a test double for security.Authenticator: it returns
// the configured identities, or fails when errMsg is non-empty.
type mockAuthenticator struct {
	authSource security.AuthSource
	identities []string
	errMsg     string // when set, Authenticate returns this as an error
}
// AuthenticatorType returns a fixed name identifying this mock authenticator.
func (authn *mockAuthenticator) AuthenticatorType() string {
	return "mockAuthenticator"
}
// Authenticate returns the mock's configured caller, or an error when errMsg
// was set on the mock.
func (authn *mockAuthenticator) Authenticate(_ security.AuthContext) (*security.Caller, error) {
	if authn.errMsg != "" {
		return nil, fmt.Errorf("%v", authn.errMsg)
	}
	caller := &security.Caller{
		AuthSource: authn.authSource,
		Identities: authn.identities,
	}
	return caller, nil
}
// mockAuthInfo is a stand-in peer AuthInfo with a configurable type string
// (e.g. "not-tls" to simulate a non-TLS transport).
type mockAuthInfo struct {
	authType string
}

// AuthType returns the configured auth type string.
func (ai mockAuthInfo) AuthType() string {
	return ai.authType
}
/*
TestCreateCertificateE2EUsingClientCertAuthenticator sends CSR requests to the
server using the real client certificate authenticator (instead of the mock)
and checks the returned status code — and, on success, the cert chain — for
each TLS peer configuration.
*/
func TestCreateCertificateE2EUsingClientCertAuthenticator(t *testing.T) {
	callerID := "test.identity"
	ids := []util.Identity{
		{Type: util.TypeURI, Value: []byte(callerID)},
	}
	sanExt, err := util.BuildSANExtension(ids)
	if err != nil {
		t.Error(err)
	}
	auth := &authenticate.ClientCertAuthenticator{}
	// The fake CA always "signs" the same leaf; a successful request returns
	// leaf + cert chain + root (mockCertChain below).
	server := &Server{
		ca: &mockca.FakeCA{
			SignedCert:    []byte("cert"),
			KeyCertBundle: util.NewKeyCertBundleFromPem(nil, nil, []byte("cert_chain"), []byte("root_cert")),
		},
		Authenticators: []security.Authenticator{auth},
		monitoring:     newMonitoringMetrics(),
	}
	mockCertChain := []string{"cert", "cert_chain", "root_cert"}
	mockIPAddr := &net.IPAddr{IP: net.IPv4(192, 168, 1, 1)}
	testCerts := map[string]struct {
		certChain    [][]*x509.Certificate
		caller       *security.Caller
		fakeAuthInfo *mockAuthInfo
		code         codes.Code
		ipAddr       *net.IPAddr
	}{
		// no client certificate is presented
		"No client certificate": {
			certChain: nil,
			caller:    nil,
			ipAddr:    mockIPAddr,
			code:      codes.Unauthenticated,
		},
		// "unsupported auth type: not-tls"
		"Unsupported auth type": {
			certChain:    nil,
			caller:       nil,
			fakeAuthInfo: &mockAuthInfo{"not-tls"},
			ipAddr:       mockIPAddr,
			code:         codes.Unauthenticated,
		},
		// no cert chain presented
		"Empty cert chain": {
			certChain: [][]*x509.Certificate{},
			caller:    nil,
			ipAddr:    mockIPAddr,
			code:      codes.Unauthenticated,
		},
		// certificate misses the SAN field
		"Certificate has no SAN": {
			certChain: [][]*x509.Certificate{
				{
					{
						Version: 1,
					},
				},
			},
			ipAddr: mockIPAddr,
			code:   codes.Unauthenticated,
		},
		// successful testcase with valid client certificate
		"With client certificate": {
			certChain: [][]*x509.Certificate{
				{
					{
						Extensions: []pkix.Extension{*sanExt},
					},
				},
			},
			caller: &security.Caller{Identities: []string{callerID}},
			ipAddr: mockIPAddr,
			code:   codes.OK,
		},
	}
	for id, c := range testCerts {
		request := &pb.IstioCertificateRequest{Csr: "dumb CSR"}
		ctx := context.Background()
		// Attach TLS peer info (verified chains) when the case provides one.
		if c.certChain != nil {
			tlsInfo := credentials.TLSInfo{
				State: tls.ConnectionState{VerifiedChains: c.certChain},
			}
			p := &peer.Peer{Addr: c.ipAddr, AuthInfo: tlsInfo}
			ctx = peer.NewContext(ctx, p)
		}
		// A fakeAuthInfo overrides the peer with a non-TLS AuthInfo.
		if c.fakeAuthInfo != nil {
			ctx = peer.NewContext(ctx, &peer.Peer{Addr: c.ipAddr, AuthInfo: c.fakeAuthInfo})
		}
		response, err := server.CreateCertificate(ctx, request)
		s, _ := status.FromError(err)
		code := s.Code()
		if code != c.code {
			t.Errorf("Case %s: expecting code to be (%d) but got (%d): %s", id, c.code, code, s.Message())
		} else if c.code == codes.OK {
			if len(response.CertChain) != len(mockCertChain) {
				t.Errorf("Case %s: expecting cert chain length to be (%d) but got (%d)",
					id, len(mockCertChain), len(response.CertChain))
			}
			for i, v := range response.CertChain {
				if v != mockCertChain[i] {
					t.Errorf("Case %s: expecting cert to be (%s) but got (%s) at position [%d] of cert chain.",
						id, mockCertChain, v, i)
				}
			}
		}
	}
}
// TestCreateCertificate drives CreateCertificate with mock authenticators and
// fake CAs, verifying the mapping from each failure mode to its gRPC status
// code, and the returned cert chain on success.
func TestCreateCertificate(t *testing.T) {
	testCases := map[string]struct {
		authenticators []security.Authenticator
		ca             CertificateAuthority
		certChain      []string
		code           codes.Code
	}{
		"No authenticator": {
			authenticators: nil,
			code:           codes.Unauthenticated,
			ca:             &mockca.FakeCA{},
		},
		"Unauthenticated request": {
			authenticators: []security.Authenticator{&mockAuthenticator{
				errMsg: "Not authorized",
			}},
			code: codes.Unauthenticated,
			ca:   &mockca.FakeCA{},
		},
		// The remaining cases map specific CA signing error types to codes.
		"CA not ready": {
			authenticators: []security.Authenticator{&mockAuthenticator{identities: []string{"test-identity"}}},
			ca:             &mockca.FakeCA{SignErr: caerror.NewError(caerror.CANotReady, fmt.Errorf("cannot sign"))},
			code:           codes.Internal,
		},
		"Invalid CSR": {
			authenticators: []security.Authenticator{&mockAuthenticator{identities: []string{"test-identity"}}},
			ca:             &mockca.FakeCA{SignErr: caerror.NewError(caerror.CSRError, fmt.Errorf("cannot sign"))},
			code:           codes.InvalidArgument,
		},
		"Invalid TTL": {
			authenticators: []security.Authenticator{&mockAuthenticator{identities: []string{"test-identity"}}},
			ca:             &mockca.FakeCA{SignErr: caerror.NewError(caerror.TTLError, fmt.Errorf("cannot sign"))},
			code:           codes.InvalidArgument,
		},
		"Failed to sign": {
			authenticators: []security.Authenticator{&mockAuthenticator{identities: []string{"test-identity"}}},
			ca:             &mockca.FakeCA{SignErr: caerror.NewError(caerror.CertGenError, fmt.Errorf("cannot sign"))},
			code:           codes.Internal,
		},
		"Successful signing": {
			authenticators: []security.Authenticator{&mockAuthenticator{identities: []string{"test-identity"}}},
			ca: &mockca.FakeCA{
				SignedCert:    []byte("cert"),
				KeyCertBundle: util.NewKeyCertBundleFromPem(nil, nil, []byte("cert_chain"), []byte("root_cert")),
			},
			certChain: []string{"cert", "cert_chain", "root_cert"},
			code:      codes.OK,
		},
	}
	// All cases share one TLS peer context; the outcome is driven by the mocks.
	p := &peer.Peer{Addr: &net.IPAddr{IP: net.IPv4(192, 168, 1, 1)}, AuthInfo: credentials.TLSInfo{}}
	ctx := peer.NewContext(context.Background(), p)
	for id, c := range testCases {
		server := &Server{
			ca:             c.ca,
			Authenticators: c.authenticators,
			monitoring:     newMonitoringMetrics(),
		}
		request := &pb.IstioCertificateRequest{Csr: "dumb CSR"}
		response, err := server.CreateCertificate(ctx, request)
		s, _ := status.FromError(err)
		code := s.Code()
		if c.code != code {
			t.Errorf("Case %s: expecting code to be (%d) but got (%d): %s", id, c.code, code, s.Message())
		} else if c.code == codes.OK {
			if len(response.CertChain) != len(c.certChain) {
				t.Errorf("Case %s: expecting cert chain length to be (%d) but got (%d)",
					id, len(c.certChain), len(response.CertChain))
			}
			for i, v := range response.CertChain {
				if v != c.certChain[i] {
					t.Errorf("Case %s: expecting cert to be (%s) but got (%s) at position [%d] of cert chain.",
						id, c.certChain, v, i)
				}
			}
		}
	}
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"fmt"
"time"
"istio.io/istio/security/pkg/pki/util"
)
// CertUtil is an interface for utility functions on certificate.
type CertUtil interface {
	// GetWaitTime returns the waiting time before renewing the certificate,
	// given the PEM-encoded cert bytes and the current time.
	GetWaitTime([]byte, time.Time) (time.Duration, error)
}
// CertUtilImpl is the implementation of CertUtil, for production use.
type CertUtilImpl struct {
	// gracePeriodPercentage is the percentage of the certificate's lifetime
	// reserved as a renewal grace period before expiry.
	gracePeriodPercentage int
}
// NewCertUtil returns a new CertUtilImpl configured with the given grace
// period percentage.
func NewCertUtil(gracePeriodPercentage int) CertUtilImpl {
	cu := CertUtilImpl{gracePeriodPercentage: gracePeriodPercentage}
	return cu
}
// GetWaitTime returns the waiting time before renewing the cert, based on current time, the timestamps in cert and
// grace period. It errors when the cert cannot be parsed, is already expired,
// or is already inside its grace period.
func (cu CertUtilImpl) GetWaitTime(certBytes []byte, now time.Time) (time.Duration, error) {
	cert, certErr := util.ParsePemEncodedCertificate(certBytes)
	if certErr != nil {
		return 0, certErr
	}
	timeToExpire := cert.NotAfter.Sub(now)
	if timeToExpire < 0 {
		return 0, fmt.Errorf("certificate already expired at %s, but now is %s",
			cert.NotAfter, now)
	}
	// Note: multiply time.Duration(int64) by an int (gracePeriodPercentage) will cause overflow (e.g.,
	// when duration is time.Hour * 90000). So float64 is used instead.
	lifetime := cert.NotAfter.Sub(cert.NotBefore)
	gracePeriod := time.Duration(float64(lifetime) * (float64(cu.gracePeriodPercentage) / 100))
	// waitTime is the time until the grace period begins: time to expiry minus
	// the grace period length.
	waitTime := timeToExpire - gracePeriod
	if waitTime < 0 {
		// We are within the grace period.
		return 0, fmt.Errorf("got a certificate that should be renewed now")
	}
	return waitTime, nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"strings"
"time"
)
// GetExp returns the token's expiration time, the zero time when the token
// carries no "exp" claim (e.g. a K8s first-party JWT, which never expires),
// or an error when the token cannot be parsed.
func GetExp(token string) (time.Time, error) {
	claims, err := parseJwtClaims(token)
	if err != nil {
		return time.Time{}, err
	}
	rawExp, ok := claims["exp"]
	if !ok || rawExp == nil {
		// No "exp" claim: treat the token as always valid.
		return time.Time{}, nil
	}
	var expiration time.Time
	switch v := rawExp.(type) {
	case float64:
		expiration = time.Unix(int64(v), 0)
	case json.Number:
		seconds, _ := v.Int64()
		expiration = time.Unix(seconds, 0)
	}
	return expiration, nil
}
// GetAud returns the "aud" claim of the token. The claim may be encoded as a
// single string or as a list of strings; both forms are accepted. Returns an
// error when the claim is missing or cannot be interpreted.
func GetAud(token string) ([]string, error) {
	claims, err := parseJwtClaims(token)
	if err != nil {
		return nil, err
	}
	rawAud, ok := claims["aud"]
	if !ok || rawAud == nil {
		return nil, fmt.Errorf("no aud in the token claims")
	}
	data, err := json.Marshal(rawAud)
	if err != nil {
		return nil, err
	}
	// Try the single-string form first, then the list form.
	var single string
	if unmarshalErr := json.Unmarshal(data, &single); unmarshalErr == nil {
		return []string{single}, nil
	}
	var list []string
	if err = json.Unmarshal(data, &list); err == nil {
		return list, nil
	}
	return nil, err
}
// jwtPayload models only the part of a JWT payload this package inspects.
type jwtPayload struct {
	// Aud is JWT token audience - used to identify 3p tokens.
	// It is empty for the default K8S tokens.
	Aud []string `json:"aud"`
}
// IsK8SUnbound detects if the token is a K8S unbound token.
// It is a regular JWT with no audience and expiration, which can
// be exchanged with bound tokens with audience.
//
// This is used to determine if we check audience in the token.
// Clients should not use unbound tokens except in cases where
// bound tokens are not possible.
func IsK8SUnbound(jwt string) bool {
	if aud, found := ExtractJwtAud(jwt); found {
		return len(aud) == 0
	}
	// Tokens that do not parse as JWTs are not treated as unbound.
	return false
}
// ExtractJwtAud extracts the audiences from a JWT token. The bool is false
// when the token does not parse as a JWT — this distinguishes "aud is empty"
// from "could not be read".
func ExtractJwtAud(jwt string) ([]string, bool) {
	parts := strings.Split(jwt, ".")
	if len(parts) != 3 {
		return nil, false
	}
	payloadBytes, err := DecodeJwtPart(parts[1])
	if err != nil {
		return nil, false
	}
	var payload jwtPayload
	if err := json.Unmarshal(payloadBytes, &payload); err != nil {
		return nil, false
	}
	return payload.Aud, true
}
// parseJwtClaims splits the token into its three JWT segments, base64url-
// decodes the payload, and unmarshals it into a generic claims map.
func parseJwtClaims(token string) (map[string]any, error) {
	parts := strings.Split(token, ".")
	if len(parts) != 3 {
		return nil, fmt.Errorf("token contains an invalid number of segments: %d, expected: 3", len(parts))
	}
	// Decode the second part.
	claimBytes, err := DecodeJwtPart(parts[1])
	if err != nil {
		return nil, err
	}
	dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
	claims := make(map[string]any)
	if err := dec.Decode(&claims); err != nil {
		// Include the underlying error: the previous message dropped it,
		// making malformed tokens impossible to diagnose from logs.
		return nil, fmt.Errorf("failed to decode the JWT claims: %v", err)
	}
	return claims, nil
}
// DecodeJwtPart base64url-decodes a single JWT segment, tolerating the
// missing padding that JWT segments conventionally omit.
func DecodeJwtPart(seg string) ([]byte, error) {
	if rem := len(seg) % 4; rem != 0 {
		seg += strings.Repeat("=", 4-rem)
	}
	return base64.URLEncoding.DecodeString(seg)
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint: revive
package fuzz
import (
"fmt"
fuzz "github.com/AdaLogics/go-fuzz-headers"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/serviceregistry"
"istio.io/istio/pilot/pkg/serviceregistry/aggregate"
)
// meshHolder is the mesh-config supplier wired into aggregate.Options.MeshHolder
// by the fuzzer below.
var meshHolder fuzzMeshConfigHolder

// fuzzMeshConfigHolder serves a minimal MeshConfig (trust domain aliases only).
type fuzzMeshConfigHolder struct {
	trustDomainAliases []string
}
// Mesh returns a MeshConfig containing only the holder's trust domain aliases.
func (mh fuzzMeshConfigHolder) Mesh() *meshconfig.MeshConfig {
	return &meshconfig.MeshConfig{
		TrustDomainAliases: mh.trustDomainAliases,
	}
}
// FuzzAggregateController implements a fuzzer
// that targets the add and delete registry apis
// of the aggregate controller. It does so by
// creating a controller with a pseudo-random
// Options{} and create pseudo-random service
// registries and deleting them.
// Returns 1 when the full sequence ran, 0 when the fuzz input was exhausted.
func FuzzAggregateController(data []byte) int {
	// Operation lookup table; the fuzzer picks an entry per iteration.
	ops := map[int]string{
		0: "AddRegistry",
		1: "DeleteRegistry",
	}
	maxOps := 2
	f := fuzz.NewConsumer(data)
	opts := aggregate.Options{}
	err := f.GenerateStruct(&opts)
	if err != nil {
		return 0
	}
	opts.MeshHolder = meshHolder
	c := aggregate.NewController(opts)
	iterations, err := f.GetInt()
	if err != nil {
		return 0
	}
	// Cap the number of operations to keep each fuzz execution fast.
	for i := 0; i < iterations%30; i++ {
		opType, err := f.GetInt()
		if err != nil {
			return 0
		}
		switch ops[opType%maxOps] {
		case "AddRegistry":
			err = runAddRegistry(f, c)
		case "DeleteRegistry":
			err = runDeleteRegistry(f, c)
		}
		if err != nil {
			// Not a crash — the fuzzer ran out of usable input.
			return 0
		}
	}
	return 1
}
// runAddRegistry generates a pseudo-random Simple registry and adds it to the
// controller; it errors when the fuzzer cannot produce a usable registry.
func runAddRegistry(f *fuzz.ConsumeFuzzer, c *aggregate.Controller) error {
	registry := serviceregistry.Simple{}
	err := f.GenerateStruct(&registry)
	if err != nil {
		return err
	}
	// A registry without a DiscoveryController cannot be exercised downstream.
	if registry.DiscoveryController == nil {
		return fmt.Errorf("registry required")
	}
	c.AddRegistry(registry)
	return nil
}
// runDeleteRegistry removes a pseudo-randomly selected registry from the
// controller; it errors when no registry exists or fuzz input is exhausted.
func runDeleteRegistry(f *fuzz.ConsumeFuzzer, c *aggregate.Controller) error {
	registries := c.GetRegistries()
	if len(registries) == 0 {
		return fmt.Errorf("no registries")
	}
	index, err := f.GetInt()
	if err != nil {
		return err
	}
	// Pick one of the existing registries via the fuzzed index.
	selectedRegistry := registries[index%len(registries)]
	c.DeleteRegistry(selectedRegistry.Cluster(), selectedRegistry.Provider())
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"bytes"
"os"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/analysis"
"istio.io/istio/pkg/config/analysis/analyzers"
"istio.io/istio/pkg/config/analysis/local"
"istio.io/istio/pkg/config/analysis/scope"
"istio.io/istio/pkg/log"
)
// availableAnalyzers is the full analyzer set the fuzzer samples from.
var availableAnalyzers = analyzers.All()

// createRandomConfigFile writes the fuzzer's next byte slice to a fresh temp
// file and returns its path; on failure it returns a sentinel name and the
// error, after cleaning up any partially written file.
func createRandomConfigFile(f *fuzz.ConsumeFuzzer) (string, error) {
	data, err := f.GetBytes()
	if err != nil {
		return "nobytes", err
	}
	tmpfile, err := os.CreateTemp("", "example")
	if err != nil {
		return "nofile", err
	}
	if _, err := tmpfile.Write(data); err != nil {
		// Don't leak the handle or orphan the temp file on a failed write —
		// the caller only removes the file on success.
		tmpfile.Close()
		os.Remove(tmpfile.Name())
		return "nofile", err
	}
	if err := tmpfile.Close(); err != nil {
		os.Remove(tmpfile.Name())
		return "nofile", err
	}
	return tmpfile.Name(), nil
}
// createRandomConfigFiles creates a slice of ReaderSources: up to nine
// in-memory config "files" with fuzzed names and contents.
func createRandomConfigFiles(f *fuzz.ConsumeFuzzer) ([]local.ReaderSource, error) {
	var files []local.ReaderSource
	numberOfFiles, err := f.GetInt()
	if err != nil {
		return files, err
	}
	// Bound the file count to keep each fuzz execution fast.
	maxFiles := numberOfFiles % 10
	// Gather test files
	for i := 0; i < maxFiles; i++ {
		name, err := f.GetString()
		if err != nil {
			return files, err
		}
		rBytes, err := f.GetBytes()
		if err != nil {
			return files, err
		}
		r := bytes.NewReader(rBytes)
		files = append(files, local.ReaderSource{Name: name, Reader: r})
	}
	return files, nil
}
// runAnalyzer runs the configured analyzer with processing logs silenced for
// the duration of the run.
func runAnalyzer(sa *local.IstiodAnalyzer) (local.AnalysisResult, error) {
	// Mute the processing scope and restore the previous level on return.
	prevLogLevel := scope.Processing.GetOutputLevel()
	scope.Processing.SetOutputLevel(log.NoneLevel)
	defer scope.Processing.SetOutputLevel(prevLogLevel)

	cancel := make(chan struct{})
	result, err := sa.Analyze(cancel)
	if err != nil {
		return local.AnalysisResult{}, err
	}
	return result, nil
}
// FuzzAnalyzer implements the fuzzer. It selects one analyzer
// pseudo-randomly from the registered set, optionally feeds it
// fuzzer-generated mesh-config and mesh-networks files plus random config
// sources, then runs the analysis and discards the result.
func FuzzAnalyzer(data []byte) int {
	f := fuzz.NewConsumer(data)
	analyzerIndex, err := f.GetInt()
	if err != nil {
		return 0
	}
	// Pick the analyzer under test from the full set.
	analyzer := availableAnalyzers[analyzerIndex%len(availableAnalyzers)]
	requestedInputsByAnalyzer := make(map[string]map[config.GroupVersionKind]struct{})
	analyzerName := analyzer.Metadata().Name
	// cr records which input collections the analysis requested, keyed by
	// analyzer name; passed to the source analyzer as its collection reporter.
	cr := func(col config.GroupVersionKind) {
		if _, ok := requestedInputsByAnalyzer[analyzerName]; !ok {
			requestedInputsByAnalyzer[analyzerName] = make(map[config.GroupVersionKind]struct{})
		}
		requestedInputsByAnalyzer[analyzerName][col] = struct{}{}
	}
	// Mesh config file (optional — fuzzer decides).
	addMeshConfig, err := f.GetBool()
	if err != nil {
		return 0
	}
	meshConfigFile := ""
	if addMeshConfig {
		meshConfigFile, err = createRandomConfigFile(f)
		if err != nil {
			return 0
		}
		defer os.Remove(meshConfigFile)
	}
	// Mesh networks file (optional — fuzzer decides).
	addMeshNetworks, err := f.GetBool()
	if err != nil {
		return 0
	}
	meshNetworkFile := ""
	if addMeshNetworks {
		meshNetworkFile, err = createRandomConfigFile(f)
		if err != nil {
			return 0
		}
		defer os.Remove(meshNetworkFile)
	}
	configFiles, err := createRandomConfigFiles(f)
	if err != nil {
		return 0
	}
	sa := local.NewSourceAnalyzer(analysis.Combine("testCase", analyzer), "", "istio-system", cr)
	if addMeshConfig {
		err = sa.AddFileKubeMeshConfig(meshConfigFile)
		if err != nil {
			return 0
		}
	}
	if addMeshNetworks {
		err = sa.AddFileKubeMeshNetworks(meshNetworkFile)
		if err != nil {
			return 0
		}
	}
	// Include default resources
	err = sa.AddDefaultResources()
	if err != nil {
		return 0
	}
	// Include resources from test files
	err = sa.AddReaderKubeSource(configFiles)
	if err != nil {
		return 0
	}
	_, _ = runAnalyzer(sa)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"fmt"
"time"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/autoregistration"
"istio.io/istio/pilot/pkg/config/memory"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/keepalive"
)
// fakeConn is a minimal connection implementation used to drive the
// autoregistration controller during fuzzing.
type fakeConn struct {
	proxy    *model.Proxy
	connTime time.Time
	stopped  bool // set to true by Stop
}

// makeConn builds a fakeConn for the given proxy and connection time.
func makeConn(proxy *model.Proxy, connTime time.Time) *fakeConn {
	return &fakeConn{proxy: proxy, connTime: connTime}
}

// ID returns an identifier derived from the proxy's first IP address and
// the connection time. Assumes proxy.IPAddresses is non-empty — the
// fuzzer's FuzzValidate gate is presumed to guarantee this.
func (f *fakeConn) ID() string {
	return fmt.Sprintf("%s-%v", f.proxy.IPAddresses[0], f.connTime)
}

// Proxy returns the proxy associated with this connection.
func (f *fakeConn) Proxy() *model.Proxy {
	return f.proxy
}

// ConnectedAt returns the time the connection was established.
func (f *fakeConn) ConnectedAt() time.Time {
	return f.connTime
}

// Stop marks the connection as stopped.
func (f *fakeConn) Stop() {
	f.stopped = true
}
var (
	// tmplA is a valid WorkloadGroup used as the template for the seeded
	// config below. It can be modified to have pseudo-random values for
	// more randomization.
	tmplA = &v1alpha3.WorkloadGroup{
		Template: &v1alpha3.WorkloadEntry{
			Ports:          map[string]uint32{"http": 80},
			Labels:         map[string]string{"app": "a"},
			Network:        "nw0",
			Locality:       "reg0/zone0/subzone0",
			Weight:         1,
			ServiceAccount: "sa-a",
		},
	}
	// wgA is a valid WorkloadGroup Config (namespace "a", name "wg-a")
	// wrapping tmplA. It is seeded into the config store by FuzzWE.
	// It can be modified to have pseudo-random values for more randomization.
	wgA = config.Config{
		Meta: config.Meta{
			GroupVersionKind: gvk.WorkloadGroup,
			Namespace:        "a",
			Name:             "wg-a",
			Labels: map[string]string{
				"grouplabel": "notonentry",
			},
		},
		Spec:   tmplA,
		Status: nil,
	}
)
// FuzzWE implements a fuzzer that targets several apis
// in the workloadentry package. It does so by setting
// up a workloadentry controller with a proxy with
// pseudo-random values.
// The fuzzer then uses the controller to test:
// 1: OnConnect
// 2: OnDisconnect
// 3: QueueWorkloadEntryHealth
func FuzzWE(data []byte) int {
	f := fuzz.NewConsumer(data)
	proxy := &model.Proxy{}
	err := f.GenerateStruct(proxy)
	if err != nil {
		return 0
	}
	// Discard proxies that would not survive normal validation.
	if !proxy.FuzzValidate() {
		return 0
	}
	store := memory.NewController(memory.Make(collections.All))
	c := autoregistration.NewController(store, "", keepalive.Infinity)
	// Seed the store with the valid WorkloadGroup config declared above.
	err = createStore(store, wgA)
	if err != nil {
		fmt.Println(err)
		return 0
	}
	// Run the controller for the duration of this iteration; the deferred
	// close stops it when the function returns.
	stop := make(chan struct{})
	go c.Run(stop)
	defer close(stop)
	conn := makeConn(proxy, time.Now())
	err = c.OnConnect(conn)
	if err != nil {
		return 0
	}
	c.OnDisconnect(conn)
	// Exercise the health-event path with a fuzzed event.
	he := autoregistration.HealthEvent{}
	err = f.GenerateStruct(&he)
	if err != nil {
		return 0
	}
	c.QueueWorkloadEntryHealth(proxy, he)
	return 1
}
// createStore seeds the given config store with cfg, discarding the
// created object and returning only the error.
func createStore(store model.ConfigStoreController, cfg config.Config) error {
	_, err := store.Create(cfg)
	return err
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint: revive
package fuzz
import (
"os"
"time"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/bootstrap"
"istio.io/istio/pkg/config/mesh"
)
// FuzzNewBootstrapServer fuzzes pilot bootstrap: it generates a mesh
// config file and a kube config file from fuzzer data, fills a PilotArgs
// struct with pseudo-random values, then builds and starts a bootstrap
// server, waiting for its shutdown before returning.
func FuzzNewBootstrapServer(data []byte) int {
	f := fuzz.NewConsumer(data)
	// Create mesh config file
	meshConfigFile, err := createRandomConfigFile(f)
	if err != nil {
		return 0
	}
	defer os.Remove(meshConfigFile)
	_, err = os.Stat(meshConfigFile)
	if err != nil {
		return 0
	}
	// Validate mesh config file; bail early on data that a real server
	// would reject while reading its configuration.
	meshConfigYaml, err := mesh.ReadMeshConfigData(meshConfigFile)
	if err != nil {
		return 0
	}
	_, err = mesh.ApplyMeshConfigDefaults(meshConfigYaml)
	if err != nil {
		return 0
	}
	// Create kube config file
	kubeConfigFile, err := createRandomConfigFile(f)
	if err != nil {
		return 0
	}
	defer os.Remove(kubeConfigFile)
	args := &bootstrap.PilotArgs{}
	err = f.GenerateStruct(args)
	if err != nil {
		return 0
	}
	// Point the fuzzed args at the generated files and keep shutdown short.
	args.MeshConfigFile = meshConfigFile
	args.RegistryOptions.KubeConfig = kubeConfigFile
	args.ShutdownDuration = 1 * time.Millisecond
	stop := make(chan struct{})
	s, err := bootstrap.NewServer(args)
	if err != nil {
		return 0
	}
	err = s.Start(stop)
	if err != nil {
		return 0
	}
	// Signal shutdown and wait for the server to finish before the
	// deferred file removals run.
	defer func() {
		close(stop)
		s.WaitUntilCompletion()
	}()
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint: revive
package fuzz
import (
"bytes"
"errors"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/istioctl/pkg/writer/compare"
)
// createIstiodDumps builds a map of up to 49 fuzzer-provided istiod config
// dump entries; an empty map is treated as an error so the caller skips
// the iteration.
func createIstiodDumps(f *fuzz.ConsumeFuzzer) (map[string][]byte, error) {
	dumps := make(map[string][]byte)
	qty, err := f.GetInt()
	if err != nil {
		return dumps, err
	}
	count := qty % 50
	if count == 0 {
		return dumps, errors.New("a map of zero-length was created")
	}
	for i := 0; i < count; i++ {
		key, err := f.GetString()
		if err != nil {
			return dumps, err
		}
		val, err := f.GetBytes()
		if err != nil {
			return dumps, err
		}
		dumps[key] = val
	}
	return dumps, nil
}
// FuzzCompareDiff fuzzes the istioctl config-dump comparator with
// fuzzer-generated istiod dumps and an envoy dump.
func FuzzCompareDiff(data []byte) int {
	f := fuzz.NewConsumer(data)
	istiodDumps, err := createIstiodDumps(f)
	if err != nil {
		return 0
	}
	envoyDump, err := f.GetBytes()
	if err != nil {
		return 0
	}
	var out bytes.Buffer
	comparator, err := compare.NewComparator(&out, istiodDumps, envoyDump)
	if err != nil {
		return 0
	}
	_ = comparator.Diff()
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime/schema"
extensions "istio.io/api/extensions/v1alpha1"
networking "istio.io/api/networking/v1alpha3"
networkingv1beta1 "istio.io/api/networking/v1beta1"
security_beta "istio.io/api/security/v1beta1"
telemetry "istio.io/api/telemetry/v1alpha1"
"istio.io/istio/pilot/pkg/config/kube/crdclient"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collections"
"istio.io/istio/pkg/config/validation"
"istio.io/istio/pkg/kube"
)
// FuzzConfigValidation fuzzes schema validation: it instantiates a
// pseudo-randomly chosen pilot resource via the Istio scheme, fills it
// with fuzzed data, translates it and runs ValidateConfig on the result.
func FuzzConfigValidation(data []byte) int {
	f := fuzz.NewConsumer(data)
	configIndex, err := f.GetInt()
	if err != nil {
		return -1
	}
	all := collections.Pilot.All()
	r := all[configIndex%len(all)]
	gvk := r.GroupVersionKind()
	object, err := kube.IstioScheme.New(schema.GroupVersionKind{
		Group:   gvk.Group,
		Version: gvk.Version,
		Kind:    gvk.Kind,
	})
	if err != nil {
		return 0
	}
	if _, err := apimeta.TypeAccessor(object); err != nil {
		return 0
	}
	// Note: object is an interface; the fuzzer fills it through a pointer
	// to the interface value, matching the original harness behavior.
	if err := f.GenerateStruct(&object); err != nil {
		return 0
	}
	iobj := crdclient.TranslateObject(object, gvk, "cluster.local")
	_, _ = r.ValidateConfig(iobj)
	return 1
}
// FuzzConfigValidation2 implements a second fuzzer for config validation.
// The fuzzer targets the same API as FuzzConfigValidation above, but
// creates the fuzzed config differently: it uses Istio APIs to build the
// Spec from fuzzer-provided JSON. Both run continuously so their
// performance can be compared.
func FuzzConfigValidation2(data []byte) int {
	f := fuzz.NewConsumer(data)
	configIndex, err := f.GetInt()
	if err != nil {
		return -1
	}
	all := collections.Pilot.All()
	r := all[configIndex%len(all)]
	spec, err := r.NewInstance()
	if err != nil {
		return 0
	}
	jsonData, err := f.GetString()
	if err != nil {
		return 0
	}
	if err := config.ApplyJSON(spec, jsonData); err != nil {
		return 0
	}
	meta := config.Meta{}
	if err := f.GenerateStruct(&meta); err != nil {
		return 0
	}
	// Force the meta's GVK to match the selected schema before validating.
	meta.GroupVersionKind = r.GroupVersionKind()
	_, _ = r.ValidateConfig(config.Config{
		Meta: meta,
		Spec: spec,
	})
	return 1
}
// FuzzConfigValidation3 fuzzes the individual validation entry points: it
// generates a fuzzed config.Config, picks one of 13 resource types, fills
// a fuzzed Spec of that type into the config, and calls the corresponding
// Validate* function, ignoring its result.
func FuzzConfigValidation3(data []byte) int {
	// Require a minimum of input so the consumer has material to work with.
	if len(data) < 10 {
		return 0
	}
	f := fuzz.NewConsumer(data)
	c := config.Config{}
	err := f.GenerateStruct(&c)
	if err != nil {
		return 0
	}
	targetNumber, err := f.GetInt()
	if err != nil {
		return 0
	}
	// Dispatch on the fuzzer-chosen target; each case builds the matching
	// Spec type, installs it on the config and runs its validator.
	numberOfTargets := targetNumber % 13
	switch numberOfTargets {
	case 0:
		in := &networking.Gateway{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateGateway(c)
	case 1:
		in := &networking.TrafficPolicy{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateDestinationRule(c)
	case 2:
		in := &networking.EnvoyFilter_EnvoyConfigObjectPatch{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateEnvoyFilter(c)
	case 3:
		in := &networking.Sidecar{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateSidecar(c)
	case 4:
		in := &security_beta.AuthorizationPolicy{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateAuthorizationPolicy(c)
	case 5:
		in := &security_beta.RequestAuthentication{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateRequestAuthentication(c)
	case 6:
		in := &security_beta.PeerAuthentication{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidatePeerAuthentication(c)
	case 7:
		in := &networking.VirtualService{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateVirtualService(c)
	case 8:
		in := &networking.WorkloadEntry{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateWorkloadEntry(c)
	case 9:
		in := &networking.ServiceEntry{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateServiceEntry(c)
	case 10:
		in := &networkingv1beta1.ProxyConfig{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateProxyConfig(c)
	case 11:
		in := &telemetry.Telemetry{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateTelemetry(c)
	case 12:
		in := &extensions.WasmPlugin{}
		err = f.GenerateStruct(in)
		if err != nil {
			return 0
		}
		c.Spec = in
		_, _ = validation.ValidateWasmPlugin(c)
	}
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"bytes"
"encoding/hex"
"fmt"
"reflect"
"strings"
"sync"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"github.com/davecgh/go-spew/spew"
legacyproto "github.com/golang/protobuf/proto" // nolint: staticcheck
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/protobuf/testing/protocmp"
apimeta "k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
clientextensions "istio.io/client-go/pkg/apis/extensions/v1alpha1"
clientnetworkingalpha "istio.io/client-go/pkg/apis/networking/v1alpha3"
clientnetworkingbeta "istio.io/client-go/pkg/apis/networking/v1beta1"
clientsecurity "istio.io/client-go/pkg/apis/security/v1beta1"
clienttelemetry "istio.io/client-go/pkg/apis/telemetry/v1alpha1"
"istio.io/istio/pkg/config/schema/collections"
)
var (
	// scheme holds the client-go API types registered by initRoundTrip.
	scheme = runtime.NewScheme()
	// initter guards the one-time scheme registration across fuzz runs.
	initter sync.Once
)
func initRoundTrip() {
clientnetworkingalpha.AddToScheme(scheme)
clientnetworkingbeta.AddToScheme(scheme)
clientsecurity.AddToScheme(scheme)
clientextensions.AddToScheme(scheme)
clienttelemetry.AddToScheme(scheme)
}
// FuzzCRDRoundtrip tests whether the pilot CRDs can be encoded and then
// decoded back to an equal object. The first input byte selects the
// resource type; the rest fills the object.
func FuzzCRDRoundtrip(data []byte) int {
	initter.Do(initRoundTrip)
	// Require enough input to both select a target and populate it.
	if len(data) < 100 {
		return 0
	}
	// select a target:
	r := collections.Pilot.All()[int(data[0])%len(collections.Pilot.All())]
	gvk := r.GroupVersionKind()
	kgvk := schema.GroupVersionKind{
		Group:   gvk.Group,
		Version: gvk.Version,
		Kind:    gvk.Kind,
	}
	object, err := scheme.New(kgvk)
	if err != nil {
		return 0
	}
	typeAcc, err := apimeta.TypeAccessor(object)
	if err != nil {
		// Every registered type must expose TypeMeta; anything else is a
		// harness bug, not a fuzzing finding.
		panic(fmt.Sprintf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableInternalTypes: %v\n", kgvk, err))
	}
	f := fuzz.NewConsumer(data[1:])
	err = f.GenerateStruct(object)
	if err != nil {
		return 0
	}
	// Skip objects with nil maps/pointers, which would not round-trip
	// byte-identically through serialization.
	err = checkForNilValues(object)
	if err != nil {
		return 0
	}
	typeAcc.SetKind(kgvk.Kind)
	typeAcc.SetAPIVersion(kgvk.GroupVersion().String())
	roundTrip(json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false), object)
	return 1
}
// roundTrip performs the roundtrip of the object: encode twice (checking
// the encoding is stable), decode via the codec and via DecodeInto, and
// panic if any stage fails or if the two decoded objects differ.
func roundTrip(codec runtime.Codec, object runtime.Object) {
	printer := spew.ConfigState{DisableMethods: true}
	// deep copy the original object
	object = object.DeepCopyObject()
	name := reflect.TypeOf(object).Elem().Name()
	// encode (serialize) the deep copy using the provided codec
	data, err := runtime.Encode(codec, object)
	if err != nil {
		// Unencodable fuzzed objects are skipped, not treated as findings.
		return
	}
	// encode (serialize) a second time to verify that it was not varying
	secondData, err := runtime.Encode(codec, object)
	if err != nil {
		panic("This should not fail since we are encoding for the second time")
	}
	// serialization to the wire must be stable to ensure that we don't write twice to the DB
	// when the object hasn't changed.
	if !bytes.Equal(data, secondData) {
		panic(fmt.Sprintf("%v: serialization is not stable: %s\n", name, printer.Sprintf("%#v", object)))
	}
	// decode (deserialize) the encoded data back into an object
	obj2, err := runtime.Decode(codec, data)
	if err != nil {
		panic(fmt.Sprintf("%v: %v\nCodec: %#v\nData: %s\nSource: %#v\n", name, err, codec, dataAsString(data), printer.Sprintf("%#v", object)))
	}
	// decode the encoded data into a new object (instead of letting the codec
	// create a new object)
	obj3 := reflect.New(reflect.TypeOf(object).Elem()).Interface().(runtime.Object)
	if err := runtime.DecodeInto(codec, data, obj3); err != nil {
		panic(fmt.Sprintf("%v: %v\n", name, err))
	}
	// ensure that the object produced from decoding the encoded data is equal
	// to the original object
	if diff := cmp.Diff(obj2, obj3, protocmp.Transform(), cmpopts.EquateNaNs()); diff != "" {
		panic("These should not be different: " + diff)
	}
}
// dataAsString renders encoded bytes for diagnostics. JSON-looking data
// (starting with "{") is returned verbatim; anything else is hex-dumped
// and also debug-printed as a legacy proto buffer.
func dataAsString(data []byte) string {
	dataString := string(data)
	if strings.HasPrefix(dataString, "{") {
		return dataString
	}
	dump := "\n" + hex.Dump(data)
	legacyproto.NewBuffer(make([]byte, 0, 1024)).DebugPrint("decoded object", data)
	return dump
}
// checkForNilValues reports an error if the value behind the given
// pointer contains a nil map or nil pointer anywhere in its value tree.
func checkForNilValues(targetStruct any) error {
	return checkForNil(reflect.ValueOf(targetStruct).Elem())
}

// checkForNil recursively walks structs, arrays/slices, maps and
// pointers, returning an error on the first nil map or nil pointer found.
// Note: nil slices pass (their length is zero) and interface values are
// not inspected — this matches what the round-trip check requires.
func checkForNil(e reflect.Value) error {
	switch e.Kind() {
	case reflect.Struct:
		for i := 0; i < e.NumField(); i++ {
			if err := checkForNil(e.Field(i)); err != nil {
				return err
			}
		}
	case reflect.Array, reflect.Slice:
		for i := 0; i < e.Len(); i++ {
			if err := checkForNil(e.Index(i)); err != nil {
				return err
			}
		}
	case reflect.Map:
		if e.IsNil() {
			return fmt.Errorf("field is nil")
		}
		for _, k := range e.MapKeys() {
			if err := checkForNil(e.MapIndex(k)); err != nil {
				return err
			}
		}
	case reflect.Ptr:
		if e.IsNil() {
			return fmt.Errorf("field is nil")
		}
		return checkForNil(e.Elem())
	}
	return nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"istio.io/istio/pilot/pkg/config/kube/crd"
)
// FuzzParseInputs fuzzes CRD YAML/JSON input parsing.
func FuzzParseInputs(data []byte) int {
	if _, _, err := crd.ParseInputs(string(data)); err != nil {
		return 0
	}
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"istio.io/istio/operator/pkg/helmreconciler"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
)
// fakeClientWrapper embeds a controller-runtime client so a fake client
// can be handed to helmreconciler.NewHelmReconciler.
type fakeClientWrapper struct {
	client.Client
}
// FuzzHelmReconciler fuzzes the helm reconciler: it parses a fuzzed YAML
// Kubernetes object, seeds a fake client with it, and exercises
// ApplyObject, Reconcile and ApplyManifest (with a fuzzed Manifest).
func FuzzHelmReconciler(data []byte) int {
	f := fuzz.NewConsumer(data)
	k8sobjBytes, err := f.GetBytes()
	if err != nil {
		return 0
	}
	k8obj, err := object.ParseYAMLToK8sObject(k8sobjBytes)
	if err != nil {
		return 0
	}
	m := name.Manifest{}
	err = f.GenerateStruct(&m)
	if err != nil {
		return 0
	}
	obj := k8obj.UnstructuredObject()
	// Objects without a Kind or Version cannot be registered with the
	// fake client; skip them.
	gvk := obj.GetObjectKind().GroupVersionKind()
	if len(gvk.Kind) == 0 {
		return 0
	}
	if len(gvk.Version) == 0 {
		return 0
	}
	cl := &fakeClientWrapper{fake.NewClientBuilder().WithRuntimeObjects(obj).Build()}
	h, err := helmreconciler.NewHelmReconciler(cl, nil, nil, nil)
	if err != nil {
		return 0
	}
	_ = h.ApplyObject(obj)
	_, _ = h.Reconcile()
	_, _ = h.ApplyManifest(m)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"bytes"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/kube/inject"
)
// FuzzIntoResourceFile fuzzes sidecar injection into a resource file: it
// builds fuzzed sidecar templates, values config, mesh config, input
// bytes and a revision string, then runs inject.IntoResourceFile and
// discards the result.
func FuzzIntoResourceFile(data []byte) int {
	f := fuzz.NewConsumer(data)
	var sidecarTemplate map[string]string
	err := f.FuzzMap(&sidecarTemplate)
	if err != nil {
		return 0
	}
	valuesConfig, err := f.GetString()
	if err != nil {
		return 0
	}
	meshYaml, err := f.GetString()
	if err != nil {
		return 0
	}
	// Only proceed with mesh YAML that survives default application.
	mc, err := mesh.ApplyMeshConfigDefaults(meshYaml)
	if err != nil {
		return 0
	}
	inData, err := f.GetBytes()
	if err != nil {
		return 0
	}
	in := bytes.NewReader(inData)
	var got bytes.Buffer
	// Warnings are intentionally discarded.
	warn := func(s string) {}
	revision, err := f.GetString()
	if err != nil {
		return 0
	}
	templs, err := inject.ParseTemplates(sidecarTemplate)
	if err != nil {
		return 0
	}
	vc, err := inject.NewValuesConfig(valuesConfig)
	if err != nil {
		return 0
	}
	_ = inject.IntoResourceFile(nil, templs, vc, revision, mc, in, &got, warn)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/config/kube/crd"
config2 "istio.io/istio/pkg/config"
"istio.io/istio/pkg/config/schema/collections"
)
// FuzzKubeCRD targets the kube CRD conversion path in two steps. It first
// creates an object from a config filled with pseudo-random values; once
// a valid object exists, it converts the object back and panics if that
// conversion fails.
func FuzzKubeCRD(data []byte) int {
	f := fuzz.NewConsumer(data)
	cfg := config2.Config{}
	if err := f.GenerateStruct(&cfg); err != nil {
		return 0
	}
	// Create a valid obj:
	obj, err := crd.ConvertConfig(cfg)
	if err != nil {
		return 0
	}
	// Converting an object that was successfully created must not fail.
	if _, err := crd.ConvertObject(collections.VirtualService, obj, "cluster"); err != nil {
		panic(err)
	}
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
corev1 "k8s.io/api/core/v1"
knetworking "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
meshconfig "istio.io/api/mesh/v1alpha1"
"istio.io/istio/pilot/pkg/config/kube/ingress"
"istio.io/istio/pkg/config"
"istio.io/istio/pkg/kube"
"istio.io/istio/pkg/kube/kclient"
)
// FuzzConvertIngressVirtualService fuzzes Ingress-to-VirtualService
// conversion against a service lister backed by one empty Service.
func FuzzConvertIngressVirtualService(data []byte) int {
	f := fuzz.NewConsumer(data)
	ing := knetworking.Ingress{}
	if err := f.GenerateStruct(&ing); err != nil {
		return 0
	}
	lister, teardown := newServiceLister(&corev1.Service{})
	defer teardown()
	cfgs := map[string]*config.Config{}
	ingress.ConvertIngressVirtualService(ing, "mydomain", cfgs, lister)
	return 1
}
// FuzzConvertIngressV1alpha3 fuzzes Ingress-to-v1alpha3 conversion with a
// fuzzed Ingress and MeshConfig.
func FuzzConvertIngressV1alpha3(data []byte) int {
	f := fuzz.NewConsumer(data)
	ing := knetworking.Ingress{}
	if err := f.GenerateStruct(&ing); err != nil {
		return 0
	}
	meshCfg := &meshconfig.MeshConfig{}
	if err := f.GenerateStruct(meshCfg); err != nil {
		return 0
	}
	ingress.ConvertIngressV1alpha3(ing, meshCfg, "mydomain")
	return 1
}
// newServiceLister starts a fake Kube client seeded with the given
// objects and returns a Service client plus a teardown func that stops
// the client.
func newServiceLister(objects ...runtime.Object) (kclient.Client[*corev1.Service], func()) {
	kc := kube.NewFakeClient(objects...)
	stop := make(chan struct{})
	kc.RunAndWait(stop)
	return kclient.New[*corev1.Service](kc), func() { close(stop) }
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"istio.io/istio/pkg/config/mesh"
)
// FuzzParseMeshNetworks fuzzes MeshNetworks parsing from raw input.
func FuzzParseMeshNetworks(data []byte) int {
	_, _ = mesh.ParseMeshNetworks(string(data))
	return 1
}
// FuzzValidateMeshConfig fuzzes applying defaults to (and thereby
// validating) a MeshConfig supplied as raw YAML.
func FuzzValidateMeshConfig(data []byte) int {
	_, _ = mesh.ApplyMeshConfigDefaults(string(data))
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file has a series of fuzzers that target different
// parts of Istio. They are placed here because it does not
// make sense to place them in different files yet.
// The fuzzers can be moved to other files without anything
// breaking on the OSS-fuzz side.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/api/operator/v1alpha1"
"istio.io/istio/operator/pkg/apis/istio"
"istio.io/istio/operator/pkg/apis/istio/v1alpha1/validation"
"istio.io/istio/operator/pkg/controlplane"
"istio.io/istio/operator/pkg/name"
"istio.io/istio/operator/pkg/object"
"istio.io/istio/operator/pkg/patch"
"istio.io/istio/operator/pkg/translate"
"istio.io/istio/operator/pkg/util"
"istio.io/istio/operator/pkg/validate"
"istio.io/istio/pkg/config/analysis/diag"
"istio.io/istio/pkg/config/resource"
)
// FuzzCheckIstioOperatorSpec fuzzes IstioOperatorSpec checking in both
// validation modes.
func FuzzCheckIstioOperatorSpec(data []byte) int {
	f := fuzz.NewConsumer(data)
	spec := &v1alpha1.IstioOperatorSpec{}
	if err := f.GenerateStruct(spec); err != nil {
		return 0
	}
	_ = validate.CheckIstioOperatorSpec(spec, false)
	_ = validate.CheckIstioOperatorSpec(spec, true)
	return 1
}
// FuzzV1Alpha1ValidateConfig fuzzes v1alpha1 IstioOperatorSpec validation.
func FuzzV1Alpha1ValidateConfig(data []byte) int {
	f := fuzz.NewConsumer(data)
	spec := &v1alpha1.IstioOperatorSpec{}
	if err := f.GenerateStruct(spec); err != nil {
		return 0
	}
	_, _ = validation.ValidateConfig(false, spec)
	return 1
}
// FuzzGetEnabledComponents fuzzes component-enablement resolution from a
// fuzzed IstioOperatorSpec.
func FuzzGetEnabledComponents(data []byte) int {
	f := fuzz.NewConsumer(data)
	spec := &v1alpha1.IstioOperatorSpec{}
	if err := f.GenerateStruct(spec); err != nil {
		return 0
	}
	_, _ = translate.GetEnabledComponents(spec)
	return 1
}
// FuzzUnmarshalAndValidateIOPS fuzzes unmarshalling plus validation of an
// IstioOperatorSpec from raw input.
func FuzzUnmarshalAndValidateIOPS(data []byte) int {
	_, _ = istio.UnmarshalAndValidateIOPS(string(data))
	return 1
}
// FuzzRenderManifests fuzzes manifest rendering from a fuzzed control
// plane (unexported fields included).
func FuzzRenderManifests(data []byte) int {
	f := fuzz.NewConsumer(data)
	f.AllowUnexportedFields()
	plane := &controlplane.IstioControlPlane{}
	if err := f.GenerateStruct(plane); err != nil {
		return 0
	}
	_, _ = plane.RenderManifest()
	return 1
}
// FuzzOverlayIOP fuzzes overlaying one IOP YAML string onto another.
func FuzzOverlayIOP(data []byte) int {
	f := fuzz.NewConsumer(data)
	base, err := f.GetString()
	if err != nil {
		return 0
	}
	overlay, err := f.GetString()
	if err != nil {
		return 0
	}
	if _, err := util.OverlayIOP(base, overlay); err != nil {
		return 0
	}
	return 1
}
// FuzzNewControlplane fuzzes control-plane construction: it generates a
// fuzzed install spec and translator (requiring the translator's maps to
// be non-nil), installs a fuzzed ComponentMaps entry for pilot, and calls
// NewIstioControlPlane.
func FuzzNewControlplane(data []byte) int {
	f := fuzz.NewConsumer(data)
	f.AllowUnexportedFields()
	inInstallSpec := &v1alpha1.IstioOperatorSpec{}
	err := f.GenerateStruct(inInstallSpec)
	if err != nil {
		return 0
	}
	inTranslator := &translate.Translator{}
	err = f.GenerateStruct(inTranslator)
	if err != nil {
		return 0
	}
	// The translator's maps are dereferenced downstream; skip fuzzed
	// translators that left any of them nil.
	if inTranslator.APIMapping == nil {
		return 0
	}
	if inTranslator.KubernetesMapping == nil {
		return 0
	}
	if inTranslator.GlobalNamespaces == nil {
		return 0
	}
	if inTranslator.ComponentMaps == nil {
		return 0
	}
	cm := &translate.ComponentMaps{}
	err = f.GenerateStruct(cm)
	if err != nil {
		return 0
	}
	inTranslator.ComponentMaps[name.PilotComponentName] = cm
	_, _ = controlplane.NewIstioControlPlane(inInstallSpec, inTranslator, nil, nil)
	return 1
}
// FuzzResolveK8sConflict fuzzes K8s object conflict resolution on a
// fuzzed K8sObject (unexported fields included).
func FuzzResolveK8sConflict(data []byte) int {
	f := fuzz.NewConsumer(data)
	f.AllowUnexportedFields()
	obj := &object.K8sObject{}
	if err := f.GenerateStruct(obj); err != nil {
		return 0
	}
	_ = obj.ResolveK8sConflict()
	return 1
}
// FuzzYAMLManifestPatch fuzzes YAML manifest patching with up to 19
// fuzzed K8SObjectOverlay entries applied to a fuzzed base manifest.
func FuzzYAMLManifestPatch(data []byte) int {
	f := fuzz.NewConsumer(data)
	f.AllowUnexportedFields()
	count, err := f.GetInt()
	if err != nil {
		return 0
	}
	overlays := []*v1alpha1.K8SObjectOverlay{}
	for i := 0; i < count%20; i++ {
		o := &v1alpha1.K8SObjectOverlay{}
		if err := f.GenerateStruct(o); err != nil {
			return 0
		}
		overlays = append(overlays, o)
	}
	baseYAML, err := f.GetString()
	if err != nil {
		return 0
	}
	defaultNamespace, err := f.GetString()
	if err != nil {
		return 0
	}
	_, _ = patch.YAMLManifestPatch(baseYAML, defaultNamespace, overlays)
	return 1
}
// FuzzGalleyDiag fuzzes the galley diag message APIs: it builds a fuzzed
// message type, an optional fuzzed resource instance and a parameter,
// then exercises the message's rendering/serialization surface.
func FuzzGalleyDiag(data []byte) int {
	f := fuzz.NewConsumer(data)
	code, err := f.GetString()
	if err != nil {
		return 0
	}
	templ, err := f.GetString()
	if err != nil {
		return 0
	}
	mt := diag.NewMessageType(diag.Error, code, templ)
	resourceIsNil, err := f.GetBool()
	if err != nil {
		return 0
	}
	parameter, err := f.GetString()
	if err != nil {
		return 0
	}
	var ri *resource.Instance
	if !resourceIsNil {
		// Fix: the previous code called GenerateStruct on a nil
		// *resource.Instance, so the non-nil-resource branch could never
		// produce a populated instance. Allocate before filling.
		ri = &resource.Instance{}
		if err := f.GenerateStruct(ri); err != nil {
			return 0
		}
	}
	m := diag.NewMessage(mt, ri, parameter)
	_ = m.Unstructured(true)
	_ = m.UnstructuredAnalysisMessageBase()
	_ = m.Origin()
	_, _ = m.MarshalJSON()
	_ = m.String()
	// ReplaceLine is only exercised when another string is available.
	replStr, err := f.GetString()
	if err == nil {
		_ = m.ReplaceLine(replStr)
	}
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"istio.io/istio/operator/pkg/translate"
)
// FuzzTranslateFromValueToSpec feeds the raw fuzzer bytes to the reverse
// translator's TranslateFromValueToSpec (with force disabled).
func FuzzTranslateFromValueToSpec(data []byte) int {
	rt := translate.NewReverseTranslator()
	_, _ = rt.TranslateFromValueToSpec(data, false)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"errors"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/serviceregistry/memory"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/mesh"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/slices"
)
// protocols is the fixed pool of protocol instances that
// getProtocolInstance draws from via a fuzzer-derived index
// (modulo len(protocols)). Note the order is load-bearing: reordering
// entries changes which fuzz inputs map to which protocol.
var protocols = []protocol.Instance{
protocol.TCP,
protocol.UDP,
protocol.GRPC,
protocol.GRPCWeb,
protocol.HTTP,
protocol.HTTP_PROXY,
protocol.HTTP2,
protocol.HTTPS,
protocol.TLS,
protocol.Mongo,
protocol.Redis,
protocol.MySQL,
}
// NewSI builds a fuzzed model.ServiceInstance backed by a fuzzed Service
// and Port, returning an error unless the instance passes Validate.
func NewSI(f *fuzz.ConsumeFuzzer) (*model.ServiceInstance, error) {
	instance := &model.ServiceInstance{}
	if err := f.GenerateStruct(instance); err != nil {
		return instance, err
	}
	svc, err := NewS(f)
	if err != nil {
		return instance, err
	}
	port, err := createPort(f)
	if err != nil {
		return instance, err
	}
	svc.Ports = append(svc.Ports, port)
	instance.ServicePort = port
	instance.Service = svc
	if err := instance.Validate(); err != nil {
		return instance, err
	}
	return instance, nil
}
// getProtocolInstance picks one protocol from the package-level protocols
// pool, using a fuzzer-supplied integer as the index.
func getProtocolInstance(f *fuzz.ConsumeFuzzer) (protocol.Instance, error) {
	pIndex, err := f.GetInt()
	if err != nil {
		return protocol.Unsupported, errors.New("could not create protocolInstance")
	}
	// Go's % operator keeps the sign of the dividend, so a negative pIndex
	// would make the index negative and panic. GetInt presumably returns a
	// non-negative value today, but guard anyway — the adjustment is a no-op
	// for non-negative inputs.
	idx := pIndex % len(protocols)
	if idx < 0 {
		idx += len(protocols)
	}
	return protocols[idx], nil
}
// createPort builds a fuzzed model.Port: a fuzzed name, a fuzzed port
// number, and a protocol drawn from the protocols pool.
func createPort(f *fuzz.ConsumeFuzzer) (*model.Port, error) {
	portName, err := f.GetString()
	if err != nil {
		return &model.Port{}, err
	}
	portNumber, err := f.GetInt()
	if err != nil {
		return &model.Port{}, err
	}
	proto, err := getProtocolInstance(f)
	if err != nil {
		return &model.Port{}, err
	}
	return &model.Port{
		Name:     portName,
		Port:     portNumber,
		Protocol: proto,
	}, nil
}
// createPorts builds a slice of fuzzed ports. The count is a
// fuzzer-supplied integer taken modulo 20 (so at most 19), bumped to 1
// when it lands exactly on zero.
func createPorts(f *fuzz.ConsumeFuzzer) ([]*model.Port, error) {
	ports := make([]*model.Port, 0, 20)
	n, err := f.GetInt()
	if err != nil {
		return ports, err
	}
	count := n % 20
	if count == 0 {
		count = 1
	}
	for i := 0; i < count; i++ {
		p, err := createPort(f)
		if err != nil {
			return ports, err
		}
		ports = append(ports, p)
	}
	return ports, nil
}
// NewS builds a fuzzed model.Service with generated ports and a fuzzed
// hostname, returning an error unless the service passes Validate.
func NewS(f *fuzz.ConsumeFuzzer) (*model.Service, error) {
	svc := &model.Service{}
	if err := f.GenerateStruct(svc); err != nil {
		return svc, err
	}
	ports, err := createPorts(f)
	if err != nil {
		return svc, err
	}
	svc.Ports = ports
	hostname, err := f.GetString()
	if err != nil {
		return svc, err
	}
	svc.Hostname = host.Name(hostname)
	if err := svc.Validate(); err != nil {
		return svc, err
	}
	return svc, nil
}
// FuzzInitContext assembles a model.Environment from fuzzed service
// instances, fuzzed services, and a fuzzed mesh config string, then runs
// PushContext.InitContext against it. The order in which bytes are drawn
// from the fuzzer is unchanged from the original harness.
func FuzzInitContext(data []byte) int {
	fc := fuzz.NewConsumer(data)
	// Up to 19 fuzzed service instances.
	count, err := fc.GetInt()
	if err != nil {
		return 0
	}
	instances := make([]*model.ServiceInstance, 0, 20)
	for i := 0; i < count%20; i++ {
		si, err := NewSI(fc)
		if err != nil {
			return 0
		}
		instances = append(instances, si)
	}
	// Up to 19 fuzzed services.
	count, err = fc.GetInt()
	if err != nil {
		return 0
	}
	svcs := make([]*model.Service, 0, 20)
	for i := 0; i < count%20; i++ {
		s, err := NewS(fc)
		if err != nil {
			return 0
		}
		svcs = append(svcs, s)
	}
	meshYAML, err := fc.GetString()
	if err != nil {
		return 0
	}
	meshConfig, err := mesh.ApplyMeshConfigDefaults(meshYAML)
	if err != nil {
		return 0
	}
	discovery := memory.NewServiceDiscovery(svcs...)
	discovery.WantGetProxyServiceTargets = slices.Map(instances, model.ServiceInstanceToTarget)
	env := &model.Environment{}
	env.ConfigStore = model.NewFakeStore()
	env.ServiceDiscovery = discovery
	env.Watcher = mesh.NewFixedWatcher(meshConfig)
	env.EndpointIndex = model.NewEndpointIndex(model.DisabledCache{})
	env.Init()
	_ = model.NewPushContext().InitContext(env, nil, nil)
	return 1
}
// FuzzBNMUnmarshalJSON feeds raw bytes to
// BootstrapNodeMetadata.UnmarshalJSON.
func FuzzBNMUnmarshalJSON(data []byte) int {
	bnm := model.BootstrapNodeMetadata{}
	_ = (&bnm).UnmarshalJSON(data)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/grpcgen"
)
// FuzzGrpcGenGenerate fuzzes GrpcConfigGenerator.Generate with a fuzzed
// proxy (rejected unless FuzzValidate accepts it), a fuzzed watched
// resource, and a fuzzed push request.
func FuzzGrpcGenGenerate(data []byte) int {
	fc := fuzz.NewConsumer(data)
	proxy := &model.Proxy{}
	if err := fc.GenerateStruct(proxy); err != nil {
		return 0
	}
	if !proxy.FuzzValidate() {
		return 0
	}
	watched := &model.WatchedResource{}
	if err := fc.GenerateStruct(watched); err != nil {
		return 0
	}
	req := &model.PushRequest{}
	if err := fc.GenerateStruct(req); err != nil {
		return 0
	}
	gen := &grpcgen.GrpcConfigGenerator{}
	_, _, _ = gen.Generate(proxy, watched, req)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"fmt"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/networking/util"
"istio.io/istio/pilot/pkg/security/authz/matcher"
)
// FuzzCidrRange feeds the raw fuzzer input, as a string, to
// util.AddrStrToCidrRange.
func FuzzCidrRange(data []byte) int {
	addr := string(data)
	_, _ = util.AddrStrToCidrRange(addr)
	return 1
}
// FuzzHeaderMatcher splits the input into a key/value pair via getKandV
// and feeds it to matcher.HeaderMatcher.
func FuzzHeaderMatcher(data []byte) int {
	key, value, err := getKandV(data)
	if err != nil {
		return 0
	}
	_ = matcher.HeaderMatcher(key, value)
	return 1
}
// FuzzHostMatcherWithRegex splits the input into a key/value pair via
// getKandV and feeds it to matcher.HostMatcherWithRegex.
func FuzzHostMatcherWithRegex(data []byte) int {
	key, value, err := getKandV(data)
	if err != nil {
		return 0
	}
	_ = matcher.HostMatcherWithRegex(key, value)
	return 1
}
// FuzzHostMatcher splits the input into a key/value pair via getKandV
// and feeds it to matcher.HostMatcher.
func FuzzHostMatcher(data []byte) int {
	key, value, err := getKandV(data)
	if err != nil {
		return 0
	}
	_ = matcher.HostMatcher(key, value)
	return 1
}
// FuzzMetadataListMatcher fuzzes matcher.MetadataListMatcher with a
// fuzzed filter name, up to 99 fuzzed keys, and a fuzzed matcher value.
func FuzzMetadataListMatcher(data []byte) int {
	fc := fuzz.NewConsumer(data)
	filter, err := fc.GetString()
	if err != nil {
		return 0
	}
	n, err := fc.GetInt()
	if err != nil {
		return 0
	}
	keyCount := n % 100
	keys := make([]string, 0, keyCount)
	for i := 0; i < keyCount; i++ {
		k, err := fc.GetString()
		if err != nil {
			return 0
		}
		keys = append(keys, k)
	}
	value, err := fc.GetString()
	if err != nil {
		return 0
	}
	_ = matcher.MetadataListMatcher(filter, keys, matcher.StringMatcher(value), false)
	return 1
}
// getKandV deterministically splits the fuzzer input into a key/value
// string pair. Inputs shorter than 10 bytes or of odd length are
// rejected so that both halves are non-trivial.
func getKandV(data []byte) (string, string, error) {
	if len(data) < 10 {
		return "", "", fmt.Errorf("not enough bytes")
	}
	if len(data)%2 != 0 {
		return "", "", fmt.Errorf("not correct amount of bytes")
	}
	half := len(data) / 2
	// BUG FIX: the previous code sliced the value from half+1, silently
	// dropping the byte at index half, so the value was one byte shorter
	// than the key. Split cleanly at the midpoint instead.
	return string(data[:half]), string(data[half:]), nil
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"istio.io/istio/security/pkg/util"
)
// FuzzJwtUtil runs the JWT helper parsers (GetExp, GetAud,
// ExtractJwtAud) over the raw input interpreted as a token string.
func FuzzJwtUtil(data []byte) int {
	token := string(data)
	_, _ = util.GetExp(token)
	_, _ = util.GetAud(token)
	_, _ = util.ExtractJwtAud(token)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"crypto/x509/pkix"
"os"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/security/pkg/pki/util"
)
// FuzzVerifyCertificate implements a fuzzer that tests
// util.VerifyCertificate with fuzzed PEM blobs and fuzzed expected
// verify fields.
func FuzzVerifyCertificate(data []byte) int {
	fc := fuzz.NewConsumer(data)
	privPem, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	certChainPem, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	rootCertPem, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	expected := &util.VerifyFields{}
	if err := fc.GenerateStruct(expected); err != nil {
		return 0
	}
	// The result is intentionally discarded; only crashes matter here.
	util.VerifyCertificate(privPem, certChainPem, rootCertPem, expected)
	return 1
}
// FuzzFindRootCertFromCertificateChainBytes implements a fuzzer
// that tests util.FindRootCertFromCertificateChainBytes on raw bytes.
func FuzzFindRootCertFromCertificateChainBytes(data []byte) int {
_, _ = util.FindRootCertFromCertificateChainBytes(data)
return 1
}
// FuzzExtractIDs fuzzes util.ExtractIDs with between 1 and 29 generated
// pkix extensions; a zero count is rejected up front.
func FuzzExtractIDs(data []byte) int {
	fc := fuzz.NewConsumer(data)
	n, err := fc.GetInt()
	if err != nil {
		return 0
	}
	count := n % 30
	if count == 0 {
		return 0
	}
	extensions := make([]pkix.Extension, 0)
	for i := 0; i < count; i++ {
		var ext pkix.Extension
		if err := fc.GenerateStruct(&ext); err != nil {
			return 0
		}
		extensions = append(extensions, ext)
	}
	_, _ = util.ExtractIDs(extensions)
	return 1
}
// FuzzPemCertBytestoString implements a fuzzer
// that tests util.PemCertBytestoString on raw fuzzer bytes.
func FuzzPemCertBytestoString(data []byte) int {
_ = util.PemCertBytestoString(data)
return 1
}
// FuzzParsePemEncodedCertificateChain implements
// a fuzzer that tests util.ParsePemEncodedCertificateChain on raw bytes.
func FuzzParsePemEncodedCertificateChain(data []byte) int {
_, _, _ = util.ParsePemEncodedCertificateChain(data)
return 1
}
// writeFuzzedFile pulls the next byte slice from the fuzzer and writes it
// to name in the current directory. The caller is responsible for
// removing the file.
func writeFuzzedFile(f *fuzz.ConsumeFuzzer, name string) error {
	contents, err := f.GetBytes()
	if err != nil {
		return err
	}
	return os.WriteFile(name, contents, 0o644)
}

// FuzzUpdateVerifiedKeyCertBundleFromFile implements a
// fuzzer that tests UpdateVerifiedKeyCertBundleFromFile: it materializes
// fuzzed cert/key/chain/root files on disk, builds a verified bundle from
// the first set, then updates it from the second set.
//
// Fixes over the previous version: the copy-pasted create/write sequence
// is factored into writeFuzzedFile, and the cleanup typo that removed
// "newCertFile" while creating "newCertfile" (leaking the temp file) is
// gone — names are now used consistently from shared slices.
func FuzzUpdateVerifiedKeyCertBundleFromFile(data []byte) int {
	f := fuzz.NewConsumer(data)
	for _, name := range []string{"certfile", "privKeyFile", "certChainFile", "rootCertFile"} {
		if err := writeFuzzedFile(f, name); err != nil {
			return 0
		}
		// defer in a loop is intentional: every file written so far is
		// removed when the harness returns.
		defer os.Remove(name)
	}
	bundle, err := util.NewVerifiedKeyCertBundleFromFile("certfile", "privKeyFile", []string{"certChainFile"}, "rootCertFile")
	if err != nil {
		return 0
	}
	if _, err := bundle.CertOptions(); err == nil {
		// The fuzzer managed to build working cert options from random
		// bytes; surface that as a finding.
		panic("Ran successfully")
	}
	for _, name := range []string{"newCertfile", "newPrivKeyFile", "newCertChainFile", "newRootCertFile"} {
		if err := writeFuzzedFile(f, name); err != nil {
			return 0
		}
		defer os.Remove(name)
	}
	bundle.UpdateVerifiedKeyCertBundleFromFile("newCertfile", "newPrivKeyFile", []string{"newCertChainFile"}, "newRootCertFile")
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"context"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"net"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
pb "istio.io/api/security/v1alpha1"
"istio.io/istio/pkg/security"
mockca "istio.io/istio/security/pkg/pki/ca/mock"
"istio.io/istio/security/pkg/pki/util"
"istio.io/istio/security/pkg/server/ca"
"istio.io/istio/security/pkg/server/ca/authenticate"
)
// FuzzGenCSR fuzzes util.GenCSR with pseudo-randomly generated
// certificate options.
func FuzzGenCSR(data []byte) int {
	fc := fuzz.NewConsumer(data)
	opts := util.CertOptions{}
	if err := fc.GenerateStruct(&opts); err != nil {
		return 0
	}
	_, _, _ = util.GenCSR(opts)
	return 1
}
// fuzzedCertChain returns a verified-chains value for TLS info. When the
// fuzzer's boolean says so, it contains a single certificate carrying a
// SAN extension built from fuzzed identities; otherwise it is empty.
func fuzzedCertChain(f *fuzz.ConsumeFuzzer) ([][]*x509.Certificate, error) {
	chain := [][]*x509.Certificate{}
	withSAN, err := f.GetBool()
	if err != nil {
		return chain, err
	}
	if !withSAN {
		return chain, nil
	}
	ids := []util.Identity{}
	if err := f.GenerateStruct(&ids); err != nil {
		return chain, err
	}
	sanExt, err := util.BuildSANExtension(ids)
	if err != nil {
		return chain, err
	}
	cert := &x509.Certificate{Extensions: []pkix.Extension{*sanExt}}
	return [][]*x509.Certificate{{cert}}, nil
}
// FuzzCreateCertE2EUsingClientCertAuthenticator drives the CA server's
// CreateCertificate end to end through a ClientCertAuthenticator, using
// fuzzed-but-parseable PEM certs, a fuzzed CSR string, and an optionally
// fuzzed verified cert chain attached to the peer's TLS info. Fuzzer
// byte consumption order matches the original harness.
func FuzzCreateCertE2EUsingClientCertAuthenticator(data []byte) int {
	fc := fuzz.NewConsumer(data)
	certChainBytes, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	// Only continue with bytes that parse as a PEM certificate.
	if _, err := util.ParsePemEncodedCertificate(certChainBytes); err != nil {
		return 0
	}
	rootCertBytes, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	if _, err := util.ParsePemEncodedCertificate(rootCertBytes); err != nil {
		return 0
	}
	signedCert, err := fc.GetBytes()
	if err != nil {
		return 0
	}
	kcb := util.NewKeyCertBundleFromPem(nil, nil, certChainBytes, rootCertBytes)
	fakeCA := &mockca.FakeCA{
		SignedCert:    signedCert,
		KeyCertBundle: kcb,
	}
	auth := &authenticate.ClientCertAuthenticator{}
	server, err := ca.New(fakeCA, 1, []security.Authenticator{auth}, nil, nil)
	if err != nil {
		return 0
	}
	csr, err := fc.GetString()
	if err != nil {
		return 0
	}
	chain, err := fuzzedCertChain(fc)
	if err != nil {
		return 0
	}
	tlsInfo := credentials.TLSInfo{
		State: tls.ConnectionState{VerifiedChains: chain},
	}
	p := &peer.Peer{
		Addr:     &net.IPAddr{IP: net.IPv4(192, 168, 1, 1)},
		AuthInfo: tlsInfo,
	}
	ctx := peer.NewContext(context.Background(), p)
	_, _ = server.CreateCertificate(ctx, &pb.IstioCertificateRequest{Csr: csr})
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/api/meta/v1alpha1"
"istio.io/istio/pilot/pkg/status/distribution"
)
// FuzzReconcileStatuses implements a fuzzer that targets
// distribution.ReconcileStatuses, passing it a pseudo-randomly populated
// current status and desired progress.
func FuzzReconcileStatuses(data []byte) int {
	fc := fuzz.NewConsumer(data)
	current := &v1alpha1.IstioStatus{}
	if err := fc.GenerateStruct(current); err != nil {
		return 0
	}
	var desired distribution.Progress
	if err := fc.GenerateStruct(&desired); err != nil {
		return 0
	}
	_, _ = distribution.ReconcileStatuses(current, desired)
	return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package utils
import (
"os"
"istio.io/istio/pkg/test"
)
// NopTester is a no-op implementation of test.Failer for use in fuzz
// harnesses, where there is no real *testing.T to report failures to.
// Every method except TempDir does nothing.
type NopTester struct{}
func (n NopTester) Fail() {}
func (n NopTester) FailNow() {}
func (n NopTester) Fatal(args ...any) {}
func (n NopTester) Fatalf(format string, args ...any) {}
func (n NopTester) Log(args ...any) {}
func (n NopTester) Logf(format string, args ...any) {}
func (n NopTester) Skip(args ...any) {}
// TempDir returns a freshly created temp directory; the MkdirTemp error
// is deliberately ignored, so the result may be "" on failure.
func (n NopTester) TempDir() string {
tempDir, _ := os.MkdirTemp("", "test")
return tempDir
}
func (n NopTester) Helper() {}
func (n NopTester) Cleanup(f func()) {}
// Compile-time check that NopTester satisfies test.Failer.
var _ test.Failer = NopTester{}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"errors"
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking/core/v1alpha3"
"istio.io/istio/tests/fuzz/utils"
)
// init initializes the testing package so that the testing-based
// fixtures used by this harness can run outside of `go test`.
func init() {
testing.Init()
}
// ValidateTestOptions returns an error when the fuzzed TestOptions
// contain a nil entry in either ConfigStoreCaches or ServiceRegistries;
// such inputs are rejected by the fuzzer before use.
func ValidateTestOptions(to v1alpha3.TestOptions) error {
	for _, cache := range to.ConfigStoreCaches {
		if cache == nil {
			return errors.New("a ConfigStoreController was nil")
		}
	}
	for _, registry := range to.ServiceRegistries {
		if registry == nil {
			return errors.New("a ServiceRegistry was nil")
		}
	}
	return nil
}
func FuzzValidateClusters(data []byte) int {
proxy := model.Proxy{}
f := fuzz.NewConsumer(data)
to := v1alpha3.TestOptions{}
err := f.GenerateStruct(&to)
if err != nil {
return 0
}
err = ValidateTestOptions(to)
if err != nil {
return 0
}
err = f.GenerateStruct(&proxy)
if err != nil || !proxy.FuzzValidate() {
return 0
}
cg := v1alpha3.NewConfigGenTest(utils.NopTester{}, to)
p := cg.SetupProxy(&proxy)
_ = cg.Clusters(p)
_ = cg.Routes(p)
return 1
}
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fuzz
import (
"testing"
"istio.io/istio/pilot/pkg/config/kube/crd"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/test/xds"
)
// init initializes the testing package so the *testing.T-based fake
// discovery server below can be constructed outside of `go test`.
func init() {
testing.Init()
}
func FuzzXds(data []byte) int {
t := &testing.T{}
// Use crd.ParseInputs to verify data
_, _, err := crd.ParseInputs(string(data))
if err != nil {
return 0
}
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{
ConfigString: string(data),
})
proxy := s.SetupProxy(&model.Proxy{
ConfigNamespace: "app",
})
_ = s.Listeners(proxy)
_ = s.Routes(proxy)
return 1
}